Dataset columns:
    repo_name    string, length 8-130
    hexsha       sequence
    file_path    sequence
    code         sequence
    apis         sequence
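The records that follow all share this layout: a repository name plus parallel lists of commit hashes, file paths, file contents, and the library APIs called in each file. As a minimal sketch of how such a dump can be consumed, the snippet below walks a JSON-lines export of these five columns; the file name records.jsonl and the JSON-lines layout are assumptions made for illustration, not something stated by the dump itself.

import json

# Sketch only. Assumed (not part of the original dump): records live in a local
# JSON-lines file, one object per line, and the four sequence columns are
# parallel lists with one entry per file.
def iter_files(path="records.jsonl"):
    with open(path, "r", encoding="utf-8") as fh:
        for line in fh:
            record = json.loads(line)
            for sha, file_path, code, apis in zip(
                record["hexsha"], record["file_path"],
                record["code"], record["apis"],
            ):
                yield record["repo_name"], sha, file_path, code, apis

if __name__ == "__main__":
    for repo, sha, file_path, code, apis in iter_files():
        print(f"{repo}:{file_path} ({len(code)} chars, {len(apis)} APIs)")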
basbeu/PyLaia
[ "d14458484b56622204b1730a7d53220c5d0f1bc1" ]
[ "laia/utils/dortmund_image_to_tensor.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\n\nimport cv2\nimport numpy as np\nimport torch\nfrom PIL import Image, ImageOps\n\n\ndef dortmund_distort(img, random_limits=(0.8, 1.1)):\n \"\"\"\n Creates an augmentation by computing a homography from three points in the\n image to three randomly generated points.\n \"\"\"\n y, x = img.shape[:2]\n src_point = np.float32([[x / 2, y / 3], [2 * x / 3, 2 * y / 3], [x / 3, 2 * y / 3]])\n random_shift = (np.random.rand(3, 2) - 0.5) * 2 * (\n random_limits[1] - random_limits[0]\n ) / 2 + np.mean(random_limits)\n dst_point = src_point * random_shift.astype(np.float32)\n transform = cv2.getAffineTransform(src_point, dst_point)\n if img.ndim == 3:\n border_value = np.median(\n np.reshape(img, (img.shape[0] * img.shape[1], -1)), axis=0\n )\n else:\n border_value = float(np.median(img))\n return cv2.warpAffine(img, transform, dsize=(x, y), borderValue=border_value)\n\n\nclass DortmundImageToTensor(object):\n def __init__(\n self, fixed_height=None, fixed_width=None, min_height=None, min_width=None\n ):\n assert fixed_height is None or fixed_height > 0\n assert fixed_width is None or fixed_width > 0\n assert min_height is None or min_height > 0\n assert min_width is None or min_width > 0\n self._fh = fixed_height\n self._fw = fixed_width\n self._mh = min_height\n self._mw = min_width\n\n def __call__(self, x):\n assert isinstance(x, Image.Image)\n x = x.convert(\"L\")\n x = ImageOps.invert(x)\n if self._fh or self._fw:\n # Optionally, resize image to a fixed size\n cw, ch = x.size\n nw = self._fw if self._fw else int(cw * self._fh / ch)\n nh = self._fh if self._fh else int(ch * self._fw / cw)\n x.resize((nw, nh), Image.BILINEAR)\n elif self._mh or self._mw:\n # Optionally, pad image to have the minimum size\n cw, ch = x.size\n nw = cw if self._mw is None or cw >= self._mw else self._mw\n nh = ch if self._mh is None or ch >= self._mh else self._mh\n if cw != nw or ch != nh:\n nx = Image.new(\"L\", (nw, nh))\n nx.paste(x, ((nw - cw) // 2, (nh - ch) // 2))\n x = nx\n\n x = np.asarray(x, dtype=np.float32)\n x = dortmund_distort(x / 255.0)\n if x.shape != 3:\n x = np.expand_dims(x, axis=-1)\n x = np.transpose(x, (2, 0, 1))\n return torch.from_numpy(x)\n\n\nif __name__ == \"__main__\":\n import matplotlib.pyplot as plt\n\n import laia.random\n from laia.data import TextImageFromTextTableDataset, ImageDataLoader\n from laia.plugins.arguments import add_argument, add_defaults, args\n\n add_defaults(\"seed\")\n add_argument(\"--num_images\", type=int, help=\"Show only this number of images\")\n add_argument(\"--shuffle\", action=\"store_true\", help=\"Shuffle the list of images\")\n add_argument(\"img_dir\", help=\"Directory containing images\")\n add_argument(\"txt_table\", help=\"Transcriptions of each image\")\n args = args()\n laia.random.manual_seed(args.seed)\n\n dataset = TextImageFromTextTableDataset(\n args.txt_table, args.img_dir, img_transform=DortmundImageToTensor()\n )\n dataset_loader = ImageDataLoader(\n dataset=dataset, image_channels=1, shuffle=args.shuffle\n )\n\n for i, batch in enumerate(dataset_loader, 1):\n if args.num_images and i > args.num_images:\n break\n # Note: batch['img'] is a PaddedTensor\n img = batch[\"img\"].data.squeeze().numpy()\n imgplt = plt.imshow(img, cmap=\"gray\")\n imgplt.axes.set_title(\" \".join(batch[\"txt\"][0]))\n plt.show()\n" ]
[ [ "numpy.transpose", "numpy.reshape", "numpy.float32", "numpy.asarray", "numpy.median", "matplotlib.pyplot.imshow", "torch.from_numpy", "matplotlib.pyplot.show", "numpy.expand_dims", "numpy.random.rand", "numpy.mean" ] ]
zhoub/dldt
[ "e42c01cf6e1d3aefa55e2c5df91f1054daddc575" ]
[ "tools/accuracy_checker/accuracy_checker/representation/pose_estimation_representation.py" ]
[ "\"\"\"\nCopyright (c) 2019 Intel Corporation\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport numpy as np\nfrom .base_representation import BaseRepresentation\n\n\nclass PoseEstimationRepresentation(BaseRepresentation):\n def __init__(self, identifier='', x_values=None, y_values=None, visibility=None, labels=None):\n super().__init__(identifier)\n self.x_values = x_values if np.size(x_values) > 0 else []\n self.y_values = y_values if np.size(y_values) > 0 else []\n self.visibility = visibility if np.size(visibility) > 0 else [2] * len(x_values)\n self.labels = labels if labels is not None else np.array([1]*len(x_values))\n\n @property\n def areas(self):\n areas = self.metadata.get('areas')\n if areas:\n return areas\n x_mins = np.min(self.x_values, axis=1)\n x_maxs = np.max(self.x_values, axis=1)\n y_mins = np.min(self.y_values, axis=1)\n y_maxs = np.max(self.y_values, axis=1)\n return (x_maxs - x_mins) * (y_maxs - y_mins)\n\n @property\n def bboxes(self):\n rects = self.metadata.get('rects')\n if rects:\n return rects\n x_mins = np.min(self.x_values, axis=1)\n x_maxs = np.max(self.x_values, axis=1)\n y_mins = np.min(self.y_values, axis=1)\n y_maxs = np.max(self.y_values, axis=1)\n return [[x_min, y_min, x_max, y_max] for x_min, y_min, x_max, y_max in zip(x_mins, y_mins, x_maxs, y_maxs)]\n\n @property\n def size(self):\n return len(self.x_values)\n\n\nclass PoseEstimationAnnotation(PoseEstimationRepresentation):\n pass\n\n\nclass PoseEstimationPrediction(PoseEstimationRepresentation):\n def __init__(self, identifier='', x_values=None, y_values=None, visibility=None, scores=None, labels=None):\n super().__init__(identifier, x_values, y_values, visibility, labels)\n self.scores = scores if scores.any() else []\n" ]
[ [ "numpy.max", "numpy.min", "numpy.size" ] ]
hmaarrfk/vispy
[ "7f3f6f60c8462bb8a3a8fa03344a2e6990b86eb2" ]
[ "examples/basics/scene/surface_plot.py" ]
[ "# -*- coding: utf-8 -*-\n# vispy: gallery 30\n# -----------------------------------------------------------------------------\n# Copyright (c) Vispy Development Team. All Rights Reserved.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n# -----------------------------------------------------------------------------\n\"\"\"\nThis example demonstrates the use of the SurfacePlot visual.\n\"\"\"\n\nimport sys\nimport numpy as np\n\nfrom vispy import app, scene\nfrom vispy.util.filter import gaussian_filter\n\n\ncanvas = scene.SceneCanvas(keys='interactive', bgcolor='w')\nview = canvas.central_widget.add_view()\nview.camera = scene.TurntableCamera(up='z', fov=60)\n\n# Simple surface plot example\n# x, y values are not specified, so assumed to be 0:50\nz = np.random.normal(size=(250, 250), scale=200)\nz[100, 100] += 50000\nz = gaussian_filter(z, (10, 10))\np1 = scene.visuals.SurfacePlot(z=z, color=(0.3, 0.3, 1, 1))\np1.transform = scene.transforms.MatrixTransform()\np1.transform.scale([1/249., 1/249., 1/249.])\np1.transform.translate([-0.5, -0.5, 0])\n\nview.add(p1)\n\n# p1._update_data() # cheating.\n# cf = scene.filters.ZColormapFilter('fire', zrange=(z.max(), z.min()))\n# p1.attach(cf)\n\n\nxax = scene.Axis(pos=[[-0.5, -0.5], [0.5, -0.5]], tick_direction=(0, -1),\n font_size=16, axis_color='k', tick_color='k', text_color='k',\n parent=view.scene)\nxax.transform = scene.STTransform(translate=(0, 0, -0.2))\n\nyax = scene.Axis(pos=[[-0.5, -0.5], [-0.5, 0.5]], tick_direction=(-1, 0),\n font_size=16, axis_color='k', tick_color='k', text_color='k',\n parent=view.scene)\nyax.transform = scene.STTransform(translate=(0, 0, -0.2))\n\n# Add a 3D axis to keep us oriented\naxis = scene.visuals.XYZAxis(parent=view.scene)\n\nif __name__ == '__main__':\n canvas.show()\n if sys.flags.interactive == 0:\n app.run()\n" ]
[ [ "numpy.random.normal" ] ]
trailofbits/ceo
[ "d6a1ed729f8a1e400147b99dfcb65934e1924891" ]
[ "ceo/sampling.py" ]
[ "from sklearn.model_selection import ShuffleSplit, StratifiedShuffleSplit\nfrom sklearn.utils import shuffle as skshuffle\n\ndef shuffle(x, random_state=None):\n return skshuffle(x, random_state=random_state)\n\ndef split_shuffle(X,y=None, random_state=None):\n sss = ShuffleSplit(n_splits=1, test_size=0.25, random_state=random_state)\n X_train = []\n X_test = []\n y_train = []\n y_test = []\n\n train_index, test_index = sss.split(X, y).next()\n\n for index in train_index:\n X_train.append(X[index])\n if y is not None:\n y_train.append(y[index])\n\n\n for index in test_index:\n X_test.append(X[index])\n if y is not None:\n y_test.append(y[index])\n\n if y is not None:\n return X_train, y_train, X_test, y_test\n else:\n return X_train, X_test\n\ndef stratified_shuffle(X,y, random_state=None):\n sss = StratifiedShuffleSplit(n_splits=1, test_size=0.25, random_state=random_state)\n X_train = []\n X_test = []\n y_train = []\n y_test = []\n\n\n train_index, test_index = sss.split(X, y).next()\n\n for index in train_index:\n X_train.append(X[index])\n y_train.append(y[index])\n\n\n for index in test_index:\n X_test.append(X[index])\n y_test.append(y[index])\n\n\n return X_train, y_train, X_test, y_test\n" ]
[ [ "sklearn.model_selection.ShuffleSplit", "sklearn.model_selection.StratifiedShuffleSplit", "sklearn.utils.shuffle" ] ]
xAbdalla/Machine_Learning_Exercises-Stanford_University
[ "2b38413e91948b5d2614407ac9b62a60acd191d2" ]
[ "Python/ex3/ex3.py" ]
[ "import numpy as np\nfrom scipy.io import loadmat\nfrom scipy.optimize import fmin_cg\n\n# Ignore overflow and divide by zero of np.log() and np.exp()\n# np.seterr(divide = 'ignore')\n# np.seterr(over = 'ignore') \n\ndef sigmoid(z):\n return 1.0 / (1.0 + np.exp(-z))\n\ndef computeCost(theta, X, y, lamba=1):\n m = len(y)\n \n h = sigmoid( X.dot( theta ) )\n unreg_cost = (( np.log( h ).dot( -y ) ) - (( np.log( 1. - h ).dot( 1. - y ) ))) / m\n theta[0] = 0\n reg_cost = theta.T.dot( theta ) * lamba / (2*m)\n \n return unreg_cost + reg_cost\n\ndef gradientCost(theta, X, y, lamba=1):\n m = len(y)\n grad = X.T.dot( sigmoid(X.dot(theta)) - y) / m\n grad[1:] += (theta[1:] * lamba) / m\n \n return grad\n\ndef oneVsAll(X, y, num_labels, lamba):\n # Some useful variables\n m, n = X.shape\n \n # Add ones to the X data matrix\n X = np.insert(X, 0, 1, axis= 1)\n \n # need to return the following variables correctly \n all_theta = np.zeros((n+1, num_labels))\n \n # labels are 1-indexed instead of 0-indexed\n for i in range(0, num_labels):\n theta = np.zeros(( n+1, 1 )).reshape(-1)\n y_i = ((y == (i+1)) + 0).reshape(-1)\n \n # minimize the objective function\n fmin = fmin_cg(computeCost,\n x0= theta,\n args= (X, y_i, lamba),\n fprime= gradientCost,\n maxiter= 300,\n disp= False,\n full_output= True)\n \n all_theta[:, i] = fmin[0]\n \n # np.save( \"all_theta.txt\", all_theta )\n print (\"%2d Cost: %.5f\" % (i+1, fmin[1]))\n print('===================================================')\n return all_theta\n\ndef predictOneVsAll(X, all_theta):\n # Add ones to the X data matrix\n m, n = X.shape\n X = np.insert(X, 0, 1, axis= 1)\n \n p = sigmoid(X.dot(all_theta)) # 1-D Array\n # print(p.shape)\n p_argmax = np.matrix(p.shape) # 1-D Array\n p_argmax = np.argmax(p, axis= 1) + 1\n \n return p_argmax.reshape(m, 1) # it's important to reshape to convert it to 2-D Array.\n\n# read data\ndata = loadmat('ex3data1.mat')\nX, y = data['X'], data['y']\n\nm, n = X.shape\nnum_labels = len(np.unique(y).tolist())\ninput_layer_size = n\n\nprint('\\nDataset Details:\\n')\nprint('X Shape = ' , X.shape, type(X)) \nprint('Y Shape = ', y.shape, ' ', type(y))\nprint('===================================================')\n\nlamda = 0.1\nall_theta = oneVsAll(X, y, num_labels, lamda)\n\nprint(' X.shape = ', X.shape)\nprint(' y.shape = ', y.shape)\nprint('all_theta.shape = ', all_theta.shape)\nprint(' no. of labels = ', num_labels)\nprint(' data array = ', np.unique(data['y']))\nprint('===================================================')\n\n# Compute accuracy on our training set\np = predictOneVsAll(X, all_theta)\nprint('Training Set Accuracy: %.4f%%' %(np.mean(y == p) * 100))\nprint('===================================================')" ]
[ [ "scipy.io.loadmat", "numpy.zeros", "numpy.matrix", "numpy.insert", "numpy.argmax", "numpy.exp", "numpy.log", "scipy.optimize.fmin_cg", "numpy.unique", "numpy.mean" ] ]
gtpedrosa/Python4WindEnergy
[ "8f97a5f86e81ce01d80dafb6f8104165fd3ad397" ]
[ "py4we/dakota.py" ]
[ "\"\"\" IO classes for YOUR_FILE_TYPE_NAME file types\r\n\r\nCopyright (C) 2013 DTU Wind Energy\r\n\r\nAuthor: Juan Pablo Murcia\r\nEmail: [email protected]\r\nLast revision: 28.01.2014\r\n\r\nLicense: Apache v2.0, http://www.apache.org/licenses/LICENSE-2.0\r\n\"\"\"\r\n\r\n\r\nfrom __future__ import print_function\r\nfrom we_file_io import WEFileIO, TestWEFileIO\r\nimport unittest\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\nclass DakotaTabFileIO(WEFileIO):\r\n \"\"\" Dakota tabular self.data (.dat) file class\r\n \r\n A Multilevel Parallel Object-Oriented Framework for:\r\n\r\n Design Optimization\r\n Parameter Estimation\r\n Uncertainty Quantification\r\n Sensitivity Analysis\r\n\r\n methods:\r\n --------\r\n write: write a file\r\n read: read a file\r\n\r\n \"\"\"\r\n def _write(self):\r\n \"\"\" Write a file (overrided)\r\n \"\"\"\r\n n_col = len(self.data)\r\n n_row = len(self.data[self.keys[0]])\r\n data = list()\r\n for i_row in range(n_row):\r\n data.append('')\r\n for i_col in range(n_col):\r\n if i_col == 0:\r\n data[-1] = data[-1] + format(self.data[self.keys[i_col]][i_row], '8.0f')\r\n else:\r\n data[-1] = data[-1] + ' ' + format(self.data[self.keys[i_col]][i_row], ' 1.10e')\r\n\r\n header = ''\r\n for i_col in range(n_col):\r\n if i_col == 0:\r\n header = header + format(self.keys[i_col], '<5')\r\n else:\r\n header = header + format(self.keys[i_col], '>16')\r\n\r\n data.insert(0, header)\r\n data[0] = \"%\" + data[0]\r\n\r\n out = '\\n'.join( data )\r\n with open(self.filename, 'w') as f:\r\n f.write(out)\r\n\r\n\r\n def _read(self):\r\n \"\"\" Read the file (overrided)\r\n \"\"\"\r\n\r\n with open(self.filename, 'r') as myfile:\r\n rawData = myfile.readlines()\r\n\r\n header = rawData[0]\r\n self.keys = header.split()\r\n self.keys[0] = self.keys[0][1:]\r\n n_col = len(self.keys)\r\n\r\n rawData = rawData[1:]\r\n n_row = len(rawData)\r\n\r\n # Initialize data dictionary\r\n self.data = {}\r\n for i_col in range(n_col): \r\n self.data[self.keys[i_col]] = list()\r\n # Loop over lines and extract variables of interest \r\n for i_row in range(n_row):\r\n line = rawData[i_row]\r\n columns = line.split()\r\n for i_col in range(n_col): \r\n self.data[self.keys[i_col]].append(float(columns[i_col]))\r\n\r\n # Print out something \r\n # print (self.keys) \r\n # print (self.data[ self.keys[0] ])\r\n\r\n def _plot(self,fig):\r\n\r\n n_row = len(self.keys)\r\n for i_row in range(n_row):\r\n if i_row != 0:\r\n ax = fig.add_subplot(1, n_row-1, i_row)\r\n ax.plot(self.data[self.keys[0]],self.data[self.keys[i_row]])\r\n ax.set_xlabel(self.keys[0])\r\n ax.set_ylabel(self.keys[i_row])\r\n\r\n # setting global plot configuration using the RC configuration style\r\n plt.rc('font', family='serif')\r\n plt.rc('xtick', labelsize=20) # tick labels\r\n plt.rc('ytick', labelsize=20) # tick labels\r\n plt.rc('axes', labelsize=20) # axes labels\r\n plt.rcParams['figure.figsize'] = 14, 4\r\n\r\n plt.tight_layout()\r\n\r\n def __getitem__(self, key):\r\n \"\"\" Transform the class instance into a dictionary.\"\"\"\r\n return self.data[key]\r\n\r\n## Do Some testing -------------------------------------------------------\r\nclass TestDakotaTabFileIO(TestWEFileIO):\r\n \"\"\" Test class for MyFileType class \"\"\"\r\n\r\n test_file = './test/mann/simABLupwind.inp'\r\n\r\n def test_duplication(self):\r\n self._test_duplication(DakotaTabFileIO, './test/dakota/rosen_grad_opt.dat')\r\n\r\n\r\n## Main function ---------------------------------------------------------\r\nif __name__ == '__main__':\r\n \"\"\" 
This is the main fuction that will run the tests automatically\r\n\r\n $> python my_file_type.py\r\n .\r\n ----------------------------------------------------------------------\r\n Ran X test in XXXs\r\n\r\n OK\r\n \"\"\"\r\n unittest.main()\r\n \r\n ''' Example uses of DakotaTabFileIO class: \r\n '''\r\n # a = DakotaTabFileIO(\"test/dakota/rosen_grad_opt.dat\") \r\n # print (type(a))\r\n # print (a.keys)\r\n # print (a.data)\r\n # print (a['x1'])\r\n # a.plot()\r\n \r\n \r\n\r\n" ]
[ [ "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.rc" ] ]
teddylfwu/LanczosNetwork
[ "adb82d9bce4b14040952565708273eb7e6738d3c" ]
[ "runner/qm8_runner.py" ]
[ "from __future__ import (division, print_function)\nimport os\nimport numpy as np\nimport pickle\nfrom collections import defaultdict\nfrom tqdm import tqdm\n\nimport torch\nimport torch.nn as nn\nimport torch.utils.data\nimport torch.optim as optim\nfrom torch.utils.data import Dataset, DataLoader\nfrom tensorboardX import SummaryWriter\n\nfrom model import *\nfrom dataset import *\nfrom utils.logger import get_logger\nfrom utils.train_helper import data_to_gpu, snapshot, load_model, EarlyStopper\n\nlogger = get_logger('exp_logger')\n__all__ = ['QM8Runner']\n\n\nclass QM8Runner(object):\n\n def __init__(self, config):\n self.config = config\n self.dataset_conf = config.dataset\n self.model_conf = config.model\n self.train_conf = config.train\n self.test_conf = config.test\n self.use_gpu = config.use_gpu\n self.gpus = config.gpus\n self.writer = SummaryWriter(config.save_dir)\n self.meta_data = pickle.load(open(self.dataset_conf.meta_data_path, 'rb'))\n self.const_factor = self.meta_data['std'].reshape(1, -1)\n\n def train(self):\n # create data loader\n train_dataset = eval(self.dataset_conf.loader_name)(\n self.config, split='train')\n dev_dataset = eval(self.dataset_conf.loader_name)(self.config, split='dev')\n train_loader = torch.utils.data.DataLoader(\n train_dataset,\n batch_size=self.train_conf.batch_size,\n shuffle=self.train_conf.shuffle,\n num_workers=self.train_conf.num_workers,\n collate_fn=train_dataset.collate_fn,\n drop_last=False)\n dev_loader = torch.utils.data.DataLoader(\n dev_dataset,\n batch_size=self.train_conf.batch_size,\n shuffle=False,\n num_workers=self.train_conf.num_workers,\n collate_fn=dev_dataset.collate_fn,\n drop_last=False)\n\n # create models\n model = eval(self.model_conf.name)(self.config)\n\n # create optimizer\n params = filter(lambda p: p.requires_grad, model.parameters())\n if self.train_conf.optimizer == 'SGD':\n optimizer = optim.SGD(\n params,\n lr=self.train_conf.lr,\n momentum=self.train_conf.momentum,\n weight_decay=self.train_conf.wd)\n elif self.train_conf.optimizer == 'Adam':\n optimizer = optim.Adam(\n params, lr=self.train_conf.lr, weight_decay=self.train_conf.wd)\n else:\n raise ValueError(\"Non-supported optimizer!\")\n\n early_stop = EarlyStopper([0.0], win_size=10, is_decrease=False)\n\n lr_scheduler = optim.lr_scheduler.MultiStepLR(\n optimizer,\n milestones=self.train_conf.lr_decay_steps,\n gamma=self.train_conf.lr_decay)\n\n # reset gradient\n optimizer.zero_grad()\n\n # resume training\n if self.train_conf.is_resume:\n load_model(model, self.train_conf.resume_model, optimizer=optimizer)\n\n if self.use_gpu:\n model = nn.DataParallel(model, device_ids=self.gpus).cuda()\n\n # Training Loop\n iter_count = 0\n best_val_loss = np.inf\n results = defaultdict(list)\n for epoch in range(self.train_conf.max_epoch):\n # validation\n if (epoch + 1) % self.train_conf.valid_epoch == 0 or epoch == 0:\n model.eval()\n val_loss = []\n\n for data in tqdm(dev_loader):\n if self.use_gpu:\n data['node_feat'], data['node_mask'], data['label'] = data_to_gpu(\n data['node_feat'], data['node_mask'], data['label'])\n\n if self.model_conf.name == 'LanczosNet':\n data['L'], data['D'], data['V'] = data_to_gpu(\n data['L'], data['D'], data['V'])\n elif self.model_conf.name == 'GraphSAGE':\n data['nn_idx'], data['nonempty_mask'] = data_to_gpu(\n data['nn_idx'], data['nonempty_mask'])\n elif self.model_conf.name == 'GPNN':\n data['L'], data['L_cluster'], data['L_cut'] = data_to_gpu(\n data['L'], data['L_cluster'], data['L_cut'])\n else:\n data['L'] = 
data_to_gpu(data['L'])[0]\n\n with torch.no_grad():\n if self.model_conf.name == 'AdaLanczosNet':\n pred, _ = model(\n data['node_feat'],\n data['L'],\n label=data['label'],\n mask=data['node_mask'])\n elif self.model_conf.name == 'LanczosNet':\n pred, _ = model(\n data['node_feat'],\n data['L'],\n data['D'],\n data['V'],\n label=data['label'],\n mask=data['node_mask'])\n elif self.model_conf.name == 'GraphSAGE':\n pred, _ = model(\n data['node_feat'],\n data['nn_idx'],\n data['nonempty_mask'],\n label=data['label'],\n mask=data['node_mask'])\n elif self.model_conf.name == 'GPNN':\n pred, _ = model(\n data['node_feat'],\n data['L'],\n data['L_cluster'],\n data['L_cut'],\n label=data['label'],\n mask=data['node_mask'])\n else:\n pred, _ = model(\n data['node_feat'],\n data['L'],\n label=data['label'],\n mask=data['node_mask'])\n\n curr_loss = (\n pred - data['label']).abs().cpu().numpy() * self.const_factor\n val_loss += [curr_loss]\n\n val_loss = float(np.mean(np.concatenate(val_loss)))\n logger.info(\"Avg. Validation MAE = {}\".format(val_loss))\n self.writer.add_scalar('val_loss', val_loss, iter_count)\n results['val_loss'] += [val_loss]\n\n # save best model\n if val_loss < best_val_loss:\n best_val_loss = val_loss\n snapshot(\n model.module if self.use_gpu else model,\n optimizer,\n self.config,\n epoch + 1,\n tag='best')\n\n logger.info(\"Current Best Validation MAE = {}\".format(best_val_loss))\n\n # check early stop\n if early_stop.tick([val_loss]):\n snapshot(\n model.module if self.use_gpu else model,\n optimizer,\n self.config,\n epoch + 1,\n tag='last')\n self.writer.close()\n break\n\n # training\n model.train()\n lr_scheduler.step()\n for data in train_loader:\n optimizer.zero_grad()\n\n if self.use_gpu:\n data['node_feat'], data['node_mask'], data['label'] = data_to_gpu(\n data['node_feat'], data['node_mask'], data['label'])\n\n if self.model_conf.name == 'LanczosNet':\n data['L'], data['D'], data['V'] = data_to_gpu(\n data['L'], data['D'], data['V'])\n elif self.model_conf.name == 'GraphSAGE':\n data['nn_idx'], data['nonempty_mask'] = data_to_gpu(\n data['nn_idx'], data['nonempty_mask'])\n elif self.model_conf.name == 'GPNN':\n data['L'], data['L_cluster'], data['L_cut'] = data_to_gpu(\n data['L'], data['L_cluster'], data['L_cut'])\n else:\n data['L'] = data_to_gpu(data['L'])[0]\n\n if self.model_conf.name == 'AdaLanczosNet':\n _, train_loss = model(\n data['node_feat'],\n data['L'],\n label=data['label'],\n mask=data['node_mask'])\n elif self.model_conf.name == 'LanczosNet':\n _, train_loss = model(\n data['node_feat'],\n data['L'],\n data['D'],\n data['V'],\n label=data['label'],\n mask=data['node_mask'])\n elif self.model_conf.name == 'GraphSAGE':\n _, train_loss = model(\n data['node_feat'],\n data['nn_idx'],\n data['nonempty_mask'],\n label=data['label'],\n mask=data['node_mask'])\n elif self.model_conf.name == 'GPNN':\n _, train_loss = model(\n data['node_feat'],\n data['L'],\n data['L_cluster'],\n data['L_cut'],\n label=data['label'],\n mask=data['node_mask'])\n else:\n _, train_loss = model(\n data['node_feat'],\n data['L'],\n label=data['label'],\n mask=data['node_mask'])\n\n # assign gradient\n train_loss.backward()\n optimizer.step()\n train_loss = float(train_loss.data.cpu().numpy())\n self.writer.add_scalar('train_loss', train_loss, iter_count)\n results['train_loss'] += [train_loss]\n results['train_step'] += [iter_count]\n\n # display loss\n if (iter_count + 1) % self.train_conf.display_iter == 0:\n logger.info(\"Loss @ epoch {:04d} iteration {:08d} = 
{}\".format(\n epoch + 1, iter_count + 1, train_loss))\n\n iter_count += 1\n\n # snapshot model\n if (epoch + 1) % self.train_conf.snapshot_epoch == 0:\n logger.info(\"Saving Snapshot @ epoch {:04d}\".format(epoch + 1))\n snapshot(model.module\n if self.use_gpu else model, optimizer, self.config, epoch + 1)\n\n results['best_val_loss'] += [best_val_loss]\n pickle.dump(results,\n open(os.path.join(self.config.save_dir, 'train_stats.p'), 'wb'))\n self.writer.close()\n logger.info(\"Best Validation MAE = {}\".format(best_val_loss))\n\n return best_val_loss\n\n def test(self):\n test_dataset = eval(self.dataset_conf.loader_name)(\n self.config, split='test')\n # create data loader\n test_loader = torch.utils.data.DataLoader(\n test_dataset,\n batch_size=self.test_conf.batch_size,\n shuffle=False,\n num_workers=self.test_conf.num_workers,\n collate_fn=test_dataset.collate_fn,\n drop_last=False)\n\n # create models\n model = eval(self.model_conf.name)(self.config)\n load_model(model, self.test_conf.test_model)\n\n if self.use_gpu:\n model = nn.DataParallel(model, device_ids=self.gpus).cuda()\n\n model.eval()\n test_loss = []\n for data in tqdm(test_loader):\n if self.use_gpu:\n data['node_feat'], data['node_mask'], data['label'] = data_to_gpu(\n data['node_feat'], data['node_mask'], data['label'])\n\n if self.model_conf.name == 'LanczosNet':\n data['D'], data['V'] = data_to_gpu(data['D'], data['V'])\n elif self.model_conf.name == 'GraphSAGE':\n data['nn_idx'], data['nonempty_mask'] = data_to_gpu(\n data['nn_idx'], data['nonempty_mask'])\n elif self.model_conf.name == 'GPNN':\n data['L'], data['L_cluster'], data['L_cut'] = data_to_gpu(\n data['L'], data['L_cluster'], data['L_cut'])\n else:\n data['L'] = data_to_gpu(data['L'])[0]\n\n with torch.no_grad():\n if self.model_conf.name == 'AdaLanczosNet':\n pred, _ = model(\n data['node_feat'],\n data['L'],\n label=data['label'],\n mask=data['node_mask'])\n elif self.model_conf.name == 'LanczosNet':\n pred, _ = model(\n data['node_feat'],\n data['L'],\n data['D'],\n data['V'],\n label=data['label'],\n mask=data['node_mask'])\n elif self.model_conf.name == 'GraphSAGE':\n pred, _ = model(\n data['node_feat'],\n data['nn_idx'],\n data['nonempty_mask'],\n label=data['label'],\n mask=data['node_mask'])\n elif self.model_conf.name == 'GPNN':\n pred, _ = model(\n data['node_feat'],\n data['L'],\n data['L_cluster'],\n data['L_cut'],\n label=data['label'],\n mask=data['node_mask'])\n else:\n pred, _ = model(\n data['node_feat'],\n data['L'],\n label=data['label'],\n mask=data['node_mask'])\n\n curr_loss = (\n pred - data['label']).abs().cpu().numpy() * self.const_factor\n test_loss += [curr_loss]\n\n test_loss = float(np.mean(np.concatenate(test_loss)))\n logger.info(\"Test MAE = {}\".format(test_loss))\n\n return test_loss\n" ]
[ [ "torch.utils.data.DataLoader", "torch.optim.SGD", "torch.no_grad", "torch.nn.DataParallel", "torch.optim.Adam", "torch.optim.lr_scheduler.MultiStepLR", "numpy.concatenate" ] ]
zhuwenzhen/DomainBed
[ "e8e8ed831bf30887675e5b3a5117d9d66d0ee46f" ]
[ "domainbed/datasets.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\nimport os\nimport torch\nfrom PIL import Image, ImageFile\nfrom torchvision import transforms\nimport torchvision.datasets.folder\nfrom torch.utils.data import TensorDataset, Subset\nfrom torchvision.datasets import MNIST, ImageFolder\nfrom torchvision.transforms.functional import rotate\n\nfrom wilds.datasets.camelyon17_dataset import Camelyon17Dataset\nfrom wilds.datasets.fmow_dataset import FMoWDataset\n\nImageFile.LOAD_TRUNCATED_IMAGES = True\n\nDATASETS = [\n # Debug\n \"Debug28\",\n \"Debug224\",\n # Small images\n \"ColoredMNIST\",\n \"RotatedMNIST\",\n # Big images\n \"VLCS\",\n \"PACS\",\n \"OfficeHome\",\n \"TerraIncognita\",\n \"DomainNet\",\n \"SVIRO\",\n # WILDS datasets\n \"WILDSCamelyon\",\n \"WILDSFMoW\",\n]\n\n\ndef get_dataset_class(dataset_name):\n \"\"\"Return the dataset class with the given name.\"\"\"\n if dataset_name not in globals():\n raise NotImplementedError(\"Dataset not found: {}\".format(dataset_name))\n return globals()[dataset_name]\n\n\ndef num_environments(dataset_name):\n return len(get_dataset_class(dataset_name).ENVIRONMENTS)\n\n\nclass MultipleDomainDataset:\n N_STEPS = 5001 # Default, subclasses may override\n CHECKPOINT_FREQ = 100 # Default, subclasses may override\n N_WORKERS = 8 # Default, subclasses may override\n ENVIRONMENTS = None # Subclasses should override\n INPUT_SHAPE = None # Subclasses should override\n\n def __getitem__(self, index):\n return self.datasets[index]\n\n def __len__(self):\n return len(self.datasets)\n\n\nclass Debug(MultipleDomainDataset):\n def __init__(self, root, test_envs, hparams):\n super().__init__()\n self.input_shape = self.INPUT_SHAPE\n self.num_classes = 2\n self.datasets = []\n for _ in [0, 1, 2]:\n self.datasets.append(\n TensorDataset(\n torch.randn(16, *self.INPUT_SHAPE), torch.randint(0, self.num_classes, (16,))\n )\n )\n\n\nclass Debug28(Debug):\n INPUT_SHAPE = (3, 28, 28)\n ENVIRONMENTS = [\"0\", \"1\", \"2\"]\n\n\nclass Debug224(Debug):\n INPUT_SHAPE = (3, 224, 224)\n ENVIRONMENTS = [\"0\", \"1\", \"2\"]\n\n\nclass MultipleEnvironmentMNIST(MultipleDomainDataset):\n def __init__(self, root, environments, dataset_transform, input_shape, num_classes):\n super().__init__()\n if root is None:\n raise ValueError(\"Data directory not specified!\")\n\n original_dataset_tr = MNIST(root, train=True, download=True)\n original_dataset_te = MNIST(root, train=False, download=True)\n\n original_images = torch.cat((original_dataset_tr.data, original_dataset_te.data))\n\n original_labels = torch.cat((original_dataset_tr.targets, original_dataset_te.targets))\n\n shuffle = torch.randperm(len(original_images))\n\n original_images = original_images[shuffle]\n original_labels = original_labels[shuffle]\n\n self.datasets = []\n\n for i in range(len(environments)):\n images = original_images[i :: len(environments)]\n labels = original_labels[i :: len(environments)]\n self.datasets.append(dataset_transform(images, labels, environments[i]))\n\n self.input_shape = input_shape\n self.num_classes = num_classes\n\n\nclass ColoredMNIST(MultipleEnvironmentMNIST):\n ENVIRONMENTS = [\"+90%\", \"+80%\", \"-90%\"]\n\n def __init__(self, root, test_envs, hparams):\n super(ColoredMNIST, self).__init__(\n root,\n [0.1, 0.2, 0.9],\n self.color_dataset,\n (\n 2,\n 28,\n 28,\n ),\n 2,\n )\n\n self.input_shape = (\n 2,\n 28,\n 28,\n )\n self.num_classes = 2\n\n def color_dataset(self, images, labels, environment):\n # # Subsample 2x for computational convenience\n # 
images = images.reshape((-1, 28, 28))[:, ::2, ::2]\n # Assign a binary label based on the digit\n labels = (labels < 5).float()\n # Flip label with probability 0.25\n labels = self.torch_xor_(labels, self.torch_bernoulli_(0.25, len(labels)))\n\n # Assign a color based on the label; flip the color with probability e\n colors = self.torch_xor_(labels, self.torch_bernoulli_(environment, len(labels)))\n images = torch.stack([images, images], dim=1)\n # Apply the color to the image by zeroing out the other color channel\n images[torch.tensor(range(len(images))), (1 - colors).long(), :, :] *= 0\n\n x = images.float().div_(255.0)\n y = labels.view(-1).long()\n\n return TensorDataset(x, y)\n\n def torch_bernoulli_(self, p, size):\n return (torch.rand(size) < p).float()\n\n def torch_xor_(self, a, b):\n return (a - b).abs()\n\n\nclass RotatedMNIST(MultipleEnvironmentMNIST):\n ENVIRONMENTS = [\"0\", \"15\", \"30\", \"45\", \"60\", \"75\"]\n\n def __init__(self, root, test_envs, hparams):\n super(RotatedMNIST, self).__init__(\n root,\n [0, 15, 30, 45, 60, 75],\n self.rotate_dataset,\n (\n 1,\n 28,\n 28,\n ),\n 10,\n )\n\n def rotate_dataset(self, images, labels, angle):\n rotation = transforms.Compose(\n [\n transforms.ToPILImage(),\n transforms.Lambda(lambda x: rotate(x, angle, fill=(0,), resample=Image.BICUBIC)),\n transforms.ToTensor(),\n ]\n )\n\n x = torch.zeros(len(images), 1, 28, 28)\n for i in range(len(images)):\n x[i] = rotation(images[i])\n\n y = labels.view(-1)\n\n return TensorDataset(x, y)\n\n\nclass MultipleEnvironmentImageFolder(MultipleDomainDataset):\n def __init__(self, root, test_envs, augment, hparams):\n super().__init__()\n environments = [f.name for f in os.scandir(root) if f.is_dir()]\n environments = sorted(environments)\n\n transform = transforms.Compose(\n [\n transforms.Resize((224, 224)),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n ]\n )\n\n augment_transform = transforms.Compose(\n [\n # transforms.Resize((224,224)),\n transforms.RandomResizedCrop(224, scale=(0.7, 1.0)),\n transforms.RandomHorizontalFlip(),\n transforms.ColorJitter(0.3, 0.3, 0.3, 0.3),\n transforms.RandomGrayscale(),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n ]\n )\n\n self.datasets = []\n for i, environment in enumerate(environments):\n\n if augment and (i not in test_envs):\n env_transform = augment_transform\n else:\n env_transform = transform\n\n path = os.path.join(root, environment)\n env_dataset = ImageFolder(path, transform=env_transform)\n\n self.datasets.append(env_dataset)\n\n self.input_shape = (\n 3,\n 224,\n 224,\n )\n self.num_classes = len(self.datasets[-1].classes)\n\n\nclass VLCS(MultipleEnvironmentImageFolder):\n CHECKPOINT_FREQ = 300\n ENVIRONMENTS = [\"C\", \"L\", \"S\", \"V\"]\n\n def __init__(self, root, test_envs, hparams):\n self.dir = os.path.join(root, \"VLCS/\")\n super().__init__(self.dir, test_envs, hparams[\"data_augmentation\"], hparams)\n\n\nclass PACS(MultipleEnvironmentImageFolder):\n CHECKPOINT_FREQ = 300\n ENVIRONMENTS = [\"A\", \"C\", \"P\", \"S\"]\n\n def __init__(self, root, test_envs, hparams):\n self.dir = os.path.join(root, \"PACS/\")\n super().__init__(self.dir, test_envs, hparams[\"data_augmentation\"], hparams)\n\n\nclass DomainNet(MultipleEnvironmentImageFolder):\n CHECKPOINT_FREQ = 1000\n ENVIRONMENTS = [\"clip\", \"info\", \"paint\", \"quick\", \"real\", \"sketch\"]\n\n def __init__(self, root, test_envs, hparams):\n self.dir = 
os.path.join(root, \"domain_net/\")\n super().__init__(self.dir, test_envs, hparams[\"data_augmentation\"], hparams)\n\n\nclass OfficeHome(MultipleEnvironmentImageFolder):\n CHECKPOINT_FREQ = 300\n ENVIRONMENTS = [\"A\", \"C\", \"P\", \"R\"]\n\n def __init__(self, root, test_envs, hparams):\n self.dir = os.path.join(root, \"office_home/\")\n super().__init__(self.dir, test_envs, hparams[\"data_augmentation\"], hparams)\n\n\nclass TerraIncognita(MultipleEnvironmentImageFolder):\n CHECKPOINT_FREQ = 300\n ENVIRONMENTS = [\"L100\", \"L38\", \"L43\", \"L46\"]\n\n def __init__(self, root, test_envs, hparams):\n self.dir = os.path.join(root, \"terra_incognita/\")\n super().__init__(self.dir, test_envs, hparams[\"data_augmentation\"], hparams)\n\n\nclass SVIRO(MultipleEnvironmentImageFolder):\n CHECKPOINT_FREQ = 300\n ENVIRONMENTS = [\n \"aclass\",\n \"escape\",\n \"hilux\",\n \"i3\",\n \"lexus\",\n \"tesla\",\n \"tiguan\",\n \"tucson\",\n \"x5\",\n \"zoe\",\n ]\n\n def __init__(self, root, test_envs, hparams):\n self.dir = os.path.join(root, \"sviro/\")\n super().__init__(self.dir, test_envs, hparams[\"data_augmentation\"], hparams)\n\n\nclass WILDSEnvironment:\n def __init__(self, wilds_dataset, metadata_name, metadata_value, transform=None):\n self.name = metadata_name + \"_\" + str(metadata_value)\n\n metadata_index = wilds_dataset.metadata_fields.index(metadata_name)\n metadata_array = wilds_dataset.metadata_array\n subset_indices = torch.where(metadata_array[:, metadata_index] == metadata_value)[0]\n\n self.dataset = wilds_dataset\n self.indices = subset_indices\n self.transform = transform\n\n def __getitem__(self, i):\n x = self.dataset.get_input(self.indices[i])\n if type(x).__name__ != \"Image\":\n x = Image.fromarray(x)\n\n y = self.dataset.y_array[self.indices[i]]\n if self.transform is not None:\n x = self.transform(x)\n return x, y\n\n def __len__(self):\n return len(self.indices)\n\n\nclass WILDSDataset(MultipleDomainDataset):\n INPUT_SHAPE = (3, 224, 224)\n\n def __init__(self, dataset, metadata_name, test_envs, augment, hparams):\n super().__init__()\n\n transform = transforms.Compose(\n [\n transforms.Resize((224, 224)),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n ]\n )\n\n augment_transform = transforms.Compose(\n [\n transforms.Resize((224, 224)),\n transforms.RandomResizedCrop(224, scale=(0.7, 1.0)),\n transforms.RandomHorizontalFlip(),\n transforms.ColorJitter(0.3, 0.3, 0.3, 0.3),\n transforms.RandomGrayscale(),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n ]\n )\n\n self.datasets = []\n\n for i, metadata_value in enumerate(self.metadata_values(dataset, metadata_name)):\n if augment and (i not in test_envs):\n env_transform = augment_transform\n else:\n env_transform = transform\n\n env_dataset = WILDSEnvironment(dataset, metadata_name, metadata_value, env_transform)\n\n self.datasets.append(env_dataset)\n\n self.input_shape = (\n 3,\n 224,\n 224,\n )\n self.num_classes = dataset.n_classes\n\n def metadata_values(self, wilds_dataset, metadata_name):\n metadata_index = wilds_dataset.metadata_fields.index(metadata_name)\n metadata_vals = wilds_dataset.metadata_array[:, metadata_index]\n return sorted(list(set(metadata_vals.view(-1).tolist())))\n\n\nclass WILDSCamelyon(WILDSDataset):\n ENVIRONMENTS = [\"hospital_0\", \"hospital_1\", \"hospital_2\", \"hospital_3\", \"hospital_4\"]\n\n def __init__(self, root, test_envs, hparams):\n dataset = 
Camelyon17Dataset(root_dir=root)\n super().__init__(dataset, \"hospital\", test_envs, hparams[\"data_augmentation\"], hparams)\n\n\nclass WILDSFMoW(WILDSDataset):\n ENVIRONMENTS = [\"region_0\", \"region_1\", \"region_2\", \"region_3\", \"region_4\", \"region_5\"]\n\n def __init__(self, root, test_envs, hparams):\n dataset = FMoWDataset(root_dir=root)\n super().__init__(dataset, \"region\", test_envs, hparams[\"data_augmentation\"], hparams)\n" ]
[ [ "torch.stack", "torch.randint", "torch.randn", "torch.rand", "torch.where", "torch.utils.data.TensorDataset", "torch.cat" ] ]
cemkaraoguz/reinforcement-learning-an-introduction-second-edition
[ "735bfa6b66ffb52b7cf03966164e7bc1755942de" ]
[ "chapter07/03_WindyGridWorld_nStepSARSA_OffPolicy.py" ]
[ "'''\n03_WindyGridWorld_nStepSARSA_OffPolicy.py : n-step off-policy SARSA applied to Windy Grid World problem (Example 6.5)\n\nCem Karaoguz, 2020\nMIT License\n'''\n\nimport numpy as np\nimport pylab as pl\n\nfrom IRL.environments.Gridworlds import StochasticGridWorld\nfrom IRL.agents.TemporalDifferenceLearning import nStepOffPolicySARSA\nfrom IRL.utils.Policies import StochasticPolicy\nfrom IRL.utils.Helpers import runSimulation\n\ndef runExperiment(nEpisodes, env, agent, policy_behaviour, doUpdateBehaviourPolicy):\n reward_sums = []\n episodesvstimesteps = []\n timesteps = 0\n for e in range(nEpisodes):\n \n if(e%10==0):\n print(\"Episode : \", e)\n \n state = env.reset()\n action = policy_behaviour.sampleAction(state)\n done = False\n experiences = [{}]\n reward_sums.append(0.0)\n while not done:\n\n timesteps += 1\n \n experiences[-1]['state'] = state\n experiences[-1]['action'] = action\n experiences[-1]['done'] = done\n \n new_state, reward, done = env.step(action)\n \n #print(\"State:\", state, \"Action: \", env.actionMapping[action][1], \"Reward: \", reward, \"New state:\", new_state, \"done:\", done)\n \n new_action = policy_behaviour.sampleAction(new_state)\n \n xp = {}\n xp['state'] = new_state\n xp['reward'] = reward\n xp['done'] = done\n xp['action'] = new_action\n experiences.append(xp)\n \n agent.update(experiences[-2:], policy_behaviour)\n \n state = new_state\n action = new_action\n \n episodesvstimesteps.append([e,timesteps])\n reward_sums[-1] += reward\n \n if(doUpdateBehaviourPolicy):\n # update behaviour policy to be e-soft version of the target policy\n for idx_state in range(env.nStates):\n policy_behaviour.update(idx_state, agent.actionValueTable[idx_state,:])\n \n return reward_sums, np.array(episodesvstimesteps)\n\nif __name__==\"__main__\":\n\n exerciseID = 0\n nExperiments = 1\n nEpisodes = 800\n\n # Environment\n sizeX = 10\n sizeY = 7\n defaultReward = -1.0\n startStates = [(0,3)]\n terminalStates = [(7,3)]\n\n if exerciseID==0:\n # Example 6.5\n actionMapping = {0:(np.array([0,-1]), \"N\"), 1:(np.array([0,1]), \"S\"), 2:(np.array([1,0]), \"E\"), 3:(np.array([-1,0]), \"W\")}\n sigmaY_actionNoise = 0\n \n elif exerciseID==1:\n # Exercise 6.9 part 1\n actionMapping = {0:(np.array([0,-1]), \"N\"), 1:(np.array([0,1]), \"S\"), 2:(np.array([1,0]), \"E\"), 3:(np.array([-1,0]), \"W\"),\n 4:(np.array([1,-1]), \"NE\"), 5:(np.array([1,1]), \"SE\"), 6:(np.array([-1,-1]), \"NW\"), 7:(np.array([-1,1]), \"SW\")}\n \n # Example 6.5 and Exercise 6.9\n sigmaY_actionNoise = 0\n \n # Exercise 6.10\n sigmaY_actionNoise = 1\n \n else:\n # Exercise 6.9 part 2\n actionMapping = {0:(np.array([0,-1]), \"N\"), 1:(np.array([0,1]), \"S\"), 2:(np.array([1,0]), \"E\"), 3:(np.array([-1,0]), \"W\"),\n 4:(np.array([1,-1]), \"NE\"), 5:(np.array([1,1]), \"SE\"), 6:(np.array([-1,-1]), \"NW\"), 7:(np.array([-1,1]), \"SW\"), 8:(np.array([0,0]), \"0\")}\n sigmaY_actionNoise = 0\n\n actionNoiseParams = {}\n aux = [(x,y) for x in range(3,6) for y in range(0,7)]\n for pos in aux:\n actionNoiseParams[pos] = [0,-1,0,sigmaY_actionNoise]\n aux = [(x,y) for x in range(6,8) for y in range(0,7)]\n for pos in aux:\n actionNoiseParams[pos] = [0,-2,0,sigmaY_actionNoise]\n aux = [(8,y) for y in range(0,7)]\n for pos in aux:\n actionNoiseParams[pos] = [0,-1,0,sigmaY_actionNoise]\n \n # Agent\n alpha_nStepOPSARSA_1 = 0.1\n gamma_nStepOPSARSA_1 = 1.0\n n_nStepOPSARSA_1 = 1\n \n alpha_nStepOPSARSA_2 = 0.1\n gamma_nStepOPSARSA_2 = 1.0\n n_nStepOPSARSA_2 = 5 \n\n alpha_nStepOPSARSA_3 = 0.05\n gamma_nStepOPSARSA_3 = 
1.0\n n_nStepOPSARSA_3 = 10\n \n # Policy\n doUpdateBehaviourPolicy = True\n epsilon_behaviourPolicy = 0.1\n \n env = StochasticGridWorld(sizeX, sizeY, actionNoiseParams=actionNoiseParams, startStates=startStates,\n defaultReward=defaultReward, terminalStates=terminalStates, actionMapping=actionMapping)\n\n env.printEnv()\n\n avg_reward_sums_nStepOPSARSA_1 = np.zeros(nEpisodes)\n avg_reward_sums_nStepOPSARSA_2 = np.zeros(nEpisodes)\n avg_reward_sums_nStepOPSARSA_3 = np.zeros(nEpisodes)\n for idx_experiment in range(1, nExperiments+1):\n \n print(\"Experiment : \", idx_experiment)\n \n agent_nStepOPSARSA_1 = nStepOffPolicySARSA(env.nStates, env.nActions, alpha_nStepOPSARSA_1, gamma_nStepOPSARSA_1, n_nStepOPSARSA_1)\n agent_nStepOPSARSA_2 = nStepOffPolicySARSA(env.nStates, env.nActions, alpha_nStepOPSARSA_2, gamma_nStepOPSARSA_2, n_nStepOPSARSA_2)\n agent_nStepOPSARSA_3 = nStepOffPolicySARSA(env.nStates, env.nActions, alpha_nStepOPSARSA_3, gamma_nStepOPSARSA_3, n_nStepOPSARSA_3)\n \n policy_behaviour = StochasticPolicy(env.nStates, env.nActions, policyUpdateMethod=\"esoft\", epsilon=epsilon_behaviourPolicy) \n reward_sums_nStepOPSARSA_1, evst_nStepOPSARSA_1 = runExperiment(nEpisodes, env, agent_nStepOPSARSA_1, policy_behaviour, doUpdateBehaviourPolicy)\n\n policy_behaviour = StochasticPolicy(env.nStates, env.nActions, policyUpdateMethod=\"esoft\", epsilon=epsilon_behaviourPolicy)\n reward_sums_nStepOPSARSA_2, evst_nStepOPSARSA_2 = runExperiment(nEpisodes, env, agent_nStepOPSARSA_2, policy_behaviour, doUpdateBehaviourPolicy)\n\n policy_behaviour = StochasticPolicy(env.nStates, env.nActions, policyUpdateMethod=\"esoft\", epsilon=epsilon_behaviourPolicy)\n reward_sums_nStepOPSARSA_3, evst_nStepOPSARSA_3 = runExperiment(nEpisodes, env, agent_nStepOPSARSA_3, policy_behaviour, doUpdateBehaviourPolicy)\n \n avg_reward_sums_nStepOPSARSA_1 = avg_reward_sums_nStepOPSARSA_1 + (1.0/idx_experiment)*(reward_sums_nStepOPSARSA_1 - avg_reward_sums_nStepOPSARSA_1)\n avg_reward_sums_nStepOPSARSA_2 = avg_reward_sums_nStepOPSARSA_2 + (1.0/idx_experiment)*(reward_sums_nStepOPSARSA_2 - avg_reward_sums_nStepOPSARSA_2)\n avg_reward_sums_nStepOPSARSA_3 = avg_reward_sums_nStepOPSARSA_3 + (1.0/idx_experiment)*(reward_sums_nStepOPSARSA_3 - avg_reward_sums_nStepOPSARSA_3)\n \n pl.figure()\n pl.plot(evst_nStepOPSARSA_1[:,1],evst_nStepOPSARSA_1[:,0], '-r', label=str(n_nStepOPSARSA_1)+' Step SARSA')\n pl.plot(evst_nStepOPSARSA_2[:,1],evst_nStepOPSARSA_2[:,0], '-g', label=str(n_nStepOPSARSA_2)+' Step SARSA')\n pl.plot(evst_nStepOPSARSA_3[:,1],evst_nStepOPSARSA_3[:,0], '-k', label=str(n_nStepOPSARSA_3)+' Step SARSA')\n pl.xlabel(\"Time steps\")\n pl.ylabel(\"Episodes\")\n pl.legend()\n pl.figure()\n pl.plot(avg_reward_sums_nStepOPSARSA_1, '-r', label=str(n_nStepOPSARSA_1)+' Step SARSA')\n pl.plot(avg_reward_sums_nStepOPSARSA_2, '-g', label=str(n_nStepOPSARSA_2)+' Step SARSA')\n pl.plot(avg_reward_sums_nStepOPSARSA_3, '-k', label=str(n_nStepOPSARSA_3)+' Step SARSA')\n pl.xlabel(\"Episodes\")\n pl.ylabel(\"Sum of reward during episodes\")\n pl.legend()\n pl.show()\n \n agents = [agent_nStepOPSARSA_1, agent_nStepOPSARSA_2, agent_nStepOPSARSA_3]\n for agent in agents:\n print(\"Policy for :\", agent.getName())\n env.printEnv(agent)\n\n for agent in agents:\n input(\"Press any key to simulate agent \"+agent.getName())\n agentHistory = runSimulation(env, agent) \n print(\"Simulation:\", agent.getName()) \n env.render(agentHistory)\n " ]
[ [ "numpy.array", "numpy.zeros" ] ]
rcooke-ast/PYPIT
[ "0cb9c4cb422736b855065a35aefc2bdba6d51dd0" ]
[ "pypeit/tests/test_match.py" ]
[ "\"\"\"\nModule to run tests on sort and arsetup\n\"\"\"\nfrom IPython import embed\n\nimport pytest\n\nimport numpy as np\n\nfrom pypeit.core import framematch\nfrom pypeit.tests.tstutils import dummy_fitstbl\nfrom pypeit.pypmsgs import PypeItError\n\n\[email protected]\ndef fitstbl():\n return dummy_fitstbl()\n\n\ndef test_frame_selection(fitstbl):\n \"\"\" Test that the frame bits are successfully read\n \"\"\"\n # Sort\n assert fitstbl.find_frames('bias')[0]\n assert fitstbl.find_frames('arc')[1]\n assert fitstbl.find_frames('trace')[2]\n assert fitstbl.find_frames('standard')[4]\n assert np.sum(fitstbl.find_frames('science')) == 5\n\n\ndef test_calibration_groups(fitstbl):\n \"\"\"\n Test the frame selection specific to a provided calibration group\n \"\"\"\n calib_ID = 0\n par = fitstbl.spectrograph.default_pypeit_par()\n assert fitstbl.find_frames('arc', calib_ID=calib_ID, index=True)[0] == 1\n assert fitstbl.find_frames('standard', calib_ID=calib_ID, index=True)[0] == 4\n assert fitstbl.find_frames('trace', calib_ID=calib_ID, index=True)[0] == 2\n\n\n# TODO: This doesn't test anything\n#def test_neg_match_science(fitstbl):\n# \"\"\" Test using negative number for calibs\n# \"\"\"\n# par = fitstbl.spectrograph.default_pypeit_par()\n# # Use negative number\n# for ftype in ['arc', 'pixelflat', 'bias']:\n# par['calibrations']['{0}frame'.format(ftype)]['number'] = 1\n# par['calibrations']['traceframe']['number'] = -1\n# fitstbl.match_to_science(par['calibrations'], par['rdx']['calwin'], par['fluxcalib'])\n# assert np.sum(fitstbl.find_frames('trace')) == 2\n\n\n# TODO: Need a function that checks the calibration groups have the\n# correct number of calibration frames\n#def test_match_science_errors(fitstbl):\n# par = fitstbl.spectrograph.default_pypeit_par()\n# par['calibrations']['traceframe']['number'] = 10\n# with pytest.raises(PypeItError):\n# fitstbl.match_to_science(par['calibrations'], par['rdx']['calwin'], par['fluxcalib'])\n\n\ndef test_instr_setup(fitstbl):\n \"\"\" Test instrument setup naming convention\n Tickles most of the arsetup methods\n \"\"\"\n par = fitstbl.spectrograph.default_pypeit_par()\n\n # Check the master key\n assert fitstbl.master_key(0) == 'A_1_DET01'\n # Invalid detector\n with pytest.raises(PypeItError):\n # Shane kast blue doesn't have a second detector\n fitstbl.master_key(0, det=2)\n\n\n# TODO: Need a test that adds a calibration group and checks the result\n# # New calib set\n# # Turn exposure 9 into an arc\n# fitstbl.edit_frame_type(-1, 'arc')\n# fitstbl['sci_ID'][-1] = 2\n# # Turn off other arc\n# fitstbl['sci_ID'][1] = 1 + 4 + 8\n# # Run\n# setupID3, setup_dict = pypsetup.instr_setup(2, 1, fitstbl, setup_dict=setup_dict)\n# assert setupID3 == 'A_01_ab'\n# assert setup_dict['A']['ab']['arc'][0] == 'b009.fits.gz'\n\n\ndef test_exptime():\n exptime = np.array([0, 30, None, 900])\n assert np.array_equal(framematch.check_frame_exptime(exptime, [0,None]),\n np.array([False, True, False, True]))\n assert np.array_equal(framematch.check_frame_exptime(exptime, [None,1000]),\n np.array([True, True, False, True]))\n assert np.array_equal(framematch.check_frame_exptime(exptime, [None,None]),\n np.array([True, True, False, True]))\n assert np.array_equal(framematch.check_frame_exptime(exptime, [None,500]),\n np.array([True, True, False, False]))\n assert np.array_equal(framematch.check_frame_exptime(exptime, [10,20]),\n np.array([False, False, False, False]))\n\n\n" ]
[ [ "numpy.array" ] ]
frandorr/PROBA-V
[ "89c1aa4dfc58d66e7747293f6738fdd4e2ba6e6f" ]
[ "debug/main.py" ]
[ "from trainClass import *\nfrom utils.loss import *\nfrom utils.utils import *\nfrom modelsTF import *\nfrom tensorflow.keras.optimizers import Adam, SGD, Nadam\nfrom sklearn.model_selection import train_test_split\nfrom tensorflow.keras.metrics import Mean\nimport tensorflow as tf\nimport numpy as np\nimport logging\nimport os\nimport gc\n\nlogging.basicConfig(format='%(asctime)s - %(message)s', level=logging.INFO)\nlogger = logging.getLogger('__name__')\n\n\ndef main():\n\n # import data\n CLEAN_DATA_DIR = '/home/mark/DataBank/PROBA-V-CHKPT/augmentedPatchesDir'\n band = 'NIR'\n X = np.load(os.path.join(CLEAN_DATA_DIR, f'TRAINpatchesLR_{band}.npy'), allow_pickle=True)\n y = np.load(os.path.join(CLEAN_DATA_DIR, f'TRAINpatchesHR_{band}.npy'), allow_pickle=True)\n\n print(f'Input shape: {X.shape} --------> Output shape: {y.shape}')\n X_train, X_val, y_train, y_val, y_train_mask, y_val_mask = train_test_split(\n X, y, ~y.mask, test_size=0.3, random_state=17)\n\n X_train = tf.convert_to_tensor(X_train, dtype=tf.float32)\n X_val = tf.convert_to_tensor(X_val, dtype=tf.float32)\n y_train = tf.convert_to_tensor(y_train, dtype=tf.float32)\n y_val = tf.convert_to_tensor(y_val, dtype=tf.float32)\n y_train_mask = tf.convert_to_tensor(y_train_mask, dtype=tf.float32)\n y_val_mask = tf.convert_to_tensor(y_val_mask, dtype=tf.float32)\n\n y = [y_train, y_train_mask]\n valData = [X_val, y_val, y_val_mask]\n\n# model = WDSRConv3D(scale=3, numFilters=32, kernelSize=(3, 3, 3), numResBlocks=8,\n# expRate=8, decayRate=0.8, numImgLR=9, patchSizeLR=32, isGrayScale=True)\n with tf.device('/GPU:1'):\n model = WDSRConv3D(scale=3, numFilters=32, kernelSize=(3, 3, 3), numResBlocks=8,\n expRate=8, decayRate=0.8, numImgLR=9, patchSizeLR=38, isGrayScale=True)\n l = Losses()\n trainClass = ModelTrainer(model=model,\n loss=l.shiftCompensatedL1Loss,\n metric=l.shiftCompensatedcPSNR,\n optimizer=Nadam(learning_rate=5e-4),\n ckptDir=f'/home/mark/DataBank/ckpt_{band}_38',\n logDir=f'/home/mark/DataBank/logNewRed_{band}_38')\n del X\n gc.collect()\n\n trainClass.fitTrainData(X_train, y, 64, 10000, 512, valData, 1)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "tensorflow.device", "sklearn.model_selection.train_test_split", "tensorflow.convert_to_tensor", "tensorflow.keras.optimizers.Nadam" ] ]
Featuretools/featuretools
[ "365abd9519d2eec8eec75936644a7b865d4ef40a" ]
[ "featuretools/tests/primitive_tests/test_distancetoholiday_primitive.py" ]
[ "from datetime import datetime\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nfrom featuretools.primitives.standard.datetime_transform_primitives import (\n DistanceToHoliday,\n)\n\n\ndef test_distanceholiday():\n distance_to_holiday = DistanceToHoliday(\"New Year's Day\")\n dates = pd.Series(\n [\n datetime(2010, 1, 1),\n datetime(2012, 5, 31),\n datetime(2017, 7, 31),\n datetime(2020, 12, 31),\n ]\n )\n\n expected = [0, -151, 154, 1]\n output = distance_to_holiday(dates).tolist()\n np.testing.assert_array_equal(output, expected)\n\n\ndef test_holiday_out_of_range():\n date_to_holiday = DistanceToHoliday(\"Boxing Day\", country=\"Canada\")\n\n array = pd.Series(\n [\n datetime(2010, 1, 1),\n datetime(2012, 5, 31),\n datetime(2017, 7, 31),\n datetime(2020, 12, 31),\n ]\n )\n answer = pd.Series([np.nan, 209, 148, np.nan])\n pd.testing.assert_series_equal(date_to_holiday(array), answer, check_names=False)\n\n\ndef test_unknown_country_error():\n error_text = r\"must be one of the available countries.*\"\n with pytest.raises(ValueError, match=error_text):\n DistanceToHoliday(\"Victoria Day\", country=\"UNK\")\n\n\ndef test_unknown_holiday_error():\n error_text = r\"must be one of the available holidays.*\"\n with pytest.raises(ValueError, match=error_text):\n DistanceToHoliday(\"Alteryx Day\")\n\n\ndef test_nat():\n date_to_holiday = DistanceToHoliday(\"New Year's Day\")\n case = pd.Series(\n [\n \"2010-01-01\",\n \"NaT\",\n \"2012-05-31\",\n \"NaT\",\n ]\n ).astype(\"datetime64\")\n answer = [0, np.nan, -151, np.nan]\n given_answer = date_to_holiday(case).astype(\"float\")\n np.testing.assert_array_equal(given_answer, answer)\n\n\ndef test_valid_country():\n distance_to_holiday = DistanceToHoliday(\"Victoria Day\", country=\"Canada\")\n case = pd.Series(\n [\n \"2010-01-01\",\n \"2012-05-31\",\n \"2017-07-31\",\n \"2020-12-31\",\n ]\n ).astype(\"datetime64\")\n answer = [143, -10, -70, 144]\n given_answer = distance_to_holiday(case).astype(\"float\")\n np.testing.assert_array_equal(given_answer, answer)\n\n\ndef test_with_timezone_aware_datetimes():\n df = pd.DataFrame(\n {\n \"non_timezone_aware_with_time\": pd.date_range(\n \"2018-07-03 09:00\", periods=3\n ),\n \"non_timezone_aware_no_time\": pd.date_range(\"2018-07-03\", periods=3),\n \"timezone_aware_with_time\": pd.date_range(\n \"2018-07-03 09:00\", periods=3\n ).tz_localize(tz=\"US/Eastern\"),\n \"timezone_aware_no_time\": pd.date_range(\n \"2018-07-03\", periods=3\n ).tz_localize(tz=\"US/Eastern\"),\n }\n )\n\n distance_to_holiday = DistanceToHoliday(\"Independence Day\", country=\"US\")\n expected = [1, 0, -1]\n for col in df.columns:\n actual = distance_to_holiday(df[col])\n np.testing.assert_array_equal(actual, expected)\n" ]
[ [ "pandas.Series", "pandas.date_range", "numpy.testing.assert_array_equal" ] ]
Agirljustsayhello/nilmtk
[ "bf985f0f637460bd8df3bb1cbf17b81a20303826" ]
[ "nilmtk/disaggregate/hart_85.py" ]
[ "from __future__ import print_function, division\nfrom collections import OrderedDict, deque\nfrom datetime import datetime\nfrom warnings import warn\n\nimport pandas as pd\n\nfrom nilmtk.feature_detectors.cluster import hart85_means_shift_cluster\nfrom nilmtk.feature_detectors.steady_states import (\n find_steady_states_transients)\nfrom nilmtk.timeframe import merge_timeframes, TimeFrame\nfrom nilmtk.disaggregate import Disaggregator\n\n\n# Fix the seed for repeatability of experiments\nSEED = 42\nimport numpy as np\n\nnp.random.seed(SEED)\n\n\nclass MyDeque(deque):\n def popmiddle(self, pos):\n self.rotate(-pos)\n ret = self.popleft()\n self.rotate(pos)\n return ret\n\n\nclass PairBuffer(object):\n \"\"\"\n Attributes:\n * transitionList (list of tuples)\n * matchedPairs (dataframe containing matched pairs of transitions)\n \"\"\"\n\n def __init__(self, buffer_size, min_tolerance, percent_tolerance,\n large_transition, num_measurements):\n \"\"\"\n Parameters\n ----------\n buffer_size: int, optional\n size of the buffer to use for finding edges\n min_tolerance: int, optional\n variance in power draw allowed for pairing a match\n percent_tolerance: float, optional\n if transition is greater than large_transition, then use percent of large_transition\n large_transition: float, optional\n power draw of a Large transition\n num_measurements: int, optional\n 2 if only active power\n 3 if both active and reactive power\n \"\"\"\n # We use a deque here, because it allows us quick access to start and end popping\n # and additionally, we can set a maxlen which drops oldest items. This nicely\n # suits Hart's recomendation that the size should be tunable.\n self._buffer_size = buffer_size\n self._min_tol = min_tolerance\n self._percent_tol = percent_tolerance\n self._large_transition = large_transition\n self.transition_list = MyDeque([], maxlen=self._buffer_size)\n self._num_measurements = num_measurements\n if self._num_measurements == 3:\n # Both active and reactive power is available\n self.pair_columns = ['T1 Time', 'T1 Active', 'T1 Reactive',\n 'T2 Time', 'T2 Active', 'T2 Reactive']\n elif self._num_measurements == 2:\n # Only active power is available\n self.pair_columns = ['T1 Time', 'T1 Active',\n 'T2 Time', 'T2 Active']\n self.matched_pairs = pd.DataFrame(columns=self.pair_columns)\n\n def clean_buffer(self):\n # Remove any matched transactions\n for idx, entry in enumerate(self.transition_list):\n if entry[self._num_measurements]:\n self.transition_list.popmiddle(idx)\n self.clean_buffer()\n break\n # Remove oldest transaction if buffer cleaning didn't remove anything\n # if len(self.transitionList) == self._bufferSize:\n # self.transitionList.popleft()\n\n def add_transition(self, transition):\n # Check transition is as expected.\n assert isinstance(transition, (tuple, list))\n # Check that we have both active and reactive powers.\n assert len(transition) == self._num_measurements\n # Convert as appropriate\n if isinstance(transition, tuple):\n mtransition = list(transition)\n # Add transition to List of transitions (set marker as unpaired)\n mtransition.append(False)\n self.transition_list.append(mtransition)\n # checking for pairs\n # self.pairTransitions()\n # self.cleanBuffer()\n\n def pair_transitions(self):\n \"\"\"\n Hart 85, P 33.\n When searching the working buffer for pairs, the order in which \n entries are examined is very important. If an Appliance has \n on and off several times in succession, there can be many \n pairings between entries in the buffer. 
The algorithm must not\n allow an 0N transition to match an OFF which occurred at the end \n of a different cycle, so that only ON/OFF pairs which truly belong \n together are paired up. Otherwise the energy consumption of the \n appliance will be greatly overestimated. The most straightforward \n search procedures can make errors of this nature when faced with \n types of transition sequences.\n\n Hart 85, P 32.\n For the two-state load monitor, a pair is defined as two entries\n which meet the following four conditions:\n (1) They are on the same leg, or are both 240 V,\n (2) They are both unmarked, \n (3) The earlier has a positive real power component, and \n (4) When added together, they result in a vector in which the \n absolute value of the real power component is less than 35 \n Watts (or 3.5% of the real power, if the transitions are \n over 1000 W) and the absolute value of the reactive power \n component is less than 35 VAR (or 3.5%).\n\n ... the correct way to search the buffer is to start by checking \n elements which are close together in the buffer, and gradually \n increase the distance. First, adjacent elements are checked for \n pairs which meet all four requirements above; if any are found \n they are processed and marked. Then elements two entries apart \n are checked, then three, and so on, until the first and last \n element are checked...\n\n \"\"\"\n\n tlength = len(self.transition_list)\n pairmatched = False\n if tlength < 2:\n return pairmatched\n\n # Can we reduce the running time of this algorithm?\n # My gut feeling is no, because we can't re-order the list...\n # I wonder if we sort but then check the time... maybe. TO DO\n # (perhaps!).\n\n # Start the element distance at 1, go up to current length of buffer\n for eDistance in range(1, tlength):\n idx = 0\n while idx < tlength - 1:\n # We don't want to go beyond length of array\n compindex = idx + eDistance\n if compindex < tlength:\n val = self.transition_list[idx]\n # val[1] is the active power and\n # val[self._num_measurements] is match status\n if (val[1] > 0) and (val[self._num_measurements] is False):\n compval = self.transition_list[compindex]\n if compval[self._num_measurements] is False:\n # Add the two elements for comparison\n vsum = np.add(\n val[1:self._num_measurements],\n compval[1:self._num_measurements])\n # Set the allowable tolerance for reactive and\n # active\n matchtols = [self._min_tol, self._min_tol]\n for ix in range(1, self._num_measurements):\n matchtols[ix - 1] = self._min_tol if (max(np.fabs([val[ix], compval[ix]]))\n < self._large_transition) else (self._percent_tol\n * max(\n np.fabs([val[ix], compval[ix]])))\n if self._num_measurements == 3:\n condition = (np.fabs(vsum[0]) < matchtols[0]) and (\n np.fabs(vsum[1]) < matchtols[1])\n\n elif self._num_measurements == 2:\n condition = np.fabs(vsum[0]) < matchtols[0]\n\n if condition:\n # Mark the transition as complete\n self.transition_list[idx][\n self._num_measurements] = True\n self.transition_list[compindex][\n self._num_measurements] = True\n pairmatched = True\n\n # Append the OFF transition to the ON. 
Add to\n # dataframe.\n matchedpair = val[\n 0:self._num_measurements] + compval[0:self._num_measurements]\n self.matched_pairs.loc[\n len(self.matched_pairs)] = matchedpair\n\n # Iterate Index\n idx += 1\n else:\n break\n\n return pairmatched\n\n\nclass Hart85(Disaggregator):\n \"\"\"1 or 2 dimensional Hart 1985 algorithm.\n\n Attributes\n ----------\n model : dict\n Each key is either the instance integer for an ElecMeter,\n or a tuple of instances for a MeterGroup.\n Each value is a sorted list of power in different states.\n \"\"\"\n\n def __init__(self):\n self.model = {}\n self.MODEL_NAME = \"Hart85\"\n\n def train(self, metergroup, cols=[('power', 'active')],\n buffer_size=20, noise_level=70, state_threshold=15,\n min_tolerance=100, percent_tolerance=0.035,\n large_transition=1000, **kwargs):\n \"\"\"\n Train using Hart85. Places the learnt model in `model` attribute.\n\n Parameters\n ----------\n metergroup : a nilmtk.MeterGroup object\n cols: nilmtk.Measurement, should be one of the following\n [('power','active')]\n [('power','apparent')]\n [('power','reactive')]\n [('power','active'), ('power', 'reactive')]\n buffer_size: int, optional\n size of the buffer to use for finding edges\n min_tolerance: int, optional\n variance in power draw allowed for pairing a match\n percent_tolerance: float, optional\n if transition is greater than large_transition,\n then use percent of large_transition\n large_transition: float, optional\n power draw of a Large transition\n \"\"\"\n self.cols = cols\n self.state_threshold = state_threshold\n self.noise_level = noise_level\n [self.steady_states, self.transients] = find_steady_states_transients(\n metergroup, cols, noise_level, state_threshold, **kwargs)\n self.pair_df = self.pair(\n buffer_size, min_tolerance, percent_tolerance, large_transition)\n self.centroids = hart85_means_shift_cluster(self.pair_df, cols)\n\n def pair(self, buffer_size, min_tolerance, percent_tolerance,\n large_transition):\n subset = list(self.transients.itertuples())\n buffer = PairBuffer(\n min_tolerance=min_tolerance, buffer_size=buffer_size,\n percent_tolerance=percent_tolerance,\n large_transition=large_transition,\n num_measurements=len(self.transients.columns) + 1)\n for s in subset:\n # if len(buffer.transitionList) < bsize\n if len(buffer.transition_list) == buffer_size:\n buffer.clean_buffer()\n buffer.add_transition(s)\n buffer.pair_transitions()\n return buffer.matched_pairs\n\n def disaggregate_chunk(self, chunk, prev, transients):\n \"\"\"\n Parameters\n ----------\n chunk : pd.DataFrame\n mains power\n prev\n transients : returned by find_steady_state_transients\n\n Returns\n -------\n states : pd.DataFrame\n with same index as `chunk`.\n \"\"\"\n\n states = pd.DataFrame(\n -1, index=chunk.index, columns=self.centroids.index.values)\n for transient_tuple in transients.itertuples():\n if transient_tuple[0] < chunk.index[0]:\n # Transient occurs before chunk has started; do nothing\n pass\n elif transient_tuple[0] > chunk.index[-1]:\n # Transient occurs after chunk has ended; do nothing\n pass\n else:\n # Absolute value of transient\n abs_value = np.abs(transient_tuple[1:])\n positive = transient_tuple[1] > 0\n abs_value_transient_minus_centroid = pd.DataFrame(\n (self.centroids - abs_value).abs())\n if len(transient_tuple) == 2:\n # 1d data\n index_least_delta = (\n abs_value_transient_minus_centroid.idxmin().values[0])\n else:\n # 2d data.\n # Need to find absolute value before computing minimum\n columns = abs_value_transient_minus_centroid.columns\n 
abs_value_transient_minus_centroid[\"multidim\"] = (\n abs_value_transient_minus_centroid[columns[0]] ** 2\n +\n abs_value_transient_minus_centroid[columns[1]] ** 2)\n index_least_delta = (\n abs_value_transient_minus_centroid[\"multidim\"].argmin())\n if positive:\n # Turned on\n states.loc[transient_tuple[0]][index_least_delta] = 1\n else:\n # Turned off\n states.loc[transient_tuple[0]][index_least_delta] = 0\n prev = states.iloc[-1].to_dict()\n power_chunk_dict = self.assign_power_from_states(states, prev)\n return pd.DataFrame(power_chunk_dict, index=chunk.index)\n\n def assign_power_from_states(self, states_chunk, prev):\n di = {}\n ndim = len(self.centroids.columns)\n for appliance in states_chunk.columns:\n values = states_chunk[[appliance]].values.flatten()\n if ndim == 1:\n power = np.zeros(len(values), dtype=int)\n else:\n power = np.zeros((len(values), 2), dtype=int)\n # on = False\n i = 0\n while i < len(values) - 1:\n if values[i] == 1:\n # print(\"A\", values[i], i)\n on = True\n i = i + 1\n power[i] = self.centroids.ix[appliance].values\n while values[i] != 0 and i < len(values) - 1:\n # print(\"B\", values[i], i)\n power[i] = self.centroids.ix[appliance].values\n i = i + 1\n elif values[i] == 0:\n # print(\"C\", values[i], i)\n on = False\n i = i + 1\n power[i] = 0\n while values[i] != 1 and i < len(values) - 1:\n # print(\"D\", values[i], i)\n if ndim == 1:\n power[i] = 0\n else:\n power[i] = [0, 0]\n i = i + 1\n else:\n # print(\"E\", values[i], i)\n # Unknown state. If previously we know about this\n # appliance's state, we can\n # use that. Else, it defaults to 0\n if prev[appliance] == -1 or prev[appliance] == 0:\n # print(\"F\", values[i], i)\n on = False\n power[i] = 0\n while values[i] != 1 and i < len(values) - 1:\n # print(\"G\", values[i], i)\n if ndim == 1:\n power[i] = 0\n else:\n power[i] = [0, 0]\n i = i + 1\n else:\n # print(\"H\", values[i], i)\n on = True\n power[i] = self.centroids.ix[appliance].values\n while values[i] != 0 and i < len(values) - 1:\n # print(\"I\", values[i], i)\n power[i] = self.centroids.ix[appliance].values\n i = i + 1\n\n di[appliance] = power\n # print(power.sum())\n return di\n\n def disaggregate(self, mains, output_datastore, **load_kwargs):\n \"\"\"Disaggregate mains according to the model learnt previously.\n\n Parameters\n ----------\n mains : nilmtk.ElecMeter or nilmtk.MeterGroup\n output_datastore : instance of nilmtk.DataStore subclass\n For storing power predictions from disaggregation algorithm.\n sample_period : number, optional\n The desired sample period in seconds.\n **load_kwargs : key word arguments\n Passed to `mains.power_series(**kwargs)`\n \"\"\"\n load_kwargs = self._pre_disaggregation_checks(load_kwargs)\n\n load_kwargs.setdefault('sample_period', 60)\n load_kwargs.setdefault('sections', mains.good_sections())\n\n timeframes = []\n building_path = '/building{}'.format(mains.building())\n mains_data_location = building_path + '/elec/meter1'\n data_is_available = False\n\n [_, transients] = find_steady_states_transients(\n mains, cols=self.cols, state_threshold=self.state_threshold,\n noise_level=self.noise_level, **load_kwargs)\n\n # For now ignoring the first transient\n # transients = transients[1:]\n\n # Initially all appliances/meters are in unknown state (denoted by -1)\n prev = OrderedDict()\n learnt_meters = self.centroids.index.values\n for meter in learnt_meters:\n prev[meter] = -1\n\n timeframes = []\n # Now iterating over mains data and disaggregating chunk by chunk\n for chunk in 
mains.power_series(**load_kwargs):\n # Record metadata\n timeframes.append(chunk.timeframe)\n measurement = chunk.name\n power_df = self.disaggregate_chunk(\n chunk, prev, transients)\n\n cols = pd.MultiIndex.from_tuples([chunk.name])\n\n for meter in learnt_meters:\n data_is_available = True\n df = power_df[[meter]]\n df.columns = cols\n key = '{}/elec/meter{:d}'.format(building_path, meter + 2)\n output_datastore.append(key, df)\n\n output_datastore.append(key=mains_data_location,\n value=pd.DataFrame(chunk, columns=cols))\n\n if data_is_available:\n self._save_metadata_for_disaggregation(\n output_datastore=output_datastore,\n sample_period=load_kwargs['sample_period'],\n measurement=measurement,\n timeframes=timeframes,\n building=mains.building(),\n supervised=False,\n num_meters=len(self.centroids)\n )\n\n \"\"\"\n def export_model(self, filename):\n model_copy = {}\n for appliance, appliance_states in self.model.iteritems():\n model_copy[\n \"{}_{}\".format(appliance.name, appliance.instance)] = appliance_states\n j = json.dumps(model_copy)\n with open(filename, 'w+') as f:\n f.write(j)\n\n def import_model(self, filename):\n with open(filename, 'r') as f:\n temp = json.loads(f.read())\n for appliance, centroids in temp.iteritems():\n appliance_name = appliance.split(\"_\")[0].encode(\"ascii\")\n appliance_instance = int(appliance.split(\"_\")[1])\n appliance_name_instance = ApplianceID(\n appliance_name, appliance_instance)\n self.model[appliance_name_instance] = centroids\n \"\"\"\n" ]
[ [ "numpy.fabs", "pandas.DataFrame", "numpy.abs", "numpy.random.seed", "numpy.add", "pandas.MultiIndex.from_tuples" ] ]
anderlli0053/SourceIO
[ "3c0c4839939ce698439987ac52154f89ee2f5341" ]
[ "library/goldsrc/mdl_v4/structs/model.py" ]
[ "from typing import List\n\nimport numpy as np\n\nfrom .mesh import StudioMesh\nfrom .....library.utils.byte_io_mdl import ByteIO\n\n\nclass StudioModel:\n vertex_dtype = np.dtype([\n ('id', np.uint32, (1,)),\n ('pos', np.float32, (3,)),\n ])\n\n def __init__(self):\n self.name = ''\n self.unk_1 = 0\n self.unk_2 = 0\n self.bounding_radius = 0.0\n self.vertex_count = 0\n self.normal_count = 0\n self.mesh_count = 0\n\n self._vertices = np.array([])\n self._normals = np.array([])\n self.meshes: List[StudioMesh] = []\n\n @property\n def bone_vertex_info(self):\n return self._vertices['id'].flatten()\n\n @property\n def bone_normal_info(self):\n return self._normals['id'].flatten()\n\n @property\n def vertices(self):\n return self._vertices['pos']\n\n @property\n def normals(self):\n return self._normals['pos']\n\n def read(self, reader: ByteIO):\n self.name = reader.read_ascii_string(32)\n (self.unk_1, self.unk_2,\n self.bounding_radius,\n self.vertex_count,\n self.normal_count,\n self.mesh_count,\n ) = reader.read_fmt('2if3i')\n\n self._vertices = np.frombuffer(reader.read(16 * self.vertex_count), self.vertex_dtype)\n self._normals = np.frombuffer(reader.read(16 * self.normal_count), self.vertex_dtype)\n for _ in range(self.mesh_count):\n mesh = StudioMesh()\n mesh.read(reader)\n self.meshes.append(mesh)\n" ]
[ [ "numpy.array", "numpy.dtype" ] ]
shuu-tatsu/subwordsLM
[ "baae8657e3cd2957689b2365155156cd9d18cad8" ]
[ "flair/models/language_model.py" ]
[ "import torch.nn as nn\nimport torch\nimport math\nfrom torch.autograd import Variable\nfrom typing import Dict, List\nfrom flair.data import Dictionary\n\n\nclass LanguageModel(nn.Module):\n \"\"\"Container module with an encoder, a recurrent module, and a decoder.\"\"\"\n\n def __init__(self,\n dictionary: Dictionary,\n is_forward_lm: bool,\n hidden_size: int,\n nlayers: int,\n embedding_size: int = 100,\n nout=None,\n dropout=0.5):\n\n super(LanguageModel, self).__init__()\n\n self.dictionary = dictionary\n self.is_forward_lm: bool = is_forward_lm\n\n self.dropout = dropout\n self.hidden_size = hidden_size\n self.embedding_size = embedding_size\n self.nlayers = nlayers\n\n self.drop = nn.Dropout(dropout)\n self.encoder = nn.Embedding(len(dictionary), embedding_size)\n\n if nlayers == 1:\n self.rnn = nn.LSTM(embedding_size, hidden_size, nlayers)\n else:\n self.rnn = nn.LSTM(embedding_size, hidden_size, nlayers, dropout=dropout)\n\n self.hidden = None\n\n self.nout = nout\n if nout is not None:\n self.proj = nn.Linear(hidden_size, nout)\n self.initialize(self.proj.weight)\n self.decoder = nn.Linear(nout, len(dictionary))\n else:\n self.proj = None\n self.decoder = nn.Linear(hidden_size, len(dictionary))\n\n self.init_weights()\n\n # auto-spawn on GPU if available\n if torch.cuda.is_available():\n self.cuda()\n\n\n def init_weights(self):\n initrange = 0.1\n self.encoder.weight.data.uniform_(-initrange, initrange)\n self.decoder.bias.data.fill_(0)\n self.decoder.weight.data.uniform_(-initrange, initrange)\n\n def set_hidden(self, hidden):\n self.hidden = hidden\n\n def forward(self, input, hidden, ordered_sequence_lengths=None):\n #import pdb; pdb.set_trace()\n encoded = self.encoder(input)\n emb = self.drop(encoded)\n\n output, hidden = self.rnn(emb, hidden)\n\n if self.proj is not None:\n output = self.proj(output)\n\n output = self.drop(output)\n\n decoded = self.decoder(output.view(output.size(0) * output.size(1), output.size(2)))\n\n return decoded.view(output.size(0), output.size(1), decoded.size(1)), output, hidden\n\n def init_hidden(self, bsz):\n #import pdb; pdb.set_trace()\n weight = next(self.parameters()).data\n return (Variable(weight.new(self.nlayers, bsz, self.hidden_size).zero_()),\n Variable(weight.new(self.nlayers, bsz, self.hidden_size).zero_()))\n\n def get_representation(self, strings: List[str], detach_from_lm=True):\n\n sequences_as_char_indices: List[List[int]] = []\n for string in strings:\n #char_indices = [self.dictionary.get_idx_for_item(char) for char in string]\n sub_tok = string.split('_') #add\n char_indices = [self.dictionary.get_idx_for_item(sub) for sub in sub_tok] #add\n sequences_as_char_indices.append(char_indices)\n\n batch = Variable(torch.LongTensor(sequences_as_char_indices).transpose(0, 1))\n\n if torch.cuda.is_available():\n batch = batch.cuda()\n\n hidden = self.init_hidden(len(strings))\n prediction, rnn_output, hidden = self.forward(batch, hidden)\n\n if detach_from_lm: rnn_output = self.repackage_hidden(rnn_output)\n\n return rnn_output\n\n def repackage_hidden(self, h):\n \"\"\"Wraps hidden states in new Variables, to detach them from their history.\"\"\"\n if type(h) == torch.Tensor:\n return Variable(h.data)\n else:\n return tuple(self.repackage_hidden(v) for v in h)\n\n def initialize(self, matrix):\n in_, out_ = matrix.size()\n stdv = math.sqrt(3. 
/ (in_ + out_))\n matrix.data.uniform_(-stdv, stdv)\n\n @classmethod\n def load_language_model(cls, model_file):\n state = torch.load(model_file)\n model = LanguageModel(state['dictionary'],\n state['is_forward_lm'],\n state['hidden_size'],\n state['nlayers'],\n state['embedding_size'],\n state['nout'],\n state['dropout'])\n model.load_state_dict(state['state_dict'])\n model.eval()\n if torch.cuda.is_available():\n model.cuda()\n return model\n\n def save(self, file):\n model_state = {\n 'state_dict': self.state_dict(),\n 'dictionary': self.dictionary,\n 'is_forward_lm': self.is_forward_lm,\n 'hidden_size': self.hidden_size,\n 'nlayers': self.nlayers,\n 'embedding_size': self.embedding_size,\n 'nout': self.nout,\n 'dropout': self.dropout\n }\n torch.save(model_state, file, pickle_protocol=4)\n" ]
[ [ "torch.nn.LSTM", "torch.nn.Linear", "torch.load", "torch.save", "torch.autograd.Variable", "torch.cuda.is_available", "torch.LongTensor", "torch.nn.Dropout" ] ]
Santhanalakshmimano/SpeedBump_detection_usingCV
[ "7b68f260cf1351d757983a48c5a62e063df807c9", "7b68f260cf1351d757983a48c5a62e063df807c9" ]
[ "research/object_detection/anchor_generators/multiple_grid_anchor_generator.py", "research/object_detection/dataset_tools/create_oid_tf_record.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Generates grid anchors on the fly corresponding to multiple CNN layers.\n\nGenerates grid anchors on the fly corresponding to multiple CNN layers as\ndescribed in:\n\"SSD: Single Shot MultiBox Detector\"\nWei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed,\nCheng-Yang Fu, Alexander C. Berg\n(see Section 2.2: Choosing scales and aspect ratios for default boxes)\n\"\"\"\n\nimport numpy as np\n\nimport tensorflow as tf\n\nfrom anchor_generators import grid_anchor_generator\nfrom core import anchor_generator\nfrom core import box_list_ops\n\n\nclass MultipleGridAnchorGenerator(anchor_generator.AnchorGenerator):\n \"\"\"Generate a grid of anchors for multiple CNN layers.\"\"\"\n\n def __init__(self,\n box_specs_list,\n base_anchor_size=None,\n anchor_strides=None,\n anchor_offsets=None,\n clip_window=None):\n \"\"\"Constructs a MultipleGridAnchorGenerator.\n\n To construct anchors, at multiple grid resolutions, one must provide a\n list of feature_map_shape_list (e.g., [(8, 8), (4, 4)]), and for each grid\n size, a corresponding list of (scale, aspect ratio) box specifications.\n\n For example:\n box_specs_list = [[(.1, 1.0), (.1, 2.0)], # for 8x8 grid\n [(.2, 1.0), (.3, 1.0), (.2, 2.0)]] # for 4x4 grid\n\n To support the fully convolutional setting, we pass grid sizes in at\n generation time, while scale and aspect ratios are fixed at construction\n time.\n\n Args:\n box_specs_list: list of list of (scale, aspect ratio) pairs with the\n outside list having the same number of entries as feature_map_shape_list\n (which is passed in at generation time).\n base_anchor_size: base anchor size as [height, width]\n (length-2 float tensor, default=[1.0, 1.0]).\n The height and width values are normalized to the\n minimum dimension of the input height and width, so that\n when the base anchor height equals the base anchor\n width, the resulting anchor is square even if the input\n image is not square.\n anchor_strides: list of pairs of strides in pixels (in y and x directions\n respectively). For example, setting anchor_strides=[(25, 25), (50, 50)]\n means that we want the anchors corresponding to the first layer to be\n strided by 25 pixels and those in the second layer to be strided by 50\n pixels in both y and x directions. If anchor_strides=None, they are set\n to be the reciprocal of the corresponding feature map shapes.\n anchor_offsets: list of pairs of offsets in pixels (in y and x directions\n respectively). The offset specifies where we want the center of the\n (0, 0)-th anchor to lie for each layer. For example, setting\n anchor_offsets=[(10, 10), (20, 20)]) means that we want the\n (0, 0)-th anchor of the first layer to lie at (10, 10) in pixel space\n and likewise that we want the (0, 0)-th anchor of the second layer to\n lie at (25, 25) in pixel space. 
If anchor_offsets=None, then they are\n set to be half of the corresponding anchor stride.\n clip_window: a tensor of shape [4] specifying a window to which all\n anchors should be clipped. If clip_window is None, then no clipping\n is performed.\n\n Raises:\n ValueError: if box_specs_list is not a list of list of pairs\n ValueError: if clip_window is not either None or a tensor of shape [4]\n \"\"\"\n if isinstance(box_specs_list, list) and all(\n [isinstance(list_item, list) for list_item in box_specs_list]):\n self._box_specs = box_specs_list\n else:\n raise ValueError('box_specs_list is expected to be a '\n 'list of lists of pairs')\n if base_anchor_size is None:\n base_anchor_size = tf.constant([256, 256], dtype=tf.float32)\n self._base_anchor_size = base_anchor_size\n self._anchor_strides = anchor_strides\n self._anchor_offsets = anchor_offsets\n if clip_window is not None and clip_window.get_shape().as_list() != [4]:\n raise ValueError('clip_window must either be None or a shape [4] tensor')\n self._clip_window = clip_window\n self._scales = []\n self._aspect_ratios = []\n for box_spec in self._box_specs:\n if not all([isinstance(entry, tuple) and len(entry) == 2\n for entry in box_spec]):\n raise ValueError('box_specs_list is expected to be a '\n 'list of lists of pairs')\n scales, aspect_ratios = zip(*box_spec)\n self._scales.append(scales)\n self._aspect_ratios.append(aspect_ratios)\n\n for arg, arg_name in zip([self._anchor_strides, self._anchor_offsets],\n ['anchor_strides', 'anchor_offsets']):\n if arg and not (isinstance(arg, list) and\n len(arg) == len(self._box_specs)):\n raise ValueError('%s must be a list with the same length '\n 'as self._box_specs' % arg_name)\n if arg and not all([\n isinstance(list_item, tuple) and len(list_item) == 2\n for list_item in arg\n ]):\n raise ValueError('%s must be a list of pairs.' % arg_name)\n\n def name_scope(self):\n return 'MultipleGridAnchorGenerator'\n\n def num_anchors_per_location(self):\n \"\"\"Returns the number of anchors per spatial location.\n\n Returns:\n a list of integers, one for each expected feature map to be passed to\n the Generate function.\n \"\"\"\n return [len(box_specs) for box_specs in self._box_specs]\n\n def _generate(self, feature_map_shape_list, im_height=1, im_width=1):\n \"\"\"Generates a collection of bounding boxes to be used as anchors.\n\n The number of anchors generated for a single grid with shape MxM where we\n place k boxes over each grid center is k*M^2 and thus the total number of\n anchors is the sum over all grids. In our box_specs_list example\n (see the constructor docstring), we would place two boxes over each grid\n point on an 8x8 grid and three boxes over each grid point on a 4x4 grid and\n thus end up with 2*8^2 + 3*4^2 = 176 anchors in total. The layout of the\n output anchors follows the order of how the grid sizes and box_specs are\n specified (with box_spec index varying the fastest, followed by width\n index, then height index, then grid index).\n\n Args:\n feature_map_shape_list: list of pairs of convnet layer resolutions in the\n format [(height_0, width_0), (height_1, width_1), ...]. For example,\n setting feature_map_shape_list=[(8, 8), (7, 7)] asks for anchors that\n correspond to an 8x8 layer followed by a 7x7 layer.\n im_height: the height of the image to generate the grid for. 
If both\n im_height and im_width are 1, the generated anchors default to\n absolute coordinates, otherwise normalized coordinates are produced.\n im_width: the width of the image to generate the grid for. If both\n im_height and im_width are 1, the generated anchors default to\n absolute coordinates, otherwise normalized coordinates are produced.\n\n Returns:\n boxes_list: a list of BoxLists each holding anchor boxes corresponding to\n the input feature map shapes.\n\n Raises:\n ValueError: if feature_map_shape_list, box_specs_list do not have the same\n length.\n ValueError: if feature_map_shape_list does not consist of pairs of\n integers\n \"\"\"\n if not (isinstance(feature_map_shape_list, list)\n and len(feature_map_shape_list) == len(self._box_specs)):\n raise ValueError('feature_map_shape_list must be a list with the same '\n 'length as self._box_specs')\n if not all([isinstance(list_item, tuple) and len(list_item) == 2\n for list_item in feature_map_shape_list]):\n raise ValueError('feature_map_shape_list must be a list of pairs.')\n\n im_height = tf.to_float(im_height)\n im_width = tf.to_float(im_width)\n\n if not self._anchor_strides:\n anchor_strides = [(1.0 / tf.to_float(pair[0]), 1.0 / tf.to_float(pair[1]))\n for pair in feature_map_shape_list]\n else:\n anchor_strides = [(tf.to_float(stride[0]) / im_height,\n tf.to_float(stride[1]) / im_width)\n for stride in self._anchor_strides]\n if not self._anchor_offsets:\n anchor_offsets = [(0.5 * stride[0], 0.5 * stride[1])\n for stride in anchor_strides]\n else:\n anchor_offsets = [(tf.to_float(offset[0]) / im_height,\n tf.to_float(offset[1]) / im_width)\n for offset in self._anchor_offsets]\n\n for arg, arg_name in zip([anchor_strides, anchor_offsets],\n ['anchor_strides', 'anchor_offsets']):\n if not (isinstance(arg, list) and len(arg) == len(self._box_specs)):\n raise ValueError('%s must be a list with the same length '\n 'as self._box_specs' % arg_name)\n if not all([isinstance(list_item, tuple) and len(list_item) == 2\n for list_item in arg]):\n raise ValueError('%s must be a list of pairs.' 
% arg_name)\n\n anchor_grid_list = []\n min_im_shape = tf.minimum(im_height, im_width)\n scale_height = min_im_shape / im_height\n scale_width = min_im_shape / im_width\n base_anchor_size = [\n scale_height * self._base_anchor_size[0],\n scale_width * self._base_anchor_size[1]\n ]\n for feature_map_index, (grid_size, scales, aspect_ratios, stride,\n offset) in enumerate(\n zip(feature_map_shape_list, self._scales,\n self._aspect_ratios, anchor_strides,\n anchor_offsets)):\n tiled_anchors = grid_anchor_generator.tile_anchors(\n grid_height=grid_size[0],\n grid_width=grid_size[1],\n scales=scales,\n aspect_ratios=aspect_ratios,\n base_anchor_size=base_anchor_size,\n anchor_stride=stride,\n anchor_offset=offset)\n if self._clip_window is not None:\n tiled_anchors = box_list_ops.clip_to_window(\n tiled_anchors, self._clip_window, filter_nonoverlapping=False)\n num_anchors_in_layer = tiled_anchors.num_boxes_static()\n if num_anchors_in_layer is None:\n num_anchors_in_layer = tiled_anchors.num_boxes()\n anchor_indices = feature_map_index * tf.ones([num_anchors_in_layer])\n tiled_anchors.add_field('feature_map_index', anchor_indices)\n anchor_grid_list.append(tiled_anchors)\n\n return anchor_grid_list\n\n\ndef create_ssd_anchors(num_layers=6,\n min_scale=0.2,\n max_scale=0.95,\n scales=None,\n aspect_ratios=(1.0, 2.0, 3.0, 1.0 / 2, 1.0 / 3),\n interpolated_scale_aspect_ratio=1.0,\n base_anchor_size=None,\n anchor_strides=None,\n anchor_offsets=None,\n reduce_boxes_in_lowest_layer=True):\n \"\"\"Creates MultipleGridAnchorGenerator for SSD anchors.\n\n This function instantiates a MultipleGridAnchorGenerator that reproduces\n ``default box`` construction proposed by Liu et al in the SSD paper.\n See Section 2.2 for details. Grid sizes are assumed to be passed in\n at generation time from finest resolution to coarsest resolution --- this is\n used to (linearly) interpolate scales of anchor boxes corresponding to the\n intermediate grid sizes.\n\n Anchors that are returned by calling the `generate` method on the returned\n MultipleGridAnchorGenerator object are always in normalized coordinates\n and clipped to the unit square: (i.e. all coordinates lie in [0, 1]x[0, 1]).\n\n Args:\n num_layers: integer number of grid layers to create anchors for (actual\n grid sizes passed in at generation time)\n min_scale: scale of anchors corresponding to finest resolution (float)\n max_scale: scale of anchors corresponding to coarsest resolution (float)\n scales: As list of anchor scales to use. When not None and not empty,\n min_scale and max_scale are not used.\n aspect_ratios: list or tuple of (float) aspect ratios to place on each\n grid point.\n interpolated_scale_aspect_ratio: An additional anchor is added with this\n aspect ratio and a scale interpolated between the scale for a layer\n and the scale for the next layer (1.0 for the last layer).\n This anchor is not included if this value is 0.\n base_anchor_size: base anchor size as [height, width].\n The height and width values are normalized to the minimum dimension of the\n input height and width, so that when the base anchor height equals the\n base anchor width, the resulting anchor is square even if the input image\n is not square.\n anchor_strides: list of pairs of strides in pixels (in y and x directions\n respectively). 
For example, setting anchor_strides=[(25, 25), (50, 50)]\n means that we want the anchors corresponding to the first layer to be\n strided by 25 pixels and those in the second layer to be strided by 50\n pixels in both y and x directions. If anchor_strides=None, they are set to\n be the reciprocal of the corresponding feature map shapes.\n anchor_offsets: list of pairs of offsets in pixels (in y and x directions\n respectively). The offset specifies where we want the center of the\n (0, 0)-th anchor to lie for each layer. For example, setting\n anchor_offsets=[(10, 10), (20, 20)]) means that we want the\n (0, 0)-th anchor of the first layer to lie at (10, 10) in pixel space\n and likewise that we want the (0, 0)-th anchor of the second layer to lie\n at (25, 25) in pixel space. If anchor_offsets=None, then they are set to\n be half of the corresponding anchor stride.\n reduce_boxes_in_lowest_layer: a boolean to indicate whether the fixed 3\n boxes per location is used in the lowest layer.\n\n Returns:\n a MultipleGridAnchorGenerator\n \"\"\"\n if base_anchor_size is None:\n base_anchor_size = [1.0, 1.0]\n base_anchor_size = tf.constant(base_anchor_size, dtype=tf.float32)\n box_specs_list = []\n if scales is None or not scales:\n scales = [min_scale + (max_scale - min_scale) * i / (num_layers - 1)\n for i in range(num_layers)] + [1.0]\n else:\n # Add 1.0 to the end, which will only be used in scale_next below and used\n # for computing an interpolated scale for the largest scale in the list.\n scales += [1.0]\n\n for layer, scale, scale_next in zip(\n range(num_layers), scales[:-1], scales[1:]):\n layer_box_specs = []\n if layer == 0 and reduce_boxes_in_lowest_layer:\n layer_box_specs = [(0.1, 1.0), (scale, 2.0), (scale, 0.5)]\n else:\n for aspect_ratio in aspect_ratios:\n layer_box_specs.append((scale, aspect_ratio))\n # Add one more anchor, with a scale between the current scale, and the\n # scale for the next layer, with a specified aspect ratio (1.0 by\n # default).\n if interpolated_scale_aspect_ratio > 0.0:\n layer_box_specs.append((np.sqrt(scale*scale_next),\n interpolated_scale_aspect_ratio))\n box_specs_list.append(layer_box_specs)\n\n return MultipleGridAnchorGenerator(box_specs_list, base_anchor_size,\n anchor_strides, anchor_offsets)\n", "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nr\"\"\"Creates TFRecords of Open Images dataset for object detection.\n\nExample usage:\n python object_detection/dataset_tools/create_oid_tf_record.py \\\n --input_box_annotations_csv=/path/to/input/annotations-human-bbox.csv \\\n --input_image_label_annotations_csv=/path/to/input/annotations-label.csv \\\n --input_images_directory=/path/to/input/image_pixels_directory \\\n --input_label_map=/path/to/input/labels_bbox_545.labelmap \\\n --output_tf_record_path_prefix=/path/to/output/prefix.tfrecord\n\nCSVs with bounding box annotations and image metadata (including the image URLs)\ncan be downloaded from the Open Images GitHub repository:\nhttps://github.com/openimages/dataset\n\nThis script will include every image found in the input_images_directory in the\noutput TFRecord, even if the image has no corresponding bounding box annotations\nin the input_annotations_csv. If input_image_label_annotations_csv is specified,\nit will add image-level labels as well. Note that the information of whether a\nlabel is positivelly or negativelly verified is NOT added to tfrecord.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport contextlib2\nimport pandas as pd\nimport tensorflow as tf\n\nfrom dataset_tools import oid_tfrecord_creation\nfrom dataset_tools import tf_record_creation_util\nfrom utils import label_map_util\n\ntf.flags.DEFINE_string('input_box_annotations_csv', None,\n 'Path to CSV containing image bounding box annotations')\ntf.flags.DEFINE_string('input_images_directory', None,\n 'Directory containing the image pixels '\n 'downloaded from the OpenImages GitHub repository.')\ntf.flags.DEFINE_string('input_image_label_annotations_csv', None,\n 'Path to CSV containing image-level labels annotations')\ntf.flags.DEFINE_string('input_label_map', None, 'Path to the label map proto')\ntf.flags.DEFINE_string(\n 'output_tf_record_path_prefix', None,\n 'Path to the output TFRecord. 
The shard index and the number of shards '\n 'will be appended for each output shard.')\ntf.flags.DEFINE_integer('num_shards', 100, 'Number of TFRecord shards')\n\nFLAGS = tf.flags.FLAGS\n\n\ndef main(_):\n tf.logging.set_verbosity(tf.logging.INFO)\n\n required_flags = [\n 'input_box_annotations_csv', 'input_images_directory', 'input_label_map',\n 'output_tf_record_path_prefix'\n ]\n for flag_name in required_flags:\n if not getattr(FLAGS, flag_name):\n raise ValueError('Flag --{} is required'.format(flag_name))\n\n label_map = label_map_util.get_label_map_dict(FLAGS.input_label_map)\n all_box_annotations = pd.read_csv(FLAGS.input_box_annotations_csv)\n if FLAGS.input_image_label_annotations_csv:\n all_label_annotations = pd.read_csv(FLAGS.input_image_label_annotations_csv)\n all_label_annotations.rename(\n columns={'Confidence': 'ConfidenceImageLabel'}, inplace=True)\n else:\n all_label_annotations = None\n all_images = tf.gfile.Glob(\n os.path.join(FLAGS.input_images_directory, '*.jpg'))\n all_image_ids = [os.path.splitext(os.path.basename(v))[0] for v in all_images]\n all_image_ids = pd.DataFrame({'ImageID': all_image_ids})\n all_annotations = pd.concat(\n [all_box_annotations, all_image_ids, all_label_annotations])\n\n tf.logging.log(tf.logging.INFO, 'Found %d images...', len(all_image_ids))\n\n with contextlib2.ExitStack() as tf_record_close_stack:\n output_tfrecords = tf_record_creation_util.open_sharded_output_tfrecords(\n tf_record_close_stack, FLAGS.output_tf_record_path_prefix,\n FLAGS.num_shards)\n\n for counter, image_data in enumerate(all_annotations.groupby('ImageID')):\n tf.logging.log_every_n(tf.logging.INFO, 'Processed %d images...', 1000,\n counter)\n\n image_id, image_annotations = image_data\n # In OID image file names are formed by appending \".jpg\" to the image ID.\n image_path = os.path.join(FLAGS.input_images_directory, image_id + '.jpg')\n with tf.gfile.Open(image_path) as image_file:\n encoded_image = image_file.read()\n\n tf_example = oid_tfrecord_creation.tf_example_from_annotations_data_frame(\n image_annotations, label_map, encoded_image)\n if tf_example:\n shard_idx = int(image_id, 16) % FLAGS.num_shards\n output_tfrecords[shard_idx].write(tf_example.SerializeToString())\n\n\nif __name__ == '__main__':\n tf.app.run()\n" ]
[ [ "tensorflow.minimum", "tensorflow.to_float", "tensorflow.ones", "numpy.sqrt", "tensorflow.constant" ], [ "tensorflow.flags.DEFINE_integer", "tensorflow.app.run", "pandas.read_csv", "tensorflow.logging.set_verbosity", "pandas.DataFrame", "tensorflow.logging.log_every_n", "pandas.concat", "tensorflow.flags.DEFINE_string", "tensorflow.gfile.Open" ] ]
FrancisTembo/tensorflow-models
[ "042f74690d0cf412cb6b7fc19f4a41afdf547905" ]
[ "syntaxnet/dragnn/python/wrapped_units.py" ]
[ "\"\"\"Network units wrapping TensorFlows' tf.contrib.rnn cells.\n\nPlease put all wrapping logic for tf.contrib.rnn in this module; this will help\ncollect common subroutines that prove useful.\n\"\"\"\n\nimport abc\n\nimport tensorflow as tf\n\nfrom dragnn.python import network_units as dragnn\nfrom syntaxnet.util import check\n\n\nclass BaseLSTMNetwork(dragnn.NetworkUnitInterface):\n \"\"\"Base class for wrapped LSTM networks.\n\n This LSTM network unit supports multiple layers with layer normalization.\n Because it is imported from tf.contrib.rnn, we need to capture the created\n variables during initialization time.\n\n Layers:\n ...subclass-specific layers...\n last_layer: Alias for the activations of the last hidden layer.\n logits: Logits associated with component actions.\n \"\"\"\n\n def __init__(self, component):\n \"\"\"Initializes the LSTM base class.\n\n Parameters used:\n hidden_layer_sizes: Comma-delimited number of hidden units for each layer.\n input_dropout_rate (-1.0): Input dropout rate for each layer. If < 0.0,\n use the global |dropout_rate| hyperparameter.\n recurrent_dropout_rate (0.8): Recurrent dropout rate. If < 0.0, use the\n global |recurrent_dropout_rate| hyperparameter.\n layer_norm (True): Whether or not to use layer norm.\n\n Hyperparameters used:\n dropout_rate: Input dropout rate.\n recurrent_dropout_rate: Recurrent dropout rate.\n\n Args:\n component: parent ComponentBuilderBase object.\n \"\"\"\n self._attrs = dragnn.get_attrs_with_defaults(\n component.spec.network_unit.parameters,\n defaults={\n 'layer_norm': True,\n 'input_dropout_rate': -1.0,\n 'recurrent_dropout_rate': 0.8,\n 'hidden_layer_sizes': '256',\n })\n\n self._hidden_layer_sizes = map(int,\n self._attrs['hidden_layer_sizes'].split(','))\n\n self._input_dropout_rate = self._attrs['input_dropout_rate']\n if self._input_dropout_rate < 0.0:\n self._input_dropout_rate = component.master.hyperparams.dropout_rate\n\n self._recurrent_dropout_rate = self._attrs['recurrent_dropout_rate']\n if self._recurrent_dropout_rate < 0.0:\n self._recurrent_dropout_rate = (\n component.master.hyperparams.recurrent_dropout_rate)\n if self._recurrent_dropout_rate < 0.0:\n self._recurrent_dropout_rate = component.master.hyperparams.dropout_rate\n\n tf.logging.info('[%s] input_dropout_rate=%s recurrent_dropout_rate=%s',\n component.name, self._input_dropout_rate,\n self._recurrent_dropout_rate)\n\n layers, context_layers = self.create_hidden_layers(component,\n self._hidden_layer_sizes)\n last_layer_dim = layers[-1].dim\n layers.append(\n dragnn.Layer(component, name='last_layer', dim=last_layer_dim))\n layers.append(\n dragnn.Layer(component, name='logits', dim=component.num_actions))\n\n # Provide initial layers and context layers, so the base class constructor\n # can safely use accessors like get_layer_size().\n super(BaseLSTMNetwork, self).__init__(\n component, init_layers=layers, init_context_layers=context_layers)\n\n # Allocate parameters for the softmax.\n self._params.append(\n tf.get_variable(\n 'weights_softmax', [last_layer_dim, component.num_actions],\n initializer=tf.random_normal_initializer(\n stddev=1e-4, seed=self._seed)))\n self._params.append(\n tf.get_variable(\n 'bias_softmax', [component.num_actions],\n initializer=tf.zeros_initializer()))\n\n def get_logits(self, network_tensors):\n \"\"\"Returns the logits for prediction.\"\"\"\n return network_tensors[self.get_layer_index('logits')]\n\n @abc.abstractmethod\n def create_hidden_layers(self, component, hidden_layer_sizes):\n 
\"\"\"Creates hidden network layers.\n\n Args:\n component: Parent ComponentBuilderBase object.\n hidden_layer_sizes: List of requested hidden layer activation sizes.\n\n Returns:\n layers: List of layers created by this network.\n context_layers: List of context layers created by this network.\n \"\"\"\n pass\n\n def _append_base_layers(self, hidden_layers):\n \"\"\"Appends layers defined by the base class to the |hidden_layers|.\"\"\"\n last_layer = hidden_layers[-1]\n\n # TODO(googleuser): Uncomment the version that uses component.get_variable()\n # and delete the uses of tf.get_variable().\n # logits = tf.nn.xw_plus_b(last_layer,\n # self._component.get_variable('weights_softmax'),\n # self._component.get_variable('bias_softmax'))\n logits = tf.nn.xw_plus_b(last_layer,\n tf.get_variable('weights_softmax'),\n tf.get_variable('bias_softmax'))\n return hidden_layers + [last_layer, logits]\n\n def _create_cell(self, num_units, during_training):\n \"\"\"Creates a single LSTM cell, possibly with dropout.\n\n Requires that BaseLSTMNetwork.__init__() was called.\n\n Args:\n num_units: Number of hidden units in the cell.\n during_training: Whether to create a cell for training (vs inference).\n\n Returns:\n A RNNCell of the requested size, possibly with dropout.\n \"\"\"\n # No dropout in inference mode.\n if not during_training:\n return tf.contrib.rnn.LayerNormBasicLSTMCell(\n num_units, layer_norm=self._attrs['layer_norm'], reuse=True)\n\n # Otherwise, apply dropout to inputs and recurrences.\n cell = tf.contrib.rnn.LayerNormBasicLSTMCell(\n num_units,\n dropout_keep_prob=self._recurrent_dropout_rate,\n layer_norm=self._attrs['layer_norm'])\n cell = tf.contrib.rnn.DropoutWrapper(\n cell, input_keep_prob=self._input_dropout_rate)\n return cell\n\n def _create_train_cells(self):\n \"\"\"Creates a list of LSTM cells for training.\"\"\"\n return [\n self._create_cell(num_units, during_training=True)\n for num_units in self._hidden_layer_sizes\n ]\n\n def _create_inference_cells(self):\n \"\"\"Creates a list of LSTM cells for inference.\"\"\"\n return [\n self._create_cell(num_units, during_training=False)\n for num_units in self._hidden_layer_sizes\n ]\n\n def _capture_variables_as_params(self, function):\n \"\"\"Captures variables created by a function in |self._params|.\n\n Args:\n function: Function whose variables should be captured. The function\n should take one argument, its enclosing variable scope.\n \"\"\"\n created_vars = {}\n\n def _custom_getter(getter, *args, **kwargs):\n \"\"\"Calls the real getter and captures its result in |created_vars|.\"\"\"\n real_variable = getter(*args, **kwargs)\n created_vars[real_variable.name] = real_variable\n return real_variable\n\n with tf.variable_scope(\n 'cell', reuse=None, custom_getter=_custom_getter) as scope:\n function(scope)\n self._params.extend(created_vars.values())\n\n def _apply_with_captured_variables(self, function):\n \"\"\"Applies a function using previously-captured variables.\n\n Args:\n function: Function to apply using captured variables. 
The function\n should take one argument, its enclosing variable scope.\n\n Returns:\n Results of function application.\n \"\"\"\n\n def _custom_getter(getter, *args, **kwargs):\n \"\"\"Retrieves the normal or moving-average variables.\"\"\"\n return self._component.get_variable(var_params=getter(*args, **kwargs))\n\n with tf.variable_scope(\n 'cell', reuse=True, custom_getter=_custom_getter) as scope:\n return function(scope)\n\n\nclass LayerNormBasicLSTMNetwork(BaseLSTMNetwork):\n \"\"\"Wrapper around tf.contrib.rnn.LayerNormBasicLSTMCell.\n\n Features:\n All inputs are concatenated.\n\n Subclass-specific layers:\n state_c_<n>: Cell states for the <n>'th LSTM layer (0-origin).\n state_h_<n>: Hidden states for the <n>'th LSTM layer (0-origin).\n \"\"\"\n\n def __init__(self, component):\n \"\"\"Sets up context and output layers, as well as a final softmax.\"\"\"\n super(LayerNormBasicLSTMNetwork, self).__init__(component)\n\n # Wrap lists of training and inference sub-cells into multi-layer RNN cells.\n # Note that a |MultiRNNCell| state is a tuple of per-layer sub-states.\n self._train_cell = tf.contrib.rnn.MultiRNNCell(self._create_train_cells())\n self._inference_cell = tf.contrib.rnn.MultiRNNCell(\n self._create_inference_cells())\n\n def _cell_closure(scope):\n \"\"\"Applies the LSTM cell to placeholder inputs and state.\"\"\"\n placeholder_inputs = tf.placeholder(\n dtype=tf.float32, shape=(1, self._concatenated_input_dim))\n\n placeholder_substates = []\n for num_units in self._hidden_layer_sizes:\n placeholder_substate = tf.contrib.rnn.LSTMStateTuple(\n tf.placeholder(dtype=tf.float32, shape=(1, num_units)),\n tf.placeholder(dtype=tf.float32, shape=(1, num_units)))\n placeholder_substates.append(placeholder_substate)\n placeholder_state = tuple(placeholder_substates)\n\n self._train_cell(\n inputs=placeholder_inputs, state=placeholder_state, scope=scope)\n\n self._capture_variables_as_params(_cell_closure)\n\n def create_hidden_layers(self, component, hidden_layer_sizes):\n \"\"\"See base class.\"\"\"\n # Construct the layer meta info for the DRAGNN builder. Note that the order\n # of h and c are reversed compared to the vanilla DRAGNN LSTM cell, as\n # this is the standard in tf.contrib.rnn.\n #\n # NB: The h activations of the last LSTM must be the last layer, in order\n # for _append_base_layers() to work.\n layers = []\n for index, num_units in enumerate(hidden_layer_sizes):\n layers.append(\n dragnn.Layer(component, name='state_c_%d' % index, dim=num_units))\n layers.append(\n dragnn.Layer(component, name='state_h_%d' % index, dim=num_units))\n context_layers = list(layers) # copy |layers|, don't alias it\n return layers, context_layers\n\n def create(self,\n fixed_embeddings,\n linked_embeddings,\n context_tensor_arrays,\n attention_tensor,\n during_training,\n stride=None):\n \"\"\"See base class.\"\"\"\n # NB: This cell pulls the lstm's h and c vectors from context_tensor_arrays\n # instead of through linked features.\n check.Eq(\n len(context_tensor_arrays), 2 * len(self._hidden_layer_sizes),\n 'require two context tensors per hidden layer')\n\n # Rearrange the context tensors into a tuple of LSTM sub-states.\n length = context_tensor_arrays[0].size()\n substates = []\n for index, num_units in enumerate(self._hidden_layer_sizes):\n state_c = context_tensor_arrays[2 * index].read(length - 1)\n state_h = context_tensor_arrays[2 * index + 1].read(length - 1)\n\n # Fix shapes that for some reason are not set properly for an unknown\n # reason. 
TODO(googleuser): Why are the shapes not set?\n state_c.set_shape([tf.Dimension(None), num_units])\n state_h.set_shape([tf.Dimension(None), num_units])\n substates.append(tf.contrib.rnn.LSTMStateTuple(state_c, state_h))\n state = tuple(substates)\n\n input_tensor = dragnn.get_input_tensor(fixed_embeddings, linked_embeddings)\n cell = self._train_cell if during_training else self._inference_cell\n\n def _cell_closure(scope):\n \"\"\"Applies the LSTM cell to the current inputs and state.\"\"\"\n return cell(input_tensor, state, scope)\n\n unused_h, state = self._apply_with_captured_variables(_cell_closure)\n\n # Return tensors to be put into the tensor arrays / used to compute\n # objective.\n output_tensors = []\n for new_substate in state:\n new_c, new_h = new_substate\n output_tensors.append(new_c)\n output_tensors.append(new_h)\n return self._append_base_layers(output_tensors)\n\n\nclass BulkBiLSTMNetwork(BaseLSTMNetwork):\n \"\"\"Bulk wrapper around tf.contrib.rnn.stack_bidirectional_dynamic_rnn().\n\n Features:\n lengths: [stride, 1] sequence lengths per batch item.\n All other features are concatenated into input activations.\n\n Subclass-specific layers:\n outputs: [stride * num_steps, self._output_dim] bi-LSTM activations.\n \"\"\"\n\n def __init__(self, component):\n super(BulkBiLSTMNetwork, self).__init__(component)\n\n check.In('lengths', self._linked_feature_dims,\n 'Missing required linked feature')\n check.Eq(self._linked_feature_dims['lengths'], 1,\n 'Wrong dimension for \"lengths\" feature')\n self._input_dim = self._concatenated_input_dim - 1 # exclude 'lengths'\n self._output_dim = self.get_layer_size('outputs')\n tf.logging.info('[%s] Bulk bi-LSTM with input_dim=%d output_dim=%d',\n component.name, self._input_dim, self._output_dim)\n\n # Create one training and inference cell per layer and direction.\n self._train_cells_forward = self._create_train_cells()\n self._train_cells_backward = self._create_train_cells()\n self._inference_cells_forward = self._create_inference_cells()\n self._inference_cells_backward = self._create_inference_cells()\n\n def _bilstm_closure(scope):\n \"\"\"Applies the bi-LSTM to placeholder inputs and lengths.\"\"\"\n # Use singleton |stride| and |steps| because their values don't affect the\n # weight variables.\n stride, steps = 1, 1\n placeholder_inputs = tf.placeholder(\n dtype=tf.float32, shape=[stride, steps, self._input_dim])\n placeholder_lengths = tf.placeholder(dtype=tf.int64, shape=[stride])\n\n # Omit the initial states and sequence lengths for simplicity; they don't\n # affect the weight variables.\n tf.contrib.rnn.stack_bidirectional_dynamic_rnn(\n self._train_cells_forward,\n self._train_cells_backward,\n placeholder_inputs,\n dtype=tf.float32,\n sequence_length=placeholder_lengths,\n scope=scope)\n\n self._capture_variables_as_params(_bilstm_closure)\n\n # Allocate parameters for the initial states. 
Note that an LSTM state is a\n # tuple of two substates (c, h), so there are 4 variables per layer.\n for index, num_units in enumerate(self._hidden_layer_sizes):\n for direction in ['forward', 'backward']:\n for substate in ['c', 'h']:\n self._params.append(\n tf.get_variable(\n 'initial_state_%s_%s_%d' % (direction, substate, index),\n [1, num_units], # leading 1 for later batch-wise tiling\n dtype=tf.float32,\n initializer=tf.constant_initializer(0.0)))\n\n def create_hidden_layers(self, component, hidden_layer_sizes):\n \"\"\"See base class.\"\"\"\n dim = 2 * hidden_layer_sizes[-1]\n return [dragnn.Layer(component, name='outputs', dim=dim)], []\n\n def create(self,\n fixed_embeddings,\n linked_embeddings,\n context_tensor_arrays,\n attention_tensor,\n during_training,\n stride=None):\n \"\"\"Requires |stride|; otherwise see base class.\"\"\"\n check.NotNone(stride,\n 'BulkBiLSTMNetwork requires \"stride\" and must be called '\n 'in the bulk feature extractor component.')\n\n # Flatten the lengths into a vector.\n lengths = dragnn.lookup_named_tensor('lengths', linked_embeddings)\n lengths_s = tf.squeeze(lengths.tensor, [1])\n\n # Collect all other inputs into a batched tensor.\n linked_embeddings = [\n named_tensor for named_tensor in linked_embeddings\n if named_tensor.name != 'lengths'\n ]\n inputs_sxnxd = dragnn.get_input_tensor_with_stride(\n fixed_embeddings, linked_embeddings, stride)\n\n # Since get_input_tensor_with_stride() concatenates the input embeddings, it\n # obscures the static activation dimension, which the RNN library requires.\n # Restore it using set_shape(). Note that set_shape() merges into the known\n # shape, so only specify the activation dimension.\n inputs_sxnxd.set_shape(\n [tf.Dimension(None), tf.Dimension(None), self._input_dim])\n\n initial_states_forward, initial_states_backward = (\n self._create_initial_states(stride))\n\n if during_training:\n cells_forward = self._train_cells_forward\n cells_backward = self._train_cells_backward\n else:\n cells_forward = self._inference_cells_forward\n cells_backward = self._inference_cells_backward\n\n def _bilstm_closure(scope):\n \"\"\"Applies the bi-LSTM to the current inputs.\"\"\"\n outputs_sxnxd, _, _ = tf.contrib.rnn.stack_bidirectional_dynamic_rnn(\n cells_forward,\n cells_backward,\n inputs_sxnxd,\n initial_states_fw=initial_states_forward,\n initial_states_bw=initial_states_backward,\n sequence_length=lengths_s,\n scope=scope)\n return outputs_sxnxd\n\n # Layer outputs are not batched; flatten out the batch dimension.\n outputs_sxnxd = self._apply_with_captured_variables(_bilstm_closure)\n outputs_snxd = tf.reshape(outputs_sxnxd, [-1, self._output_dim])\n return self._append_base_layers([outputs_snxd])\n\n def _create_initial_states(self, stride):\n \"\"\"Returns stacked and batched initial states for the bi-LSTM.\"\"\"\n initial_states_forward = []\n initial_states_backward = []\n for index in range(len(self._hidden_layer_sizes)):\n # Retrieve the initial states for this layer.\n states_sxd = []\n for direction in ['forward', 'backward']:\n for substate in ['c', 'h']:\n state_1xd = self._component.get_variable('initial_state_%s_%s_%d' %\n (direction, substate, index))\n state_sxd = tf.tile(state_1xd, [stride, 1]) # tile across the batch\n states_sxd.append(state_sxd)\n\n # Assemble and append forward and backward LSTM states.\n initial_states_forward.append(\n tf.contrib.rnn.LSTMStateTuple(states_sxd[0], states_sxd[1]))\n initial_states_backward.append(\n tf.contrib.rnn.LSTMStateTuple(states_sxd[2], 
states_sxd[3]))\n return initial_states_forward, initial_states_backward\n" ]
[ [ "tensorflow.contrib.rnn.stack_bidirectional_dynamic_rnn", "tensorflow.placeholder", "tensorflow.constant_initializer", "tensorflow.reshape", "tensorflow.logging.info", "tensorflow.zeros_initializer", "tensorflow.contrib.rnn.DropoutWrapper", "tensorflow.variable_scope", "tensorflow.contrib.rnn.LayerNormBasicLSTMCell", "tensorflow.squeeze", "tensorflow.Dimension", "tensorflow.random_normal_initializer", "tensorflow.tile", "tensorflow.get_variable", "tensorflow.contrib.rnn.LSTMStateTuple" ] ]
bobolee1239/levoice
[ "56f20afe15f9e171b8971e14551180180cb86cfb" ]
[ "script/model/LeVoice.py" ]
[ "# ----------------------------------\n# File: LeVoice.py\n# ----------------------------------\nimport sys \nif '..' not in sys.path:\n sys.path.append('..')\n\nimport torch\nimport torch.nn.functional as F\nimport config\n\nfrom torch import nn\n\n# -----------------------------------------\n\ndef pad_freq(x, padding):\n '''\n Args:\n --------\n - x: (N, ch, nFrm, nFreq)\n - padding: (freq_low, freq_up)\n '''\n return F.pad(x, padding, \"constant\", 0) \n\ndef pad_time(x, padding):\n '''\n Args:\n --------\n - x: (N, ch, nFrm, nFreq)\n - padding: (time_left, time_right)\n '''\n return F.pad(x, (0, 0, *padding), \"constant\", 0) \n\n\nclass LeVoice(nn.Module):\n def __init__(self, nfreq):\n super(LeVoice, self).__init__()\n\n self.nfreq = nfreq\n \n nclass = config.N_CLASS\n nhid = 128\n\n self.relu = nn.ReLU()\n\n self.conv1 = nn.Conv2d(1, 4, (3, 3))\n self.bn1 = nn.BatchNorm2d(4)\n\n self.conv2_dep = nn.Conv2d(4, 8, (3, 3), groups=4)\n self.conv2_pt = nn.Conv2d(8, 8, (1, 1))\n self.bn2 = nn.BatchNorm2d(8)\n\n self.conv3_dep = nn.Conv2d( 8, 16, (3, 1), groups=8)\n self.conv3_pt = nn.Conv2d(16, 16, (1, 1))\n self.bn3 = nn.BatchNorm2d(16)\n\n self.conv4_dep = nn.Conv2d(16, 32, (3, 1), groups=16)\n self.conv4_pt = nn.Conv2d(32, 32, (1, 1))\n self.bn4 = nn.BatchNorm2d(32)\n\n self.pre_gru_pt = nn.Conv2d(32, 8, (1, 1))\n self.tf = nn.Linear(40*8, nhid)\n\n self.gru1 = nn.GRU(nhid, nhid)\n self.gru2 = nn.GRU(nhid, nhid)\n\n self.output = nn.Sequential(\n nn.Linear(nhid, nclass)\n )\n\n def forward(self, spectra):\n '''\n Args:\n - feat: <tensor> (N, nFrm, nFreq)\n '''\n # (N, 1, nFrm, nFreq)\n spectra = spectra.unsqueeze(1)\n spectra = pad_time(spectra, (2, 6))\n spectra = pad_freq(spectra, (2, 2))\n\n spec_hid1 = self.conv1(spectra)\n spec_hid1 = self.bn1(spec_hid1)\n spec_hid1 = self.relu(spec_hid1)\n\n spec_hid2 = self.conv2_dep(spec_hid1)\n spec_hid2 = self.conv2_pt(spec_hid2)\n spec_hid2 = self.bn2(spec_hid2)\n spec_hid2 = self.relu(spec_hid2)\n\n spec_hid3 = self.conv3_dep(spec_hid2)\n spec_hid3 = self.conv3_pt(spec_hid3)\n spec_hid3 = self.bn3(spec_hid3)\n spec_hid3 = self.relu(spec_hid3)\n\n spec_hid4 = self.conv4_dep(spec_hid3)\n spec_hid4 = self.conv4_pt(spec_hid4)\n spec_hid4 = self.bn4(spec_hid4)\n spec_hid4 = self.relu(spec_hid4)\n\n # (N, 8, nFrm, nFreq)\n spec_hid5 = self.pre_gru_pt(spec_hid4)\n N, nCh, nFrm, nFreq = spec_hid5.shape \n # (nFrm, N, nFreq)\n feat = spec_hid5.permute(2, 0, 1, 3)\n feat = feat.reshape((nFrm, N, nCh*nFreq))\n hid1 = self.tf(feat)\n\n hid2, hn2 = self.gru1(hid1)\n hid3, hn3 = self.gru2(hid2)\n\n hid4 = 0.5 * (hid2 + hid3)\n pred = self.output(hid4)\n pred = pred.permute(1, 0, 2)\n\n return pred\n\n\nif __name__ == '__main__':\n import pdb\n\n nfrm = 100\n nfreq = 40\n batch = 8\n\n model = LeVoice(nfreq)\n\n x = torch.rand(batch, nfrm, nfreq)\n pred = model(x)\n\n pdb.set_trace()\n" ]
[ [ "torch.nn.BatchNorm2d", "torch.nn.Linear", "torch.nn.functional.pad", "torch.rand", "torch.nn.GRU", "torch.nn.Conv2d", "torch.nn.ReLU" ] ]
ajey091/neml
[ "23dd2cdb83057fdd17a37fa19f4592c54f821dbf" ]
[ "test/test_tensors.py" ]
[ "#!/usr/bin/env python\n\nfrom neml.math import tensors\n\nimport common\n\nimport unittest\nimport numpy as np\nimport numpy.linalg as la\n\nclass TestVector(unittest.TestCase):\n def setUp(self):\n self.a = np.array([2.2,-1.2,2.5])\n self.b = np.array([0.0,5.8,1.1])\n\n self.va = tensors.Vector(self.a)\n self.vb = tensors.Vector(self.b)\n\n self.s = 2.1\n \n def test_assign(self):\n self.vb = self.va\n self.assertTrue(np.allclose(self.vb.data, self.a))\n\n def test_norm(self):\n self.assertAlmostEqual(self.va.norm(), la.norm(self.a))\n\n def test_dot(self):\n self.assertAlmostEqual(np.dot(self.a,self.b), self.va.dot(self.vb))\n\n def test_smultiply(self):\n self.assertTrue(np.allclose(\n (self.s * self.va).data,\n self.s * self.a))\n self.assertTrue(np.allclose(\n (self.va * self.s).data,\n self.s * self.a))\n\n def test_sdivide(self):\n self.assertTrue(np.allclose(\n (self.va / self.s).data,\n self.a / self.s))\n\n def test_add(self):\n self.assertTrue(np.allclose(\n (self.va + self.vb).data,\n self.a + self.b))\n self.assertTrue(np.allclose(\n (self.vb + self.va).data,\n self.a + self.b))\n\n def test_subtract(self):\n self.assertTrue(np.allclose(\n (self.va - self.vb).data,\n self.a - self.b))\n self.assertTrue(np.allclose(\n (self.vb - self.va).data,\n self.b - self.a))\n\n def test_negate(self):\n self.assertTrue(np.allclose(\n (-self.va).data,\n -self.a))\n self.assertTrue(np.allclose(\n self.va.opposite().data,\n -self.a))\n\n def test_normalize(self):\n self.va.normalize()\n self.assertTrue(np.allclose(\n self.va.data, self.a / la.norm(self.a)))\n\n def test_cross(self):\n self.assertTrue(np.allclose(\n self.va.cross(self.vb).data,\n np.cross(self.a, self.b)))\n\n def test_equality(self):\n self.assertTrue(self.va == self.va)\n self.assertFalse(self.va == self.vb)\n\n def test_inequality(self):\n self.assertFalse(self.va != self.va)\n self.assertTrue(self.va != self.vb)\n\n def test_get(self):\n for i in range(3):\n self.assertTrue(np.isclose(self.va[i], self.a[i]))\n\n def test_set(self):\n for i in range(3):\n self.va[i] = 2.0\n self.a[i] = 2.0\n self.assertTrue(np.isclose(self.va[i], self.a[i]))\n\n def test_outer(self):\n self.assertEqual(self.va.outer(self.vb), tensors.outer(self.va, self.vb))\n self.assertEqual(tensors.RankTwo(np.outer(self.a, self.b)), \n tensors.outer(self.va, self.vb))\n\nclass TestRankTwo(unittest.TestCase):\n def setUp(self):\n self.A = np.array([[4.1,2.8,-1.2],[3.1,7.1,0.2],[4,2,3]])\n self.TA = tensors.RankTwo(self.A)\n self.B = np.array([[10.2,-9.3,2.5],[0.1,3.1,2.8],[0.1,3.2,-6.1]])\n self.TB = tensors.RankTwo(self.B)\n\n self.a = np.array([2.2,-1.2,2.5])\n self.va = tensors.Vector(self.a)\n\n self.s = 2.1\n\n def test_norm(self):\n self.assertTrue(np.isclose(self.TA.norm(), np.sqrt(np.sum(self.A*self.A))))\n\n def test_equality(self):\n self.assertEqual(self.TA, self.TA)\n\n def test_inequality(self):\n self.assertNotEqual(self.TA, self.TB)\n\n def test_get(self):\n self.assertTrue(np.isclose(self.TA[0,0], self.A[0,0]))\n\n def test_set(self):\n self.A[0,0] = 1.5\n self.assertTrue(np.isclose(self.A[0,0], 1.5))\n\n def test_scalar_mult(self):\n self.assertEqual(tensors.RankTwo(self.s*self.A), self.s * self.TA)\n self.assertEqual(tensors.RankTwo(self.A / self.s), self.TA / self.s)\n\n def test_add(self):\n self.assertEqual(tensors.RankTwo(self.A + self.B), self.TA + self.TB)\n self.assertEqual(tensors.RankTwo(self.A - self.B), self.TA - self.TB)\n\n def test_matrix_vector(self):\n self.assertEqual(tensors.Vector(np.dot(self.A, self.a)), 
self.TA*self.va)\n self.assertEqual(tensors.Vector(np.dot(self.a, self.A)), self.va*self.TA)\n\n def test_matrix_matrix(self):\n self.assertEqual(tensors.RankTwo(np.dot(self.A, self.B)), self.TA*self.TB)\n\n def test_inverse(self):\n self.assertEqual(tensors.RankTwo(la.inv(self.A)), self.TA.inverse())\n\n def test_transpose(self):\n self.assertEqual(tensors.RankTwo(self.A.T), self.TA.transpose())\n\nclass TestSymmetric(unittest.TestCase):\n def setUp(self):\n self.A = np.array([[4.1,2.8,-1.2],[3.1,7.1,0.2],[4,2,3]])\n self.A = 0.5*(self.A + self.A.T)\n self.TA = tensors.Symmetric(self.A)\n self.B = np.array([[10.2,-9.3,2.5],[0.1,3.1,2.8],[0.1,3.2,-6.1]])\n self.B = 0.5*(self.B + self.B.T)\n self.TB = tensors.Symmetric(self.B)\n\n self.a = np.array([2.2,-1.2,2.5])\n self.va = tensors.Vector(self.a)\n\n self.s = 2.1\n\n def test_norm(self):\n self.assertTrue(np.isclose(self.TA.norm(), np.sqrt(np.sum(self.A*self.A))))\n\n def test_equality(self):\n self.assertEqual(self.TA, self.TA)\n\n def test_inequality(self):\n self.assertNotEqual(self.TA, self.TB)\n\n def test_scalar_mult(self):\n self.assertEqual(tensors.Symmetric(self.s*self.A), self.s * self.TA)\n self.assertEqual(tensors.Symmetric(self.A / self.s), self.TA / self.s)\n\n def test_add(self):\n self.assertEqual(tensors.Symmetric(self.A + self.B), self.TA + self.TB)\n self.assertEqual(tensors.Symmetric(self.A - self.B), self.TA - self.TB)\n\n def test_matrix_vector(self):\n self.assertEqual(tensors.Vector(np.dot(self.A, self.a)), self.TA*self.va)\n self.assertEqual(tensors.Vector(np.dot(self.a, self.A)), self.va*self.TA)\n\n def test_matrix_matrix(self):\n self.assertEqual(tensors.Symmetric(np.dot(self.A, self.B)), self.TA*self.TB)\n\n def test_inverse(self):\n self.assertEqual(tensors.Symmetric(la.inv(self.A)), self.TA.inverse())\n\n def test_transpose(self):\n self.assertEqual(tensors.Symmetric(self.A.T), self.TA.transpose())\n\n def test_id(self):\n self.assertEqual(tensors.Symmetric.id(), tensors.Symmetric(np.eye(3)))\n\n def test_trace(self):\n self.assertTrue(np.isclose(self.TA.trace(), np.trace(self.A)))\n\n def test_dev(self):\n self.assertTrue(self.TA.dev(), tensors.Symmetric(\n self.A - np.trace(self.A)/3.0 * np.eye(3)))\n\nclass TestSkew(unittest.TestCase):\n def setUp(self):\n self.A = np.array([[4.1,2.8,-1.2],[3.1,7.1,0.2],[4,2,3]])\n self.A = 0.5*(self.A - self.A.T)\n self.TA = tensors.Skew(self.A)\n self.B = np.array([[10.2,-9.3,2.5],[0.1,3.1,2.8],[0.1,3.2,-6.1]])\n self.B = 0.5*(self.B - self.B.T)\n self.TB = tensors.Skew(self.B)\n\n self.a = np.array([2.2,-1.2,2.5])\n self.va = tensors.Vector(self.a)\n\n self.s = 2.1\n\n def test_equality(self):\n self.assertEqual(self.TA, self.TA)\n\n def test_inequality(self):\n self.assertNotEqual(self.TA, self.TB)\n\n def test_scalar_mult(self):\n self.assertEqual(tensors.Skew(self.s*self.A), self.s * self.TA)\n self.assertEqual(tensors.Skew(self.A / self.s), self.TA / self.s)\n\n def test_add(self):\n self.assertEqual(tensors.Skew(self.A + self.B), self.TA + self.TB)\n self.assertEqual(tensors.Skew(self.A - self.B), self.TA - self.TB)\n\n def test_matrix_vector(self):\n self.assertEqual(tensors.Vector(np.dot(self.A, self.a)), self.TA*self.va)\n self.assertEqual(tensors.Vector(np.dot(self.a, self.A)), self.va*self.TA)\n\n def test_matrix_matrix(self):\n self.assertEqual(tensors.Skew(np.dot(self.A, self.B)), self.TA*self.TB)\n\n def test_transpose(self):\n self.assertEqual(tensors.Skew(self.A.T), self.TA.transpose())\n\n# Test various multiplicative combinations of tensors\nclass 
TestComboTensorMultiply(unittest.TestCase):\n def setUp(self):\n self.S = np.array([[4.1,2.8,-1.2],[3.1,7.1,0.2],[4,2,3]])\n self.S = 0.5*(self.S + self.S.T)\n self.TS = tensors.Symmetric(self.S)\n self.G = np.array([[10.2,-9.3,2.5],[0.1,3.1,2.8],[0.1,3.2,-6.1]])\n self.TG = tensors.RankTwo(self.G)\n self.W = np.array([[-5.0,7.1,1.0],[-0.2,0.25,1.2],[-0.4,0.4,-2]])\n self.W = 0.5*(self.W - self.W.T)\n self.TW = tensors.Skew(self.W)\n\n def test_sym_general(self):\n self.assertEqual(tensors.RankTwo(np.dot(self.S, self.G)), self.TS * self.TG)\n\n def test_general_sym(self):\n self.assertEqual(tensors.RankTwo(np.dot(self.G, self.S)), self.TG * self.TS)\n\n def test_skew_general(self):\n self.assertEqual(tensors.RankTwo(np.dot(self.W, self.G)), self.TW * self.TG)\n\n def test_general_skew(self):\n self.assertEqual(tensors.RankTwo(np.dot(self.G, self.W)), self.TG * self.TW)\n\n def test_skew_sym(self):\n self.assertEqual(tensors.RankTwo(np.dot(self.W, self.S)), self.TW * self.TS)\n \n def test_sym_skew(self):\n self.assertEqual(tensors.RankTwo(np.dot(self.S, self.W)), self.TS * self.TW)\n\n def test_contract_general_sym(self):\n self.assertTrue(np.isclose(\n np.einsum('ij,ij', self.G, self.S),\n self.TG.contract(self.TS)))\n\n def test_contract_sym_general(self):\n self.assertTrue(np.isclose(\n np.einsum('ij,ij', self.G, self.S),\n self.TS.contract(self.TG)))\n\n def test_contract_general_skew(self):\n self.assertTrue(np.isclose(\n np.einsum('ij,ij', self.G, self.W),\n self.TG.contract(self.TW)))\n\n def test_contract_skew_general(self):\n self.assertTrue(np.isclose(\n np.einsum('ij,ij', self.G, self.W),\n self.TW.contract(self.TG)))\n\n def test_contract_skew_sym(self):\n self.assertTrue(np.isclose(\n np.einsum('ij,ij', self.W, self.S),\n self.TW.contract(self.TS)))\n\n def test_contract_sym_skew(self):\n self.assertTrue(np.isclose(\n np.einsum('ij,ij', self.W, self.S),\n self.TS.contract(self.TW)))\n\n def test_contract_general_general(self):\n self.assertTrue(np.isclose(\n np.einsum('ij,ij', self.G, self.G),\n self.TG.contract(self.TG)))\n\n def test_contract_sym_sym(self):\n self.assertTrue(np.isclose(\n np.einsum('ij,ij', self.S, self.S),\n self.TS.contract(self.TS)))\n\n def test_contract_skew_skew(self):\n self.assertTrue(np.isclose(\n np.einsum('ij,ij', self.W, self.W),\n self.TW.contract(self.TW)))\n \nclass TestComboTensorAdd(unittest.TestCase):\n def setUp(self):\n self.S = np.array([[4.1,2.8,-1.2],[3.1,7.1,0.2],[4,2,3]])\n self.S = 0.5*(self.S + self.S.T)\n self.TS = tensors.Symmetric(self.S)\n self.G = np.array([[10.2,-9.3,2.5],[0.1,3.1,2.8],[0.1,3.2,-6.1]])\n self.TG = tensors.RankTwo(self.G)\n self.W = np.array([[-5.0,7.1,1.0],[-0.2,0.25,1.2],[-0.4,0.4,-2]])\n self.W = 0.5*(self.W - self.W.T)\n self.TW = tensors.Skew(self.W)\n\n def test_add_sym_general(self):\n self.assertEqual(tensors.RankTwo(self.S + self.G), self.TS + self.TG)\n\n def test_add_general_sym(self):\n self.assertEqual(tensors.RankTwo(self.G + self.S), self.TG + self.TS)\n\n def test_add_skew_general(self):\n self.assertEqual(tensors.RankTwo(self.W + self.G), self.TW + self.TG)\n\n def test_add_general_skew(self):\n self.assertEqual(tensors.RankTwo(self.G + self.W), self.TG + self.TW)\n\n def test_sub_sym_general(self):\n self.assertEqual(tensors.RankTwo(self.S - self.G), self.TS - self.TG)\n\n def test_sub_general_sym(self):\n self.assertEqual(tensors.RankTwo(self.G - self.S), self.TG - self.TS)\n\n def test_sub_skew_general(self):\n self.assertEqual(tensors.RankTwo(self.W - self.G), self.TW - self.TG)\n\n def 
test_sub_general_skew(self):\n self.assertEqual(tensors.RankTwo(self.G - self.W), self.TG - self.TW)\n\nclass TestRankFour(unittest.TestCase):\n def setUp(self):\n self.R1 = np.array([[[[ 7.09627147, 9.22330744, -1.36602973],\n [-7.86118175, -1.6342633 , -5.75516189],\n [ 2.61734248, 6.40678382, 3.37981603]],\n [[ 5.65100254, -7.88797059, 7.31396665],\n [-6.35471595, 5.67698069, -8.18795178],\n [ 9.10447016, 8.91183436, -6.65254333]],\n [[ 3.20429862, 2.99308849, 4.0035241 ],\n [-4.02440197, -4.39975872, -4.33542791],\n [ 9.36746226, -2.91156335, 4.51572032]]],\n [[[-9.23675199, 8.63546962, 6.83448027],\n [ 4.35044123, 2.24508666, 9.80054664],\n [ 0.30835223, -4.05208575, 5.68966326]],\n [[ 6.40300092, -8.25998136, 5.63566553],\n [-5.02801101, 5.64005224, -7.39586166],\n [ 5.90893633, 6.02074669, 1.37112738]],\n [[-2.68485216, -4.67660156, 3.52618441],\n [-2.52484812, -0.08561168, 3.39072868],\n [ 9.11295675, 2.63102786, -4.82285415]]],\n [[[ 8.31973154, 4.76081593, 4.38377207],\n [ 6.22896742, -3.83995097, 5.37501029],\n [-0.16770967, 7.9453854 , -4.95548491]],\n [[-5.67884611, -8.44970885, -7.42037867],\n [-5.19908193, -7.87006493, 1.65949787],\n [-3.25934672, 6.27340198, 5.98643056]],\n [[-4.20166968, -2.38276224, 3.04551936],\n [ 3.68445989, -5.84357996, 3.61183543],\n [ 1.54886677, 3.3659842 , 6.43067337]]]])\n self.TR1 = tensors.RankFour(self.R1)\n\n self.R2 = np.array([[[[-8.03675620e+00, 2.58575052e+00, 2.44069661e+00],\n [ 4.75021663e+00, 1.24463394e+00, -8.69751301e-01],\n [-1.46310894e+00, -1.15053235e+00, -3.75342982e+00]],\n [[-7.64033956e+00, 4.19956720e+00, -4.87644982e+00],\n [ 1.06577507e+00, 8.94272637e+00, 6.57264250e-01],\n [-4.22613258e+00, -5.08830314e+00, 1.57718186e+00]],\n [[-4.02243082e+00, -4.75463781e+00, -8.88662152e+00],\n [-1.30383950e+00, -1.98063574e+00, -3.18963544e+00],\n [-7.52071674e+00, 1.08931933e+00, 2.86988431e+00]]],\n [[[ 5.28621060e+00, -6.83799668e+00, 8.98005935e+00],\n [-7.92741122e+00, 5.75699425e-01, 1.66782544e+00],\n [ 2.60041984e+00, -1.04476986e-02, -6.12424787e+00]],\n [[-3.73727368e+00, 6.59764771e+00, -1.18045587e+00],\n [ 4.08567441e+00, 2.66148943e+00, -6.82495588e-01],\n [-1.64417262e+00, 5.33119298e+00, 8.11045988e-03]],\n [[-5.90193883e+00, -2.63316107e+00, 5.61381825e+00],\n [-6.08591194e+00, 8.77285539e+00, -7.15230533e+00],\n [ 3.15093096e+00, 1.41350149e+00, 1.11702016e+00]]],\n [[[-9.61472764e-01, -1.91492497e+00, 9.48275324e+00],\n [ 6.68841134e+00, 3.23412041e+00, -3.41944541e+00],\n [-9.80203467e+00, 6.58425335e+00, -2.16548636e+00]],\n [[ 6.63950740e+00, 3.91551441e+00, -8.98229111e+00],\n [ 9.84606756e+00, -8.16145090e+00, 8.41929062e-01],\n [-1.93839620e+00, 7.44485127e+00, -2.70832414e+00]],\n [[ 9.79265531e+00, -1.18212395e+00, -5.39433704e+00],\n [ 4.87152614e+00, 9.47287450e+00, 5.53838514e+00],\n [ 9.30443367e+00, 1.27090319e+00, 1.60409739e+00]]]])\n self.TR2 = tensors.RankFour(self.R2)\n\n self.SS = np.array([\n [ 5.99159801, -2.24342348, 0.26667281, -0.95466199, 3.98931478, -0.10846981],\n [ 1.86468226, -4.32391908, -7.82738638, -7.45008989, 5.89874777, 0.45820648],\n [-5.92565398, 2.4862829 , -6.02112389, 6.75455965, 4.65183463, 9.96900579],\n [ 0.60378883, -3.72189328, -7.63388446, -5.76559403, -0.3119789 , -1.1527258 ],\n [ 4.56813135, -6.06783828, -6.18341368, 8.06169686, -9.56928844, 9.08114655],\n [-8.25516614, 6.30663846, 7.2084381 , -7.38280703, -5.96279902, 8.9935982 ]])\n self.SS_full = common.ms2ts(self.SS)\n self.TSS = tensors.SymSymR4(self.SS)\n\n self.SW = np.array([\n [ 5.43434005, -6.55983214, 
0.29737664],\n [-4.77472172, -8.51287287, -3.19380185],\n [ 4.43407952, -6.02555614, 5.87786914],\n [ 1.89488869, -5.65383917, 8.83717547],\n [-7.18030867, 1.56100537, -9.83238641],\n [-4.52369317, -3.07284914, -7.54966999]])\n self.SW_full = common.ws2ts(self.SW)\n self.TSW = tensors.SymSkewR4(self.SW)\n\n self.WS = np.array([\n [-8.3567359 , -5.39728818, -8.00844442, -8.33365112, -0.97903364, -8.23943149],\n [-6.97125417, 4.34802055, 7.06281056, -1.57511617, 7.83359933, -9.37625432],\n [-6.0799489 , -6.0309543 , 3.68575895, 8.84296976, 6.55799427, -9.22029379]])\n self.WS_full = common.wws2ts(self.WS)\n self.TWS = tensors.SkewSymR4(self.WS)\n\n self.scalar = -2.2\n\n self.G = np.array([[ 9.50640677, 1.79084726, -2.8877036 ],\n [-1.63159958, 2.52866904, -8.71585042],\n [ 5.01859685, -8.7324075 , -0.42919134]])\n self.TG = tensors.RankTwo(self.G)\n\n self.S = np.array([[ 6.19999242, -6.95811611, -6.02901899],\n [ 8.38508084, 6.01607694, 6.79839425],\n [-4.4214246 , -2.36795313, -8.84070728]])\n self.S = 0.5*(self.S+self.S.T)\n self.TS = tensors.Symmetric(self.S)\n\n self.W = np.array([[-9.36416517, 2.95527444, 8.70983194],\n [-1.54693052, 8.7905658 , -5.10895168],\n [-8.52740468, -0.7741642 , 2.89544992]])\n self.W = 0.5 * (self.W - self.W.T)\n self.TW = tensors.Skew(self.W)\n\n def test_add(self):\n self.assertEqual(tensors.RankFour(self.R1 + self.R2), self.TR2 + self.TR1)\n self.assertEqual(tensors.RankFour(self.R1 - self.R2), self.TR1 - self.TR2)\n\n def test_equality(self):\n self.assertEqual(self.TR1, self.TR1)\n\n def test_inequality(self):\n self.assertNotEqual(self.TR1, self.TR2)\n\n def test_negate(self):\n self.assertEqual(tensors.RankFour(-self.R1), -self.TR1)\n\n def test_scalar_mult(self):\n self.assertEqual(tensors.RankFour(self.scalar * self.R1), self.scalar * self.TR1)\n self.assertEqual(tensors.RankFour(self.scalar * self.R2), self.TR2 * self.scalar)\n self.assertEqual(tensors.RankFour(self.R1 / self.scalar), self.TR1 / self.scalar)\n \n def test_double_contraction(self):\n self.assertEqual(tensors.RankFour(np.einsum('ijkl,klmn', self.R1, self.R2)), self.TR1 * self.TR2)\n\n def test_sym_sym(self):\n self.assertEqual(tensors.RankFour(np.einsum('ijkl,klmn', self.R1, self.SS_full)), self.TR1 * self.TSS)\n\n def test_sym_skew(self):\n self.assertEqual(tensors.RankFour(np.einsum('ijkl,klmn', self.R1, self.SW_full)), self.TR1 * self.TSW)\n\n def test_skew_sym(self):\n self.assertEqual(tensors.RankFour(np.einsum('ijkl,klmn', self.R1, self.WS_full)), self.TR1 * self.TWS)\n\n def test_ranktwo(self):\n self.assertEqual(tensors.RankTwo(np.einsum('ijkl,kl', self.R1, self.G)), self.TR1 * self.TG)\n\n def test_symmetric(self):\n self.assertEqual(tensors.RankTwo(np.einsum('ijkl,kl', self.R1, self.S)), self.TR1 * self.TS)\n\n def test_skew(self):\n self.assertEqual(tensors.RankTwo(np.einsum('ijkl,kl', self.R1, self.W)), self.TR1 * self.TW)\n\n def test_get(self):\n self.assertTrue(np.isclose(self.R1[1,2,0,1], self.TR1[1,2,0,1]))\n\n def test_set(self):\n self.TR1[1,1,1,1] = 4.0\n self.assertTrue(np.isclose(self.TR1[1,1,1,1],4.0))\n\nclass TestSymSymR4(unittest.TestCase):\n def setUp(self):\n self.SS1 = np.array([\n [ 5.99159801, -2.24342348, 0.26667281, -0.95466199, 3.98931478, -0.10846981],\n [ 1.86468226, -4.32391908, -7.82738638, -7.45008989, 5.89874777, 0.45820648],\n [-5.92565398, 2.4862829 , -6.02112389, 6.75455965, 4.65183463, 9.96900579],\n [ 0.60378883, -3.72189328, -7.63388446, -5.76559403, -0.3119789 , -1.1527258 ],\n [ 4.56813135, -6.06783828, -6.18341368, 8.06169686, 
-9.56928844, 9.08114655],\n [-8.25516614, 6.30663846, 7.2084381 , -7.38280703, -5.96279902, 8.9935982 ]])\n self.SS1_full = common.ms2ts(self.SS1)\n self.TSS1 = tensors.SymSymR4(self.SS1)\n\n self.SS2 = np.array([\n [-3.83767383, -8.63726504, -4.52095938, 9.35252323, 2.12800902, 3.26478511],\n [ 0.41705962, 3.95885105, -4.21676978, 4.12817198, 7.38839962, 5.79308578],\n [ 6.09635931, 2.31981366, -4.40237946, -5.51856189, 5.63572381, -5.55192385],\n [-0.97547288, -6.35708101, -4.35087656, -2.56567326, 4.32627031, 5.99408963],\n [ 6.30359707, 5.72926973, 2.47121354, -7.26333416, -5.08412215, -9.21872687],\n [-6.10780884, 1.01881487, -1.93491321, 6.13272186, -8.8721007, -2.97045116]])\n self.TSS2 = tensors.SymSymR4(self.SS2)\n\n self.SW = np.array([\n [ 5.43434005, -6.55983214, 0.29737664],\n [-4.77472172, -8.51287287, -3.19380185],\n [ 4.43407952, -6.02555614, 5.87786914],\n [ 1.89488869, -5.65383917, 8.83717547],\n [-7.18030867, 1.56100537, -9.83238641],\n [-4.52369317, -3.07284914, -7.54966999]])\n self.SW_full = common.ws2ts(self.SW)\n self.TSW = tensors.SymSkewR4(self.SW)\n\n self.WS = np.array([\n [-8.3567359 , -5.39728818, -8.00844442, -8.33365112, -0.97903364, -8.23943149],\n [-6.97125417, 4.34802055, 7.06281056, -1.57511617, 7.83359933, -9.37625432],\n [-6.0799489 , -6.0309543 , 3.68575895, 8.84296976, 6.55799427, -9.22029379]])\n self.WS_full = common.wws2ts(self.WS)\n self.TWS = tensors.SkewSymR4(self.WS)\n\n self.R = np.array([[[[-8.03675620e+00, 2.58575052e+00, 2.44069661e+00],\n [ 4.75021663e+00, 1.24463394e+00, -8.69751301e-01],\n [-1.46310894e+00, -1.15053235e+00, -3.75342982e+00]],\n [[-7.64033956e+00, 4.19956720e+00, -4.87644982e+00],\n [ 1.06577507e+00, 8.94272637e+00, 6.57264250e-01],\n [-4.22613258e+00, -5.08830314e+00, 1.57718186e+00]],\n [[-4.02243082e+00, -4.75463781e+00, -8.88662152e+00],\n [-1.30383950e+00, -1.98063574e+00, -3.18963544e+00],\n [-7.52071674e+00, 1.08931933e+00, 2.86988431e+00]]],\n [[[ 5.28621060e+00, -6.83799668e+00, 8.98005935e+00],\n [-7.92741122e+00, 5.75699425e-01, 1.66782544e+00],\n [ 2.60041984e+00, -1.04476986e-02, -6.12424787e+00]],\n [[-3.73727368e+00, 6.59764771e+00, -1.18045587e+00],\n [ 4.08567441e+00, 2.66148943e+00, -6.82495588e-01],\n [-1.64417262e+00, 5.33119298e+00, 8.11045988e-03]],\n [[-5.90193883e+00, -2.63316107e+00, 5.61381825e+00],\n [-6.08591194e+00, 8.77285539e+00, -7.15230533e+00],\n [ 3.15093096e+00, 1.41350149e+00, 1.11702016e+00]]],\n [[[-9.61472764e-01, -1.91492497e+00, 9.48275324e+00],\n [ 6.68841134e+00, 3.23412041e+00, -3.41944541e+00],\n [-9.80203467e+00, 6.58425335e+00, -2.16548636e+00]],\n [[ 6.63950740e+00, 3.91551441e+00, -8.98229111e+00],\n [ 9.84606756e+00, -8.16145090e+00, 8.41929062e-01],\n [-1.93839620e+00, 7.44485127e+00, -2.70832414e+00]],\n [[ 9.79265531e+00, -1.18212395e+00, -5.39433704e+00],\n [ 4.87152614e+00, 9.47287450e+00, 5.53838514e+00],\n [ 9.30443367e+00, 1.27090319e+00, 1.60409739e+00]]]])\n self.TR = tensors.RankFour(self.R)\n\n self.S = np.array([[4.1,2.8,-1.2],[3.1,7.1,0.2],[4,2,3]])\n self.S = 0.5*(self.S + self.S.T)\n self.TS = tensors.Symmetric(self.S)\n\n self.S2 = np.array([[10.2,-9.3,2.5],[0.1,3.1,2.8],[0.1,3.2,-6.1]])\n self.S2 = 0.5*(self.S2 + self.S2.T)\n self.TS2 = tensors.Symmetric(self.S2)\n\n self.scalar = 5.2\n\n self.G = np.array([[ 9.50640677, 1.79084726, -2.8877036 ],\n [-1.63159958, 2.52866904, -8.71585042],\n [ 5.01859685, -8.7324075 , -0.42919134]])\n self.TG = tensors.RankTwo(self.G)\n\n self.W = np.array([[-9.36416517, 2.95527444, 8.70983194],\n [-1.54693052, 8.7905658 
, -5.10895168],\n [-8.52740468, -0.7741642 , 2.89544992]])\n self.W = 0.5 * (self.W - self.W.T)\n self.TW = tensors.Skew(self.W)\n\n def test_to_full(self):\n full_np = common.ms2ts(self.SS1)\n full_t = tensors.RankFour(full_np)\n full = self.TSS1.to_full()\n self.assertEqual(full_t, full)\n\n def test_from_full(self):\n full = self.TSS1.to_full()\n new = full.to_sym()\n self.assertEqual(self.TSS1, new)\n\n def test_add(self):\n self.assertEqual(tensors.SymSymR4(self.SS1 + self.SS2), self.TSS2 + self.TSS1)\n self.assertEqual(tensors.SymSymR4(self.SS1 - self.SS2), self.TSS1 - self.TSS2)\n\n def test_equality(self):\n self.assertEqual(self.TSS1, self.TSS1)\n\n def test_inequality(self):\n self.assertNotEqual(self.TSS1, self.TSS2)\n\n def test_negate(self):\n self.assertEqual(tensors.SymSymR4(-self.SS1), -self.TSS1)\n\n def test_scalar_mult(self):\n self.assertEqual(tensors.SymSymR4(self.scalar * self.SS1), self.scalar * self.TSS1)\n self.assertEqual(tensors.SymSymR4(self.scalar * self.SS2), self.TSS2 * self.scalar)\n self.assertEqual(tensors.SymSymR4(self.SS1 / self.scalar), self.TSS1 / self.scalar)\n\n def test_product_sym_sym(self):\n self.assertEqual(tensors.SymSymR4(np.dot(self.SS1, self.SS2)), self.TSS1 * self.TSS2)\n\n def test_product_sym_full(self):\n self.assertEqual(tensors.RankFour(np.einsum('ijkl,klmn', self.SS1_full, self.R)), self.TSS1 * self.TR)\n\n def test_product_sym_skew(self):\n self.assertEqual(tensors.RankFour(np.einsum('ijkl,klmn', self.SS1_full, self.SW_full)), self.TSS1 * self.TSW)\n\n def test_product_skew_sym(self):\n self.assertEqual(tensors.RankFour(np.einsum('ijkl,klmn', self.SS1_full, self.WS_full)), self.TSS1 * self.TWS)\n\n def test_product_symmetric(self):\n self.assertEqual(tensors.Symmetric(common.usym(np.dot(self.SS1, common.sym(self.S)))), self.TSS1 * self.TS)\n\n def test_product_general(self):\n self.assertEqual(tensors.RankTwo(np.einsum('ijkl,kl', self.SS1_full, self.G)), self.TSS1 * self.TG)\n\n def test_product_skew(self):\n self.assertEqual(tensors.RankTwo(np.einsum('ijkl,kl', self.SS1_full, self.W)), self.TSS1 * self.TW)\n\n def test_douter(self):\n self.assertEqual(tensors.SymSymR4(common.ts2ms(np.einsum('ij,kl', self.S, self.S2))), tensors.douter(self.TS, self.TS2))\n\n def test_id(self):\n id_t = tensors.SymSymR4(np.eye(6))\n self.assertEqual(id_t, tensors.SymSymR4.id())\n\n def test_id_dev(self):\n ot = np.zeros((6,6))\n ot[:3,:3] = 1.0/3.0\n id_t = tensors.SymSymR4(np.eye(6) - ot)\n self.assertEqual(id_t, tensors.SymSymR4.id_dev())\n\nclass TestSymSkewR4(unittest.TestCase):\n def setUp(self):\n self.SW1 = np.array([\n [ 5.43434005, -6.55983214, 0.29737664],\n [-4.77472172, -8.51287287, -3.19380185],\n [ 4.43407952, -6.02555614, 5.87786914],\n [ 1.89488869, -5.65383917, 8.83717547],\n [-7.18030867, 1.56100537, -9.83238641],\n [-4.52369317, -3.07284914, -7.54966999]])\n self.SW1_full = common.ws2ts(self.SW1)\n self.TSW1 = tensors.SymSkewR4(self.SW1)\n\n self.SW2 = np.array([\n [ 7.90885123, -1.89089468, -6.95528566],\n [-2.53495619, 9.47533071, -2.76302205],\n [-8.57887706, 4.21216331, -7.68619983],\n [-5.45955495, 2.0523769 , -9.71153458],\n [-5.61696943, -4.02142773, -6.41654212],\n [-8.76272792, -3.60354692, 2.7402794 ]])\n\n self.SW2_full = common.ws2ts(self.SW2)\n self.TSW2 = tensors.SymSkewR4(self.SW2)\n\n self.WS = np.array([\n [-8.3567359 , -5.39728818, -8.00844442, -8.33365112, -0.97903364, -8.23943149],\n [-6.97125417, 4.34802055, 7.06281056, -1.57511617, 7.83359933, -9.37625432],\n [-6.0799489 , -6.0309543 , 3.68575895, 8.84296976, 
6.55799427, -9.22029379]])\n self.WS_full = common.wws2ts(self.WS)\n self.TWS = tensors.SkewSymR4(self.WS)\n\n self.SS = np.array([\n [ 5.99159801, -2.24342348, 0.26667281, -0.95466199, 3.98931478, -0.10846981],\n [ 1.86468226, -4.32391908, -7.82738638, -7.45008989, 5.89874777, 0.45820648],\n [-5.92565398, 2.4862829 , -6.02112389, 6.75455965, 4.65183463, 9.96900579],\n [ 0.60378883, -3.72189328, -7.63388446, -5.76559403, -0.3119789 , -1.1527258 ],\n [ 4.56813135, -6.06783828, -6.18341368, 8.06169686, -9.56928844, 9.08114655],\n [-8.25516614, 6.30663846, 7.2084381 , -7.38280703, -5.96279902, 8.9935982 ]])\n self.SS_full = common.ms2ts(self.SS)\n self.TSS = tensors.SymSymR4(self.SS)\n\n self.R = np.array([[[[-8.03675620e+00, 2.58575052e+00, 2.44069661e+00],\n [ 4.75021663e+00, 1.24463394e+00, -8.69751301e-01],\n [-1.46310894e+00, -1.15053235e+00, -3.75342982e+00]],\n [[-7.64033956e+00, 4.19956720e+00, -4.87644982e+00],\n [ 1.06577507e+00, 8.94272637e+00, 6.57264250e-01],\n [-4.22613258e+00, -5.08830314e+00, 1.57718186e+00]],\n [[-4.02243082e+00, -4.75463781e+00, -8.88662152e+00],\n [-1.30383950e+00, -1.98063574e+00, -3.18963544e+00],\n [-7.52071674e+00, 1.08931933e+00, 2.86988431e+00]]],\n [[[ 5.28621060e+00, -6.83799668e+00, 8.98005935e+00],\n [-7.92741122e+00, 5.75699425e-01, 1.66782544e+00],\n [ 2.60041984e+00, -1.04476986e-02, -6.12424787e+00]],\n [[-3.73727368e+00, 6.59764771e+00, -1.18045587e+00],\n [ 4.08567441e+00, 2.66148943e+00, -6.82495588e-01],\n [-1.64417262e+00, 5.33119298e+00, 8.11045988e-03]],\n [[-5.90193883e+00, -2.63316107e+00, 5.61381825e+00],\n [-6.08591194e+00, 8.77285539e+00, -7.15230533e+00],\n [ 3.15093096e+00, 1.41350149e+00, 1.11702016e+00]]],\n [[[-9.61472764e-01, -1.91492497e+00, 9.48275324e+00],\n [ 6.68841134e+00, 3.23412041e+00, -3.41944541e+00],\n [-9.80203467e+00, 6.58425335e+00, -2.16548636e+00]],\n [[ 6.63950740e+00, 3.91551441e+00, -8.98229111e+00],\n [ 9.84606756e+00, -8.16145090e+00, 8.41929062e-01],\n [-1.93839620e+00, 7.44485127e+00, -2.70832414e+00]],\n [[ 9.79265531e+00, -1.18212395e+00, -5.39433704e+00],\n [ 4.87152614e+00, 9.47287450e+00, 5.53838514e+00],\n [ 9.30443367e+00, 1.27090319e+00, 1.60409739e+00]]]])\n self.TR = tensors.RankFour(self.R)\n\n self.S = np.array([[4.1,2.8,-1.2],[3.1,7.1,0.2],[4,2,3]])\n self.S = 0.5*(self.S + self.S.T)\n self.TS = tensors.Symmetric(self.S)\n\n self.scalar = 5.2\n\n self.G = np.array([[ 9.50640677, 1.79084726, -2.8877036 ],\n [-1.63159958, 2.52866904, -8.71585042],\n [ 5.01859685, -8.7324075 , -0.42919134]])\n self.TG = tensors.RankTwo(self.G)\n\n self.W = np.array([[-9.36416517, 2.95527444, 8.70983194],\n [-1.54693052, 8.7905658 , -5.10895168],\n [-8.52740468, -0.7741642 , 2.89544992]])\n self.W = 0.5 * (self.W - self.W.T)\n self.TW = tensors.Skew(self.W)\n\n def test_to_full(self):\n full_np = common.ws2ts(self.SW1)\n full_t = tensors.RankFour(full_np)\n full = self.TSW1.to_full()\n self.assertEqual(full_t, full)\n\n def test_from_full(self):\n full = self.TSW1.to_full()\n new = full.to_symskew()\n self.assertEqual(self.TSW1, new)\n\n def test_add(self):\n self.assertEqual(tensors.SymSkewR4(self.SW1 + self.SW2), self.TSW2 + self.TSW1)\n self.assertEqual(tensors.SymSkewR4(self.SW1 - self.SW2), self.TSW1 - self.TSW2)\n\n def test_equality(self):\n self.assertEqual(self.TSW1, self.TSW1)\n\n def test_inequality(self):\n self.assertNotEqual(self.TSW1, self.TSW2)\n\n def test_negate(self):\n self.assertEqual(tensors.SymSkewR4(-self.SW1), -self.TSW1)\n\n def test_scalar_mult(self):\n 
self.assertEqual(tensors.SymSkewR4(self.scalar * self.SW1), self.scalar * self.TSW1)\n self.assertEqual(tensors.SymSkewR4(self.scalar * self.SW2), self.TSW2 * self.scalar)\n self.assertEqual(tensors.SymSkewR4(self.SW1 / self.scalar), self.TSW1 / self.scalar)\n\n def test_product_sym_sym(self):\n self.assertEqual(tensors.RankFour(np.einsum('ijkl,klmn', self.SW1_full, self.SS_full)), self.TSW1 * self.TSS)\n\n def test_product_sym_full(self):\n self.assertEqual(tensors.RankFour(np.einsum('ijkl,klmn', self.SW1_full, self.R)), self.TSW1 * self.TR)\n\n def test_product_sym_skew(self):\n self.assertEqual(tensors.RankFour(np.einsum('ijkl,klmn', self.SW1_full, self.SW2_full)), self.TSW1 * self.TSW2)\n\n def test_product_skew_sym(self):\n self.assertEqual(tensors.RankFour(np.einsum('ijkl,klmn', self.SW1_full, self.WS_full)), self.TSW1 * self.TWS)\n\n def test_product_symmetric(self):\n self.assertEqual(tensors.RankTwo(np.einsum('ijkl,kl', self.SW1_full, self.S)), self.TSW1 * self.TS)\n\n def test_product_general(self):\n self.assertEqual(tensors.RankTwo(np.einsum('ijkl,kl', self.SW1_full, self.G)), self.TSW1 * self.TG)\n\n def test_product_skew(self):\n self.assertEqual(tensors.RankTwo(np.einsum('ijkl,kl', self.SW1_full, self.W)), self.TSW1 * self.TW)\n\nclass TestSkewSymR4(unittest.TestCase):\n def setUp(self):\n self.WS1 = np.array([\n [-8.3567359 , -5.39728818, -8.00844442, -8.33365112, -0.97903364, -8.23943149],\n [-6.97125417, 4.34802055, 7.06281056, -1.57511617, 7.83359933, -9.37625432],\n [-6.0799489 , -6.0309543 , 3.68575895, 8.84296976, 6.55799427, -9.22029379]])\n self.WS1_full = common.wws2ts(self.WS1)\n self.TWS1 = tensors.SkewSymR4(self.WS1)\n\n self.WS2 = np.array([\n [-8.80662663, 0.46179936, -5.49454144, 7.91618428, 5.34053953, -6.68997405],\n [ 4.15874971, -4.59781751, 7.43746813, 8.99981425, -0.97692573, 2.5075246 ],\n [ 9.53201007, -8.03524224, 0.94329443, -6.44415877, -9.92911741, 3.51742689]])\n self.WS2_full = common.wws2ts(self.WS2)\n self.TWS2 = tensors.SkewSymR4(self.WS2)\n\n self.SW = np.array([\n [ 5.43434005, -6.55983214, 0.29737664],\n [-4.77472172, -8.51287287, -3.19380185],\n [ 4.43407952, -6.02555614, 5.87786914],\n [ 1.89488869, -5.65383917, 8.83717547],\n [-7.18030867, 1.56100537, -9.83238641],\n [-4.52369317, -3.07284914, -7.54966999]])\n self.SW_full = common.ws2ts(self.SW)\n self.TSW = tensors.SymSkewR4(self.SW)\n\n self.SS = np.array([\n [ 5.99159801, -2.24342348, 0.26667281, -0.95466199, 3.98931478, -0.10846981],\n [ 1.86468226, -4.32391908, -7.82738638, -7.45008989, 5.89874777, 0.45820648],\n [-5.92565398, 2.4862829 , -6.02112389, 6.75455965, 4.65183463, 9.96900579],\n [ 0.60378883, -3.72189328, -7.63388446, -5.76559403, -0.3119789 , -1.1527258 ],\n [ 4.56813135, -6.06783828, -6.18341368, 8.06169686, -9.56928844, 9.08114655],\n [-8.25516614, 6.30663846, 7.2084381 , -7.38280703, -5.96279902, 8.9935982 ]])\n self.SS_full = common.ms2ts(self.SS)\n self.TSS = tensors.SymSymR4(self.SS)\n\n self.R = np.array([[[[-8.03675620e+00, 2.58575052e+00, 2.44069661e+00],\n [ 4.75021663e+00, 1.24463394e+00, -8.69751301e-01],\n [-1.46310894e+00, -1.15053235e+00, -3.75342982e+00]],\n [[-7.64033956e+00, 4.19956720e+00, -4.87644982e+00],\n [ 1.06577507e+00, 8.94272637e+00, 6.57264250e-01],\n [-4.22613258e+00, -5.08830314e+00, 1.57718186e+00]],\n [[-4.02243082e+00, -4.75463781e+00, -8.88662152e+00],\n [-1.30383950e+00, -1.98063574e+00, -3.18963544e+00],\n [-7.52071674e+00, 1.08931933e+00, 2.86988431e+00]]],\n [[[ 5.28621060e+00, -6.83799668e+00, 8.98005935e+00],\n [-7.92741122e+00, 
5.75699425e-01, 1.66782544e+00],\n [ 2.60041984e+00, -1.04476986e-02, -6.12424787e+00]],\n [[-3.73727368e+00, 6.59764771e+00, -1.18045587e+00],\n [ 4.08567441e+00, 2.66148943e+00, -6.82495588e-01],\n [-1.64417262e+00, 5.33119298e+00, 8.11045988e-03]],\n [[-5.90193883e+00, -2.63316107e+00, 5.61381825e+00],\n [-6.08591194e+00, 8.77285539e+00, -7.15230533e+00],\n [ 3.15093096e+00, 1.41350149e+00, 1.11702016e+00]]],\n [[[-9.61472764e-01, -1.91492497e+00, 9.48275324e+00],\n [ 6.68841134e+00, 3.23412041e+00, -3.41944541e+00],\n [-9.80203467e+00, 6.58425335e+00, -2.16548636e+00]],\n [[ 6.63950740e+00, 3.91551441e+00, -8.98229111e+00],\n [ 9.84606756e+00, -8.16145090e+00, 8.41929062e-01],\n [-1.93839620e+00, 7.44485127e+00, -2.70832414e+00]],\n [[ 9.79265531e+00, -1.18212395e+00, -5.39433704e+00],\n [ 4.87152614e+00, 9.47287450e+00, 5.53838514e+00],\n [ 9.30443367e+00, 1.27090319e+00, 1.60409739e+00]]]])\n self.TR = tensors.RankFour(self.R)\n\n self.S = np.array([[4.1,2.8,-1.2],[3.1,7.1,0.2],[4,2,3]])\n self.S = 0.5*(self.S + self.S.T)\n self.TS = tensors.Symmetric(self.S)\n\n self.scalar = 5.2\n\n self.G = np.array([[ 9.50640677, 1.79084726, -2.8877036 ],\n [-1.63159958, 2.52866904, -8.71585042],\n [ 5.01859685, -8.7324075 , -0.42919134]])\n self.TG = tensors.RankTwo(self.G)\n\n self.W = np.array([[-9.36416517, 2.95527444, 8.70983194],\n [-1.54693052, 8.7905658 , -5.10895168],\n [-8.52740468, -0.7741642 , 2.89544992]])\n self.W = 0.5 * (self.W - self.W.T)\n self.TW = tensors.Skew(self.W)\n\n def test_to_full(self):\n full_np = common.wws2ts(self.WS1)\n full_t = tensors.RankFour(full_np)\n full = self.TWS1.to_full()\n self.assertEqual(full_t, full)\n\n def test_from_full(self):\n full = self.TWS1.to_full()\n new = full.to_skewsym()\n self.assertEqual(self.TWS1, new)\n\n def test_add(self):\n self.assertEqual(tensors.SkewSymR4(self.WS1 + self.WS2), self.TWS2 + self.TWS1)\n self.assertEqual(tensors.SkewSymR4(self.WS1 - self.WS2), self.TWS1 - self.TWS2)\n\n def test_equality(self):\n self.assertEqual(self.TWS1, self.TWS1)\n\n def test_inequality(self):\n self.assertNotEqual(self.TWS1, self.TWS2)\n\n def test_negate(self):\n self.assertEqual(tensors.SkewSymR4(-self.WS1), -self.TWS1)\n\n def test_scalar_mult(self):\n self.assertEqual(tensors.SkewSymR4(self.scalar * self.WS1), self.scalar * self.TWS1)\n self.assertEqual(tensors.SkewSymR4(self.scalar * self.WS2), self.TWS2 * self.scalar)\n self.assertEqual(tensors.SkewSymR4(self.WS1 / self.scalar), self.TWS1 / self.scalar)\n\n def test_product_sym_sym(self):\n self.assertEqual(tensors.RankFour(np.einsum('ijkl,klmn', self.WS1_full, self.SS_full)), self.TWS1 * self.TSS)\n\n def test_product_sym_full(self):\n self.assertEqual(tensors.RankFour(np.einsum('ijkl,klmn', self.WS1_full, self.R)), self.TWS1 * self.TR)\n\n def test_product_sym_skew(self):\n self.assertEqual(tensors.RankFour(np.einsum('ijkl,klmn', self.WS1_full, self.SW_full)), self.TWS1 * self.TSW)\n\n def test_product_skew_sym(self):\n self.assertEqual(tensors.RankFour(np.einsum('ijkl,klmn', self.WS1_full, self.WS2_full)), self.TWS1 * self.TWS2)\n\n def test_product_symmetric(self):\n self.assertEqual(tensors.RankTwo(np.einsum('ijkl,kl', self.WS1_full, self.S)), self.TWS1 * self.TS)\n\n def test_product_general(self):\n self.assertEqual(tensors.RankTwo(np.einsum('ijkl,kl', self.WS1_full, self.G)), self.TWS1 * self.TG)\n\n def test_product_skew(self):\n self.assertEqual(tensors.RankTwo(np.einsum('ijkl,kl', self.WS1_full, self.W)), self.TWS1 * self.TW)\n\n def test_douter(self):\n 
self.assertEqual(tensors.SkewSymR4(common.ts2wws(np.einsum('ij,kl', self.W, self.S))), tensors.douter(self.TW, self.TS))\n\nclass TestCPSpeciality(unittest.TestCase):\n def setUp(self):\n self.SS = np.array([\n [ 5.99159801, -2.24342348, 0.26667281, -0.95466199, 3.98931478, -0.10846981],\n [ 1.86468226, -4.32391908, -7.82738638, -7.45008989, 5.89874777, 0.45820648],\n [-5.92565398, 2.4862829 , -6.02112389, 6.75455965, 4.65183463, 9.96900579],\n [ 0.60378883, -3.72189328, -7.63388446, -5.76559403, -0.3119789 , -1.1527258 ],\n [ 4.56813135, -6.06783828, -6.18341368, 8.06169686, -9.56928844, 9.08114655],\n [-8.25516614, 6.30663846, 7.2084381 , -7.38280703, -5.96279902, 8.9935982 ]])\n self.SS_full = common.ms2ts(self.SS)\n self.TSS = tensors.SymSymR4(self.SS)\n\n self.W = np.array([[-9.36416517, 2.95527444, 8.70983194],\n [-1.54693052, 8.7905658 , -5.10895168],\n [-8.52740468, -0.7741642 , 2.89544992]])\n self.W = 0.5 * (self.W - self.W.T)\n self.TW = tensors.Skew(self.W)\n\n self.S = np.array([[4.1,2.8,-1.2],[3.1,7.1,0.2],[4,2,3]])\n self.S = 0.5*(self.S + self.S.T)\n self.TS = tensors.Symmetric(self.S)\n\n self.WS = np.array([\n [-8.3567359 , -5.39728818, -8.00844442, -8.33365112, -0.97903364, -8.23943149],\n [-6.97125417, 4.34802055, 7.06281056, -1.57511617, 7.83359933, -9.37625432],\n [-6.0799489 , -6.0309543 , 3.68575895, 8.84296976, 6.55799427, -9.22029379]])\n self.WS_full = common.wws2ts(self.WS)\n self.TWS = tensors.SkewSymR4(self.WS)\n\n def test_symsymskew_skewsymsym(self):\n A1 = tensors.SymSymR4Skew_SkewSymR4SymR4(self.TSS, self.TW)\n\n A2_ten = np.einsum('kmst,ml', self.SS_full, self.W) - np.einsum('km,mlst',\n self.W, self.SS_full)\n A2 = tensors.SymSymR4(common.ts2ms(A2_ten))\n\n self.assertEqual(A1, A2)\n\n def test_symskewsym_skewsymsym(self):\n A1 = tensors.SymSkewR4Sym_SkewSymR4SymR4(self.TWS, self.TS)\n\n A2_ten = np.einsum('km,mlst', self.S, self.WS_full) - np.einsum('kmst,ml',\n self.WS_full, self.S)\n A2 = tensors.SymSymR4(common.ts2ms(A2_ten))\n\n self.assertEqual(A1, A2)\n\n def test_special(self):\n A1 = tensors.SpecialSymSymR4Sym(self.TSS, self.TS)\n A2_ten = np.einsum('ijkz,ky', self.SS_full, self.S) - np.einsum(\n 'ijyl,zl', self.SS_full, self.S)\n A2 = tensors.SymSkewR4(common.ts2sww(A2_ten))\n\n self.assertEqual(A1, A2)\n" ]
[ [ "numpy.allclose", "numpy.eye", "numpy.sum", "numpy.zeros", "numpy.einsum", "numpy.outer", "numpy.linalg.inv", "numpy.isclose", "numpy.cross", "numpy.trace", "numpy.array", "numpy.dot", "numpy.linalg.norm" ] ]
dirmeier/jax
[ "9ba28d263479ed5b9cada97bf73aec92ccc69bc6" ]
[ "jax/core.py" ]
[ "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport operator\nfrom operator import attrgetter\nfrom contextlib import contextmanager, suppress\nfrom collections import namedtuple\nfrom functools import total_ordering\nimport itertools as it\nfrom weakref import ref\nimport threading\nimport types\nfrom typing import (Any, Callable, ClassVar, Dict, Generator,\n Iterator, List, NamedTuple, Optional, Sequence, Set, Tuple,\n Type, Union, cast)\n\nimport numpy as np\n\nfrom . import dtypes\nfrom .config import FLAGS, config\nfrom . import linear_util as lu\n\nfrom . import source_info_util\nfrom .util import safe_zip, safe_map, partial, curry, prod, partialmethod\nfrom .pprint_util import pp, vcat, PrettyPrint\n\n# TODO(dougalm): compilation cache breaks the leak detector. Consisder solving.\ncheck_leaks = False\n\n# Disables internal invariant checks\nskip_checks = not FLAGS.jax_enable_checks # not __debug__ # google doesn't use -O\n\n@contextmanager\ndef skipping_checks():\n \"\"\"Context manager for temporarily disabling checks.\"\"\"\n global skip_checks\n old_value, skip_checks = skip_checks, True\n try:\n yield\n finally:\n skip_checks = old_value\n\nzip = safe_zip\nmap = safe_map\n\n\n# -------------------- jaxprs --------------------\n\nclass Jaxpr:\n constvars: List['Var']\n invars: List['Var']\n outvars: List['Atom']\n eqns: List['JaxprEqn']\n\n def __init__(self, constvars: Sequence['Var'], invars: Sequence['Var'],\n outvars: Sequence['Atom'], eqns: Sequence['JaxprEqn']):\n \"\"\"\n Args:\n constvars: list of variables introduced for constants. Array constants are\n replaced with such variables while scalar constants are kept inline.\n invars: list of input variables. 
Together, `constvars` and `invars` are\n the inputs to the Jaxpr.\n outvars: list of output variables.\n eqns: list of equations.\n \"\"\"\n self.constvars = list(constvars)\n self.invars = list(invars)\n self.outvars = list(outvars)\n self.eqns = list(eqns)\n\n def __str__(self):\n return str(pp_jaxpr(self))\n __repr__ = __str__\n\n\ndef jaxprs_in_params(params) -> Iterator[Jaxpr]:\n for val in params.values():\n vals = val if isinstance(val, tuple) else (val,)\n for v in vals:\n if isinstance(v, Jaxpr):\n yield v\n elif isinstance(v, ClosedJaxpr):\n yield v.jaxpr\n\n\ndef subjaxprs(jaxpr: Jaxpr) -> Iterator[Jaxpr]:\n \"\"\"Generator for all subjaxprs found in the params of jaxpr.eqns.\n\n Does not descend recursively into the found subjaxprs.\n \"\"\"\n for eqn in jaxpr.eqns:\n yield from jaxprs_in_params(eqn.params)\n\n\nclass ClosedJaxpr:\n jaxpr: Jaxpr\n consts: List['Any']\n\n def __init__(self, jaxpr: Jaxpr, consts: Sequence):\n assert len(consts) == len(jaxpr.constvars)\n self.jaxpr = jaxpr\n self.consts = list(consts)\n\n @property\n def in_avals(self):\n return [v.aval for v in self.jaxpr.invars]\n\n @property\n def out_avals(self):\n return [v.aval for v in self.jaxpr.outvars]\n\n @property\n def literals(self):\n return self.consts # backwards compatible alias\n\n def __str__(self): return str(self.jaxpr)\n def __repr__(self): return repr(self.jaxpr)\n\n@curry\ndef jaxpr_as_fun(closed_jaxpr: ClosedJaxpr, *args):\n return eval_jaxpr(closed_jaxpr.jaxpr, closed_jaxpr.consts, *args)\n\n\nclass JaxprEqn(NamedTuple):\n invars: List['Atom']\n outvars: List['Var']\n primitive: 'Primitive'\n params: Dict[str, Any]\n source_info: Optional[source_info_util.Traceback]\n\n def __repr__(self): return str(pp_eqn(self)).rstrip()\n\ndef new_jaxpr_eqn(invars, outvars, primitive, params, source_info=None):\n return JaxprEqn(invars, outvars, primitive, params, source_info)\n\n\n@total_ordering\nclass Var:\n # TODO(frostig,mattjj): We don't override __eq__ or __hash__, so comparison is\n # by object id, but pretty printing might collide.\n count: int\n suffix: str\n aval: 'AbstractValue'\n\n def __init__(self, count: int, suffix: str, aval: 'AbstractValue'):\n self.count = count\n self.suffix = suffix\n self.aval = raise_to_shaped(aval)\n\n def __lt__(self, other):\n if not isinstance(other, Var):\n return NotImplemented\n else:\n return (self.count, self.suffix) < (other.count, other.suffix)\n\n def __repr__(self):\n rem = self.count\n s = ''\n while True:\n rem, i = rem // 26, rem % 26\n s = chr(97 + i % 26) + s\n if not rem:\n break\n return s + self.suffix\n\ndef _jaxpr_vars(jaxpr):\n return it.chain(\n jaxpr.invars, jaxpr.constvars,\n (v for eqn in jaxpr.eqns for v in eqn.outvars))\n\ndef gensym(jaxprs: Optional[Sequence[Jaxpr]] = None,\n suffix: str = '') -> Callable[['AbstractValue'], Var]:\n \"\"\"Produce distinct variables, printed with the optional suffix.\n\n If `jaxprs` is provided, the variables produced will be distinct from those in\n any of the given jaxprs.\n \"\"\"\n if jaxprs is None:\n start = 0\n else:\n all_vars = it.chain.from_iterable(_jaxpr_vars(j) for j in jaxprs)\n start = 1 + max((v.count for v in all_vars), default=-1)\n counter = it.count(start=start)\n return lambda aval: Var(next(counter), suffix, aval)\n\n# In a jaxpr, `dropvar` can appear in place of a bound variable to indicate that\n# the assignment is dropped, i.e. that an expression's output value will never\n# be read. 
In that sense, `dropvar` is not a variable, but it is convenient to\n# treat it as a special case of one. Its `aval` is similarly inexact.\nclass DropVar(Var):\n count = -1\n suffix = ''\n def __init__(self): pass\n @property\n def aval(self): return abstract_unit\n def __repr__(self): return '_'\ndropvar = DropVar()\n\nclass Literal:\n __slots__ = [\"val\", \"hash\"]\n\n val: Any\n hash: Optional[int]\n\n def __init__(self, val):\n self.val = val\n try:\n self.hash = hash(val)\n except TypeError:\n if type(val) in literalable_types:\n try:\n self.hash = hash((val.item(), val.dtype))\n except (TypeError, AttributeError, ValueError):\n self.hash = None\n\n @property\n def aval(self):\n return raise_to_shaped(get_aval(self.val))\n\n def __hash__(self):\n assert False\n\n def __repr__(self):\n if hasattr(self, 'hash'):\n return '{}'.format(self.val)\n else:\n return 'Literal(val={})'.format(self.val)\n\nliteralable_types: Set[type] = set()\n\nAtom = Union[Var, Literal]\n\nclass Primitive:\n name: str\n multiple_results = False # set for multi-output primitives\n call_primitive = False # set for call primitives processed in final style\n map_primitive = False # set for map primitives processed in final style\n\n def __init__(self, name: str):\n self.name = name\n\n def __repr__(self):\n return '{}'.format(self.name)\n\n\n def bind(self, *args, **params):\n assert skip_checks or all(isinstance(arg, Tracer)\n or valid_jaxtype(arg) for arg in args), args\n top_trace = find_top_trace(args)\n tracers = map(top_trace.full_raise, args)\n out = top_trace.process_primitive(self, tracers, params)\n return map(full_lower, out) if self.multiple_results else full_lower(out)\n\n def def_impl(self, impl):\n self.impl = impl\n return impl\n\n def def_abstract_eval(self, abstract_eval):\n self.abstract_eval = abstract_eval\n return abstract_eval\n\n def def_custom_bind(self, bind):\n self.bind = bind\n return bind\n\n def impl(self, *args, **params):\n raise NotImplementedError(\"Evaluation rule for '{}' not implemented\"\n .format(self.name))\n\n def abstract_eval(self, *args, **params):\n raise NotImplementedError(\"Abstract evaluation for '{}' not implemented\"\n .format(self.name))\n\n\n# -------------------- lifting --------------------\n\n# TODO(necula): this belongs next to pe.new_eqn_recipe, but is needed in\n# core.py. Plan to move all these utilities to jaxpr.py.\ndef extract_call_jaxpr(\n primitive: Primitive,\n params: Dict[str, Any]) -> Tuple[Optional[Jaxpr], Dict[str, Any]]:\n \"\"\"Extract the call primitive subjaxpr from the params.\n\n Returns the subjaxpr and the params without the \"call_jaxpr\" value. 
If this is\n not a call primitive then returns (None, params).\n \"\"\"\n if not (primitive.call_primitive or primitive.map_primitive):\n return (None, params)\n else:\n assert \"call_jaxpr\" in params\n new_params = dict(params)\n del new_params[\"call_jaxpr\"]\n return (params[\"call_jaxpr\"], new_params)\n\n\ndef eval_jaxpr(jaxpr: Jaxpr, consts, *args):\n def read(v):\n if type(v) is Literal:\n return v.val\n else:\n return env[v]\n\n def write(v, val):\n env[v] = val\n\n env: Dict[Var, Any] = {}\n write(unitvar, unit)\n map(write, jaxpr.constvars, consts)\n map(write, jaxpr.invars, args)\n for eqn in jaxpr.eqns:\n in_vals = map(read, eqn.invars)\n call_jaxpr, params = extract_call_jaxpr(eqn.primitive, eqn.params)\n if call_jaxpr:\n subfuns = [lu.wrap_init(partial(eval_jaxpr, call_jaxpr, ()))]\n else:\n subfuns = []\n with source_info_util.user_context(eqn.source_info):\n ans = eqn.primitive.bind(*(subfuns + in_vals), **params)\n if eqn.primitive.multiple_results:\n map(write, eqn.outvars, ans)\n else:\n write(eqn.outvars[0], ans)\n return map(read, jaxpr.outvars)\n\n\n# -------------------- tracing --------------------\n\n\nclass Trace:\n __slots__ = ['main', 'level', 'sublevel']\n\n main: 'MainTrace'\n level: int\n sublevel: 'Sublevel'\n\n def __init__(self, main: 'MainTrace', sublevel: 'Sublevel') -> None:\n self.main = main\n self.level = main.level\n self.sublevel = sublevel\n\n def full_raise(self, val) -> 'Tracer':\n if not isinstance(val, Tracer):\n return self.pure(val)\n val._assert_live()\n level = self.level\n sublevel = self.sublevel\n if val._trace.main is self.main:\n if val._trace.sublevel == sublevel:\n return val\n elif val._trace.sublevel < sublevel:\n return self.sublift(val)\n else:\n raise escaped_tracer_error(\"Can't lift sublevels {} to {}\"\n .format(val._trace.sublevel, sublevel))\n elif val._trace.level < level:\n if val._trace.sublevel > sublevel:\n raise escaped_tracer_error(\"Incompatible sublevel: {}, {}\"\n .format(val._trace, (level, sublevel)))\n return self.lift(val)\n elif val._trace.level > level:\n raise escaped_tracer_error(\"Can't lift level {} to {}\"\n .format(val, self))\n else: # val._trace.level == self.level:\n raise escaped_tracer_error(\"Different traces at same level: {}, {}\"\n .format(val, self))\n\n def pure(self, val):\n raise NotImplementedError(\"must override\")\n\n def lift(self, tracer):\n raise NotImplementedError(\"must override\")\n\n def sublift(self, tracer):\n raise NotImplementedError(\"must override\")\n\n def process_primitive(self, primitive, tracers, params):\n raise NotImplementedError(\"must override\")\n\n def __repr__(self):\n return '{}(level={}/{})'.format(\n self.__class__.__name__, self.level, self.sublevel)\n\n def process_call(self, call_primitive, f, tracers, params):\n msg = (f\"{type(self)} must override process_call to handle call-like \"\n \"primitives\")\n raise NotImplementedError(msg)\n\n def process_map(self, call_primitive, f, tracers, params):\n msg = (f\"{type(self)} must override process_map to handle map-like \"\n \"primitives\")\n raise NotImplementedError(msg)\n\n def process_custom_jvp_call(self, primitive, fun, jvp, tracers):\n msg = (f\"{type(self)} must override process_custom_jvp_call \"\n \"to handle custom_jvp primitives\")\n raise NotImplementedError(msg)\n\n def process_custom_vjp_call(self, primitive, fun, fwd, bwd, tracers, out_trees):\n msg = (f\"{type(self)} must override process_custom_vjp_call \"\n \"to handle custom_vjp primitives\")\n raise NotImplementedError(msg)\n\ndef 
escaped_tracer_error(detail=None):\n msg = (\"Encountered an unexpected tracer. Perhaps this tracer escaped \"\n \"through global state from a previously traced function.\\n\"\n \"The functions being transformed should not save traced values to \"\n \"global state.\")\n if detail:\n msg += \" Detail: {}.\".format(detail)\n return UnexpectedTracerError(msg)\n\nclass UnexpectedTracerError(Exception): pass\n\nclass Tracer:\n __array_priority__ = 1000\n __slots__ = ['_trace', '__weakref__']\n\n def __array__(self, *args, **kw):\n msg = (\"The numpy.ndarray conversion method __array__() was called on \"\n f\"the JAX Tracer object {self}.\\n\\n\"\n \"This error can occur when a JAX Tracer object is passed to a raw \"\n \"numpy function, or a method on a numpy.ndarray object. You might \"\n \"want to check that you are using `jnp` together with \"\n \"`import jax.numpy as jnp` rather than using `np` via \"\n \"`import numpy as np`. If this error arises on a line that involves \"\n \"array indexing, like `x[idx]`, it may be that the array being \"\n \"indexed `x` is a raw numpy.ndarray while the indices `idx` are a \"\n \"JAX Tracer instance; in that case, you can instead write \"\n \"`jax.device_put(x)[idx]`.\")\n raise Exception(msg)\n\n def __init__(self, trace: Trace):\n self._trace = trace\n\n def __iter__(self):\n return iter(self.aval._iter(self))\n\n def __len__(self):\n return self.aval._len(self)\n\n @property\n def aval(self):\n raise NotImplementedError(\"must override\")\n\n def _assert_live(self) -> None:\n pass # Override for liveness checking\n\n # Python looks up special methods only on classes, not instances. This means\n # these methods needs to be defined explicitly rather than relying on\n # __getattr__.\n def __neg__(self): return self.aval._neg(self)\n def __pos__(self): return self.aval._pos(self)\n def __eq__(self, other): return self.aval._eq(self, other)\n def __ne__(self, other): return self.aval._ne(self, other)\n def __lt__(self, other): return self.aval._lt(self, other)\n def __le__(self, other): return self.aval._le(self, other)\n def __gt__(self, other): return self.aval._gt(self, other)\n def __ge__(self, other): return self.aval._ge(self, other)\n def __abs__(self): return self.aval._abs(self)\n def __add__(self, other): return self.aval._add(self, other)\n def __radd__(self, other): return self.aval._radd(self, other)\n def __sub__(self, other): return self.aval._sub(self, other)\n def __rsub__(self, other): return self.aval._rsub(self, other)\n def __mul__(self, other): return self.aval._mul(self, other)\n def __rmul__(self, other): return self.aval._rmul(self, other)\n def __div__(self, other): return self.aval._div(self, other)\n def __rdiv__(self, other): return self.aval._rdiv(self, other)\n def __truediv__(self, other): return self.aval._truediv(self, other)\n def __rtruediv__(self, other): return self.aval._rtruediv(self, other)\n def __floordiv__(self, other): return self.aval._floordiv(self, other)\n def __rfloordiv__(self, other): return self.aval._rfloordiv(self, other)\n def __divmod__(self, other): return self.aval._divmod(self, other)\n def __rdivmod__(self, other): return self.aval._rdivmod(self, other)\n def __mod__(self, other): return self.aval._mod(self, other)\n def __rmod__(self, other): return self.aval._rmod(self, other)\n def __pow__(self, other): return self.aval._pow(self, other)\n def __rpow__(self, other): return self.aval._rpow(self, other)\n def __matmul__(self, other): return self.aval._matmul(self, other)\n def __rmatmul__(self, 
other): return self.aval._rmatmul(self, other)\n def __and__(self, other): return self.aval._and(self, other)\n def __rand__(self, other): return self.aval._rand(self, other)\n def __or__(self, other): return self.aval._or(self, other)\n def __ror__(self, other): return self.aval._ror(self, other)\n def __xor__(self, other): return self.aval._xor(self, other)\n def __rxor__(self, other): return self.aval._rxor(self, other)\n def __invert__(self): return self.aval._invert(self)\n def __lshift__(self, other): return self.aval._lshift(self, other)\n def __rlshift__(self, other): return self.aval._rlshift(self, other)\n def __rshift__(self, other): return self.aval._rshift(self, other)\n def __rrshift__(self, other): return self.aval._rrshift(self, other)\n def __getitem__(self, idx): return self.aval._getitem(self, idx)\n def __nonzero__(self): return self.aval._nonzero(self)\n def __bool__(self): return self.aval._bool(self)\n def __int__(self): return self.aval._int(self)\n def __long__(self): return self.aval._long(self)\n def __hex__(self): return self.aval._hex(self)\n def __oct__(self): return self.aval._oct(self)\n def __float__(self): return self.aval._float(self)\n def __complex__(self): return self.aval._complex(self)\n\n def __setitem__(self, idx, val):\n raise TypeError(\"JAX 'Tracer' objects do not support item assignment\")\n\n # NumPy also only looks up special methods on classes.\n def __array_module__(self, types): return self.aval._array_module(self, types)\n\n def __getattr__(self, name):\n # if the aval property raises an AttributeError, gets caught here\n assert skip_checks or name != \"aval\"\n\n try:\n attr = getattr(self.aval, name)\n except KeyError as err:\n raise AttributeError(\n \"{} has no attribute {}\".format(self.__class__.__name__, name)\n ) from err\n else:\n t = type(attr)\n if t is aval_property:\n return attr.fget(self)\n elif t is aval_method:\n return types.MethodType(attr.fun, self)\n else:\n return attr\n\n def __repr__(self):\n base = pp('Traced<{}>with<{}>'.format(self.aval, self._trace))\n contents = self._contents()\n if contents:\n base += pp(' with ') >> vcat(pp('{} = '.format(name)) >> pp_payload\n for name, pp_payload in contents)\n return str(base)\n\n def _contents(self):\n try:\n return [(name, pp(repr(getattr(self, name)))) for name in self.__slots__]\n except AttributeError:\n return ()\n\n def __copy__(self):\n return self\n\n def __deepcopy__(self, unused_memo):\n return self\n\n def _origin_msg(self) -> str:\n return \"\"\n\n# these can be used to set up forwarding of properties and instance methods from\n# Tracer instances to the underlying avals\naval_property = namedtuple(\"aval_property\", [\"fget\"])\naval_method = namedtuple(\"aval_method\", [\"fun\"])\n\n\nclass EvalTrace(Trace):\n # See comments in https://github.com/google/jax/pull/3370\n def pure(self, x): return x\n lift = sublift = pure\n\n def process_primitive(self, primitive, tracers, params):\n return primitive.impl(*tracers, **params)\n\n def process_call(self, primitive, f, tracers, params):\n return primitive.impl(f, *tracers, **params)\n process_map = process_call\n\n def process_custom_jvp_call(self, primitive, fun, jvp, tracers):\n del primitive, jvp # Unused.\n return fun.call_wrapped(*tracers)\n\n def process_custom_vjp_call(self, primitive, fun, fwd, bwd, tracers, out_trees):\n del primitive, fwd, bwd, out_trees # Unused.\n return fun.call_wrapped(*tracers)\n\n\nclass MainTrace:\n level: int\n trace_type: Type[Trace]\n\n def __init__(self, level, trace_type) -> 
None:\n self.level = level\n self.trace_type = trace_type\n\n def __repr__(self) -> str:\n return \"MainTrace({},{})\".format(self.level, self.trace_type.__name__)\n\n def __hash__(self) -> int:\n return hash((self.level, self.trace_type))\n\n def __eq__(self, other: object) -> bool:\n return (isinstance(other, MainTrace) and\n self.level == other.level and self.trace_type == other.trace_type)\n\nclass TraceStack:\n # See comments in https://github.com/google/jax/pull/3370\n upward: List[MainTrace]\n downward: List[MainTrace]\n\n def __init__(self):\n eval_trace = MainTrace(0, EvalTrace)\n self.stack = [eval_trace]\n self.dynamic = eval_trace\n\n def next_level(self) -> int:\n return len(self.stack)\n\n def push(self, main_trace: MainTrace) -> None:\n self.stack.append(main_trace)\n\n def pop(self) -> None:\n self.stack.pop()\n\n def __repr__(self) -> str:\n stack_str = map(' {}\\n'.format, self.stack[::-1])\n return f'Trace stack\\n{stack_str}\\n{self.dynamic}'\n\n def copy(self):\n new = self.__new__(TraceStack)\n new.stack = self.stack[:]\n new.dynamic = self.dynamic\n return new\n\nclass Sublevel(int): pass\nAxisEnvFrame = namedtuple('AxisEnvFrame', ['name', 'size', 'main_trace'])\n\nclass TraceState:\n trace_stack: TraceStack\n substack: List[Sublevel]\n axis_env: List[AxisEnvFrame]\n\n def __init__(self) -> None:\n self.trace_stack = TraceStack()\n self.substack = [Sublevel(0)]\n self.axis_env = []\n\n def copy(self):\n new = self.__new__(TraceState)\n new.trace_stack = self.trace_stack.copy()\n new.substack = self.substack[:]\n new.axis_env = self.axis_env[:]\n return new\n\n# The global state of the tracer is accessed by a thread-local object.\n# This allows concurrent tracing in separate threads; passing traced objects\n# between threads is forbidden.\nclass ThreadLocalState(threading.local):\n def __init__(self):\n self.trace_state = TraceState()\nthread_local_state = ThreadLocalState()\n\ndef trace_state_clean() -> bool:\n trace_state = thread_local_state.trace_state\n return (trace_state.substack == [Sublevel(0)] and\n trace_state.axis_env == [] and\n trace_state.trace_stack.stack == [MainTrace(0, EvalTrace)] and\n trace_state.trace_stack.dynamic == MainTrace(0, EvalTrace))\n\ndef reset_trace_state() -> bool:\n \"Reset the global trace state and return True if it was already clean.\"\n if not trace_state_clean():\n thread_local_state.trace_state.__init__() # type: ignore\n return False\n else:\n return True\n\ndef cur_sublevel() -> Sublevel:\n return thread_local_state.trace_state.substack[-1]\n\n@contextmanager\ndef new_main(trace_type: Type[Trace], dynamic: bool = False,\n ) -> Generator[MainTrace, None, None]:\n # See comments in https://github.com/google/jax/pull/3370\n stack = thread_local_state.trace_state.trace_stack\n level = stack.next_level()\n main = MainTrace(level, trace_type)\n stack.push(main)\n if dynamic:\n prev_dynamic, stack.dynamic = stack.dynamic, main\n\n try:\n yield main\n finally:\n thread_local_state.trace_state.trace_stack.pop()\n if dynamic:\n stack.dynamic = prev_dynamic\n\n if check_leaks:\n t = ref(main)\n del main\n if t() is not None:\n print(thread_local_state.trace_state.trace_stack)\n raise Exception('Leaked trace {}'.format(t()))\n\n@contextmanager\ndef new_base_main(trace_type: Type[Trace]) -> Generator[MainTrace, None, None]:\n # See comments in https://github.com/google/jax/pull/3370\n stack = thread_local_state.trace_state.trace_stack\n main = MainTrace(0, trace_type)\n prev_dynamic, stack.dynamic = stack.dynamic, main\n prev_base, 
stack.stack[0] = stack.stack[0], main\n try:\n yield main\n finally:\n stack.dynamic = prev_dynamic\n stack.stack[0] = prev_base\n\n@contextmanager\ndef eval_context():\n with new_base_main(EvalTrace):\n yield\n\n@contextmanager\ndef new_sublevel() -> Generator[None, None, None]:\n sublevel = Sublevel(len(thread_local_state.trace_state.substack))\n thread_local_state.trace_state.substack.append(sublevel)\n try:\n yield\n finally:\n thread_local_state.trace_state.substack.pop()\n\n if check_leaks:\n t = ref(sublevel)\n del sublevel\n if t() is not None:\n raise Exception('Leaked sublevel {}'.format(t()))\n\ndef maybe_new_sublevel(trace):\n # dynamic traces run the WrappedFun, so we raise the sublevel for them\n dynamic = thread_local_state.trace_state.trace_stack.dynamic\n return new_sublevel() if trace.main is dynamic else suppress()\n\ndef full_lower(val):\n if isinstance(val, Tracer):\n return val.full_lower()\n else:\n return val\n\ndef find_top_trace(xs) -> Trace:\n top_main = max((x._trace.main for x in xs if isinstance(x, Tracer)),\n default=None, key=attrgetter('level'))\n dynamic = thread_local_state.trace_state.trace_stack.dynamic\n top_main = (dynamic if top_main is None or dynamic.level > top_main.level\n else top_main)\n return top_main and top_main.trace_type(top_main, cur_sublevel()) # type: ignore\n\n\n# -------------------- abstract values --------------------\n\n\nclass AbstractValue:\n __slots__: List[str] = []\n _num_buffers: int = 1 # number of buffers used to represent the value.\n\n def at_least_vspace(self):\n return self\n\n def __repr__(self):\n try:\n kv_pairs = ('{}={}'.format(k, v) for k, v in self.__dict__.items())\n return '{}({})'.format(self.__class__.__name__, ','.join(kv_pairs))\n except AttributeError:\n return self.__class__.__name__\n\n def strip_weak_type(self) -> 'AbstractValue':\n return self\n\n def join(self, other):\n raise NotImplementedError(\"must override\")\n\nclass Bot(AbstractValue): pass\n\nbot = Bot()\n\nclass AbstractUnit(AbstractValue):\n # TODO(jakevdp): make it possible to set zero buffers\n # _num_buffers = 0\n def join(self, other):\n if not skip_checks:\n assert other is abstract_unit, other\n return self\n def _eq(self, self_traced, other): return get_aval(other) is self\n def str_short(self): return '*'\n\nabstract_unit = AbstractUnit()\n\ndef lattice_join(x: Optional[AbstractValue],\n y: Optional[AbstractValue]) -> AbstractValue:\n if x is None:\n return cast(AbstractValue, y)\n elif y is None:\n return cast(AbstractValue, x)\n elif isinstance(x, type(y)):\n return y.join(x)\n elif isinstance(y, type(x)):\n return x.join(y)\n else:\n raise TypeError((x, y))\n\n# For use in typing annotations to denote either a Tracer or a `valid_jaxtype`.\nValue = Any\n\ndef valid_jaxtype(x):\n try:\n concrete_aval(x)\n except TypeError:\n return False\n else:\n return True\n\ndef check_valid_jaxtype(x):\n if not valid_jaxtype(x):\n raise TypeError(f\"{x} of type {type(x)} is not a valid JAX type\")\n\n\ndef concrete_aval(x):\n for typ in type(x).mro():\n handler = pytype_aval_mappings.get(typ)\n if handler: return handler(x)\n raise TypeError(f\"{type(x)} is not a valid JAX type\")\n\n\ndef get_aval(x):\n if isinstance(x, Tracer):\n return x.aval\n else:\n return concrete_aval(x)\n\n\npytype_aval_mappings: Dict[type, Callable[[Any], AbstractValue]] = {}\n\n\nclass Unit:\n def __repr__(self): return '*'\nunit = Unit()\nliteralable_types.add(Unit)\n\nclass UnitVar(Var):\n count = -1\n suffix = ''\n def __init__(self): pass\n @property\n def 
aval(self): return abstract_unit\n def __repr__(self): return '*'\nunitvar = UnitVar()\n\npytype_aval_mappings[Unit] = lambda _: abstract_unit\n\nclass ConcretizationTypeError(TypeError): pass\n\ndef raise_concretization_error(val: Tracer, context=\"\"):\n msg = (\"Abstract tracer value encountered where concrete value is expected.\\n\\n\"\n + context + \"\\n\\n\"\n + val._origin_msg() + \"\\n\\n\"\n \"See https://jax.readthedocs.io/en/latest/faq.html#abstract-tracer-value-encountered-where-concrete-value-is-expected-error for more information.\\n\\n\"\n f\"Encountered tracer value: {val}\")\n raise ConcretizationTypeError(msg)\n\n\ndef concretization_function_error(fun, suggest_astype=False):\n fname = getattr(fun, \"__name__\", fun)\n fname_context = f\"The problem arose with the `{fname}` function. \"\n if suggest_astype:\n fname_context += (\"If trying to convert the data type of a value, \"\n f\"try using `x.astype({fun.__name__})` \"\n f\"or `jnp.array(x, {fun.__name__})` instead.\")\n def error(self, arg):\n raise_concretization_error(arg, fname_context)\n return error\n\n\ndef concrete_or_error(force: Any, val: Any, context=\"\"):\n \"\"\"Like force(val), but gives the context in the error message.\"\"\"\n if force is None:\n force = lambda x: x\n if isinstance(val, Tracer):\n if isinstance(val.aval, ConcreteArray):\n return force(val.aval.val)\n else:\n raise_concretization_error(val, context)\n else:\n return force(val)\n\nclass UnshapedArray(AbstractValue):\n __slots__ = ['dtype', 'weak_type']\n array_abstraction_level = 2\n\n def __init__(self, dtype, weak_type=False):\n self.dtype = np.dtype(dtypes.canonicalize_dtype(dtype))\n self.weak_type = weak_type\n\n def __eq__(self, other):\n return (type(self) is type(other) and self.dtype == other.dtype and\n self.weak_type == other.weak_type)\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n # can use hash(self.dtype) and rely on the fact that numpy reuses base dtype\n # objects, e.g. `np.zeros(3).dtype is np.zeros(4).dtype`, or we can use\n # the unique character code via hash(self.dtype.char)\n return hash((self.dtype, self.weak_type))\n\n def __repr__(self):\n return '{}({}{})'.format(self.__class__.__name__, self.str_short(),\n \", weak_type=True\" if self.weak_type else \"\")\n\n _bool = _nonzero = concretization_function_error(bool)\n _float = concretization_function_error(float, True)\n _int = concretization_function_error(int, True)\n _complex = concretization_function_error(complex, True)\n _hex = concretization_function_error(hex)\n _oct = concretization_function_error(oct)\n\n def at_least_vspace(self) -> AbstractValue:\n return UnshapedArray(primal_dtype_to_tangent_dtype(self.dtype),\n self.weak_type)\n\n def join(self, other):\n if self.dtype == other.dtype:\n if self.weak_type == other.weak_type:\n return self\n else:\n return UnshapedArray(self.dtype, weak_type=False)\n else:\n raise TypeError(self, other)\n\n def str_short(self) -> str:\n return self.dtype.name\n\n def strip_weak_type(self) -> 'UnshapedArray':\n \"\"\"Returns a copy of the aval with weak_type=False.\"\"\"\n return UnshapedArray(self.dtype) if self.weak_type else self\n\n @property\n def shape(self):\n msg = (\"UnshapedArray has no shape. 
Please open an issue at \"\n \"https://github.com/google/jax/issues because it's unexpected for \"\n \"UnshapedArray instances to ever be produced.\")\n raise TypeError(msg)\n\nclass ShapedArray(UnshapedArray):\n __slots__ = ['shape']\n array_abstraction_level = 1\n\n def __init__(self, shape, dtype, weak_type=False):\n super(ShapedArray, self).__init__(dtype, weak_type=weak_type)\n self.shape = canonicalize_shape(shape)\n\n ndim = property(lambda self: len(self.shape))\n size = property(lambda self: prod(self.shape))\n\n broadcast: ClassVar[Optional[aval_method]] = None\n transpose: ClassVar[Optional[aval_method]] = None\n reshape: ClassVar[Optional[aval_method]] = None\n _iter: ClassVar[Optional[staticmethod]] = None\n\n def __eq__(self, other):\n return (type(self) is type(other)\n and self.dtype == other.dtype and self.shape == other.shape\n and self.weak_type == other.weak_type)\n\n def __hash__(self):\n # can use hash(self.dtype) and rely on the fact that numpy reuses base dtype\n # objects, e.g. `np.zeros(3).dtype is np.zeros(4).dtype`, or we can use\n # the unique character code via hash(self.dtype.char)\n return hash((self.shape, self.dtype, self.weak_type))\n\n def at_least_vspace(self):\n return ShapedArray(self.shape, primal_dtype_to_tangent_dtype(self.dtype),\n self.weak_type)\n\n def join(self, other):\n if self.shape == other.shape and self.dtype == other.dtype:\n if self.weak_type == other.weak_type:\n return self\n else:\n return ShapedArray(self.shape, self.dtype, weak_type=False)\n elif self.dtype == other.dtype:\n return UnshapedArray(self.dtype)\n else:\n raise TypeError(self, other)\n\n def str_short(self):\n shapestr = ','.join(map(str, self.shape))\n return '{}[{}]'.format(self.dtype.name, shapestr)\n\n def __len__(self):\n try:\n return self.shape[0]\n except IndexError as err:\n raise TypeError(\"len() of unsized object\") from err # same as numpy error\n\n def _len(self, ignored_tracer):\n return len(self)\n\n def strip_weak_type(self):\n return ShapedArray(self.shape, self.dtype) if self.weak_type else self\n\n\ndef _forward_to_value(self, fun, ignored_tracer, *args):\n return fun(self.val, *args)\n\nclass ConcreteArray(ShapedArray):\n __slots__ = ['val']\n array_abstraction_level = 0\n\n def __init__(self, val, weak_type=False):\n super(ConcreteArray, self).__init__(np.shape(val), np.result_type(val),\n weak_type=weak_type)\n # Note: canonicalized self.dtype doesn't necessarily match self.val\n self.val = val\n assert self.dtype != np.dtype('O'), val\n\n def __eq__(self, other):\n if (type(self) is type(other) and self.dtype == other.dtype\n and self.shape == other.shape and self.weak_type == other.weak_type):\n with eval_context(): # in case self.val is a DeviceArray\n return (self.val == other.val).all()\n else:\n return False\n\n def __hash__(self):\n return id(self.val)\n\n def at_least_vspace(self):\n return ShapedArray(self.shape, primal_dtype_to_tangent_dtype(self.dtype),\n weak_type=self.weak_type)\n\n def join(self, other) -> UnshapedArray:\n if self == other:\n return self\n elif self.shape == other.shape and self.dtype == other.dtype:\n return ShapedArray(self.shape, self.dtype,\n weak_type=self.weak_type and other.weak_type)\n elif self.dtype == other.dtype:\n return UnshapedArray(self.dtype,\n weak_type=self.weak_type and other.weak_type)\n else:\n raise TypeError(self, other)\n\n def str_short(self) -> str:\n return str(self.val)\n\n def strip_weak_type(self) -> 'ConcreteArray':\n return ConcreteArray(self.val) if self.weak_type else self\n\n 
_bool = _nonzero = partialmethod(_forward_to_value, bool)\n _int = partialmethod(_forward_to_value, int)\n _hex = partialmethod(_forward_to_value, hex)\n _oct = partialmethod(_forward_to_value, oct)\n\n _float = concretization_function_error(float, True)\n _complex = concretization_function_error(complex, True)\n\ndef primal_dtype_to_tangent_dtype(primal_dtype):\n if not dtypes.issubdtype(primal_dtype, np.inexact):\n return dtypes.float0\n else:\n return primal_dtype\n\nclass AbstractToken(AbstractValue):\n def join(self, other):\n if isinstance(other, AbstractToken):\n return self\n else:\n assert False, f\"Cannot join {self} with {other}\"\n def str_short(self): return 'Tok'\n\nabstract_token = AbstractToken()\n\n\ndef raise_to_shaped(aval: AbstractValue, weak_type=None):\n if weak_type is None:\n weak_type = getattr(aval, 'weak_type', False)\n for typ in type(aval).mro():\n handler = raise_to_shaped_mappings.get(typ)\n if handler: return handler(aval, weak_type)\n raise TypeError(type(aval))\n\nraise_to_shaped_mappings : Dict[type, Callable] = {\n AbstractUnit: lambda aval, _: aval,\n AbstractToken: lambda aval, _: aval,\n ShapedArray: lambda aval, weak_type: ShapedArray(aval.shape, aval.dtype, weak_type=weak_type)\n}\n\n# Registry for valid dimension types. This is used by masking.Poly.\n_DIMENSION_TYPES: Set[type] = {int}\n\ndef _canonicalize_dimension(dim):\n if type(dim) in _DIMENSION_TYPES:\n return dim\n else:\n return operator.index(dim)\n\ndef canonicalize_shape(shape):\n \"\"\"Canonicalizes and checks for errors in a user-provided shape value.\n\n Args:\n shape: a Python value that represents a shape.\n\n Returns:\n A tuple of integers.\n \"\"\"\n try:\n return tuple(map(_canonicalize_dimension, shape))\n except TypeError:\n pass\n msg = (\"Shapes must be 1D sequences of concrete values of integer type, \"\n \"got {}.\")\n if any(isinstance(x, Tracer) and isinstance(get_aval(x), ShapedArray)\n and not isinstance(get_aval(x), ConcreteArray) for x in shape):\n msg += (\"\\nIf using `jit`, try using `static_argnums` or applying `jit` to \"\n \"smaller subfunctions.\")\n raise TypeError(msg.format(shape))\n\n\n# ------------------- Call -------------------\n\ndef apply_todos(todos, outs):\n todos_list = list(todos)\n while todos_list:\n outs = map(full_lower, todos_list.pop()(outs))\n return outs\n\[email protected]_with_aux\ndef process_env_traces(primitive: Union['CallPrimitive', 'MapPrimitive'],\n level: int, params_tuple: tuple, *args):\n outs = yield args, {}\n params = dict(params_tuple)\n todo = []\n while True:\n tracers = [x for x in outs if isinstance(x, Tracer)\n and (level is None or x._trace.level > level)]\n if tracers:\n ans = max(tracers, key=lambda x: x._trace.level)\n else:\n break\n trace = type(ans._trace)(ans._trace.main, cur_sublevel())\n outs = map(trace.full_raise, outs)\n outs, cur_todo = primitive.post_process(trace, outs, params)\n todo.append(cur_todo)\n yield outs, tuple(todo) # Ensure the aux output is immutable\n\ndef call_bind(primitive: Union['CallPrimitive', 'MapPrimitive'],\n fun, *args, **params):\n params_tuple = tuple(params.items())\n top_trace = find_top_trace(args)\n fun, env_trace_todo = process_env_traces(\n fun, primitive, top_trace and top_trace.level, params_tuple)\n tracers = map(top_trace.full_raise, args)\n with maybe_new_sublevel(top_trace):\n outs = primitive.process(top_trace, fun, tracers, params)\n return map(full_lower, apply_todos(env_trace_todo(), outs))\n\n\nclass CallPrimitive(Primitive):\n multiple_results = True\n 
call_primitive = True\n\n def bind(self, fun, *args, **params):\n return call_bind(self, fun, *args, **params)\n\n def process(self, trace, fun, tracers, params):\n return trace.process_call(self, fun, tracers, params)\n\n def post_process(self, trace, out_tracers, params):\n return trace.post_process_call(self, out_tracers, params)\n\ndef call_impl(f: lu.WrappedFun, *args, **params):\n del params # params parameterize the call primitive, not the function\n return f.call_wrapped(*args)\n\ncall_p = CallPrimitive('call')\ncall = call_p.bind\ncall_p.def_impl(call_impl)\n\n\n# ------------------- Map -------------------\n\nclass MapPrimitive(Primitive):\n multiple_results = True\n map_primitive = True\n\n def bind(self, fun, *args, **params):\n assert len(params['mapped_invars']) == len(args)\n return call_bind(self, fun, *args, **params)\n\n def process(self, trace, fun, tracers, params):\n return trace.process_map(self, fun, tracers, params)\n\n def post_process(self, trace, out_tracers, params):\n return trace.post_process_map(self, out_tracers, params)\n\n@contextmanager\ndef extend_axis_env(axis_name, size: int, tag: Any):\n frame = AxisEnvFrame(axis_name, size, tag)\n thread_local_state.trace_state.axis_env.append(frame)\n try:\n yield\n finally:\n thread_local_state.trace_state.axis_env.pop()\n\n\n# When a mapped function is given no axis name, we generate a name object based\n# on the id of the function object. Collisions aren't important because this\n# name can't be used in collectives, as user code never gets a ref to this\n# object. We don't want to use the function object itself because that might\n# persist references to the function object.\n# TODO(mattjj): revisit this unique axis name strategy\nclass _TempAxisName:\n\n def __init__(self, obj):\n self.id = id(obj)\n\n def __repr__(self):\n return f'<axis {hex(self.id)}>'\n\n def __hash__(self):\n return hash(self.id)\n\n def __eq__(self, other):\n return type(other) is _TempAxisName and self.id == other.id\n\n\ndef axis_frame(axis_name):\n frames = thread_local_state.trace_state.axis_env\n for frame in reversed(frames):\n if frame.name == axis_name:\n return frame\n\n named_axis = [\n frame.name\n for frame in reversed(frames)\n if not isinstance(frame.name, _TempAxisName)\n ]\n raise NameError(\n f'unbound axis name: {axis_name}. The following axis names (e.g. 
defined '\n 'by pmap) are available to collectives operations:'\n f'{named_axis}')\n\n\n# ------------------- Jaxpr checking -------------------\n\ndef mapped_aval(size: int, aval: AbstractValue) -> AbstractValue:\n if aval is abstract_unit:\n return aval\n elif isinstance(aval, ShapedArray):\n # might be raising abstraction level from Concrete here\n assert aval.shape[0] == size\n return ShapedArray(aval.shape[1:], aval.dtype)\n else:\n raise TypeError(f\"Mapped operand {aval}\")\n\ndef unmapped_aval(size: int, aval: AbstractValue) -> AbstractValue:\n if aval is abstract_unit:\n return aval\n elif isinstance(aval, ShapedArray):\n return ShapedArray((size,) + aval.shape, aval.dtype)\n else:\n raise TypeError(f\"Mapped output {aval}\")\n\ndef typecheck(aval: AbstractValue, x) -> bool:\n return typecompat(aval, get_aval(x))\n\ndef typecompat(aval_ref: AbstractValue, aval: AbstractValue) -> bool:\n \"\"\"Determine whether `aval` conforms to `aval_ref`\"\"\"\n aval_ref = raise_to_shaped(aval_ref).strip_weak_type()\n try:\n return aval_ref == lattice_join(aval_ref, aval).strip_weak_type()\n except TypeError:\n return False\n\ndef typematch(aval1: UnshapedArray, aval2: UnshapedArray) -> bool:\n return raise_to_shaped(aval1, weak_type=False) == raise_to_shaped(aval2, weak_type=False)\n\nclass JaxprTypeError(TypeError): pass\n\ndef typecheck_assert(pred, msg):\n if not pred:\n raise JaxprTypeError(msg)\n\ncustom_typechecks: Dict[Primitive, Callable] = {}\n\ndef check_jaxpr(jaxpr: Jaxpr):\n \"\"\"Checks well-formedness of a jaxpr.\n\n Specifically, check that:\n - variables that are read are bound beforehand\n - variables are typed equally throughout a jaxpr\n - variable type annotations are compatible with their binding expression\n\n Raises `TypeError` if `jaxpr` is determined invalid. 
Returns `None` otherwise.\n \"\"\"\n try:\n _check_jaxpr(jaxpr, [v.aval for v in jaxpr.invars])\n except JaxprTypeError as e:\n if len(e.args) == 2:\n msg, eqn_idx = e.args\n jaxpr_str = str(pp_jaxpr_eqn_range(jaxpr, eqn_idx - 10, eqn_idx + 10))\n else:\n msg, = e.args\n jaxpr_str = str(pp_jaxpr_eqn_range(jaxpr, 0, 20))\n msg = \"\\n\\n\".join([msg, \"while checking jaxpr:\", jaxpr_str])\n raise JaxprTypeError(msg) from None\n\ndef _check_jaxpr(jaxpr: Jaxpr, in_avals: Sequence[AbstractValue]):\n\n def read(v: Atom) -> AbstractValue:\n if isinstance(v, Literal):\n return raise_to_shaped(get_aval(v.val))\n else:\n typecheck_assert(v in env, f\"Variable '{v}' not defined\")\n return env[v]\n\n def write(v: Var, a: AbstractValue) -> None:\n typecheck_assert(v not in env, f\"Variable '{v}' already bound\")\n if v is not dropvar:\n typecheck_assert(typecompat(v.aval, a),\n f\"Variable '{v}' inconsistently typed as {a}, \"\n f\"bound as {v.aval}\")\n env[v] = a\n\n env : Dict[Var, AbstractValue] = {}\n\n write(unitvar, abstract_unit)\n map(write, jaxpr.constvars, [v.aval for v in jaxpr.constvars])\n map(write, jaxpr.invars, in_avals)\n\n for eqn_idx, eqn in enumerate(jaxpr.eqns):\n prim = eqn.primitive\n try:\n in_avals = map(read, eqn.invars)\n typecheck_assert(all(not isinstance(ina, ConcreteArray) for ina in in_avals),\n \"Equation given ConcreteArray type inputs\")\n if prim in custom_typechecks:\n custom_typechecks[prim](*in_avals, **eqn.params)\n if prim.call_primitive:\n out_avals = check_call(prim, in_avals, eqn.params)\n elif prim.map_primitive:\n out_avals = check_map(prim, in_avals, eqn.params)\n else:\n out_avals = check_eqn(prim, in_avals, eqn.params)\n map(write, eqn.outvars, out_avals)\n except JaxprTypeError as e:\n msg, = e.args\n src = source_info_util.summarize(eqn.source_info)\n msg = \"\\n\\n\".join([msg, \"in equation:\", str(pp_eqn(eqn).indent(2)),\n f\"from source: {src}\"])\n raise JaxprTypeError(msg, eqn_idx) from None\n\n map(read, jaxpr.outvars)\n\ndef check_eqn(prim, in_avals, params):\n for jaxpr in jaxprs_in_params(params):\n check_jaxpr(jaxpr)\n\n out_avals = prim.abstract_eval(*in_avals, **params)\n if not prim.multiple_results:\n out_avals = [out_avals]\n return out_avals\n\ndef check_call(prim, in_avals, params):\n typecheck_assert(\"call_jaxpr\" in params,\n f\"Call primitive {prim} missing 'call_jaxpr' parameter\")\n call_jaxpr = params[\"call_jaxpr\"]\n\n # These checks also happen in recursive call, but give better errors here.\n typecheck_assert(len(in_avals) == len(call_jaxpr.invars),\n f\"Call primitive {prim} with {len(call_jaxpr.invars)} \"\n f\"operands cannot call jaxpr with {len(call_jaxpr.invars)} \"\n f\"inputs\")\n binder_avals = [v.aval for v in call_jaxpr.invars]\n for binder_aval, in_aval in zip(binder_avals, in_avals):\n typecheck_assert(typecompat(binder_aval, in_aval),\n f\"Call primitive {prim} passes operand {in_aval} \"\n f\"to jaxpr expecting {binder_aval}\")\n\n _check_jaxpr(call_jaxpr, in_avals)\n\n out_avals = [v.aval for v in call_jaxpr.outvars]\n return out_avals\n\ndef check_map(prim, in_avals, params):\n typecheck_assert(\"call_jaxpr\" in params,\n f\"Map primitive {prim} missing 'call_jaxpr' parameter\")\n call_jaxpr = params[\"call_jaxpr\"]\n typecheck_assert(\"axis_size\" in params,\n f\"Map primitive {prim} missing 'axis_size' parameter\")\n axis_size = params[\"axis_size\"]\n typecheck_assert(\"mapped_invars\" in params,\n f\"Map primitive {prim} missing 'mapped_invars' parameter\")\n mapped_invars = 
params[\"mapped_invars\"]\n\n binder_avals = [unmapped_aval(axis_size, v.aval) if mapped else v.aval\n for v, mapped in zip(call_jaxpr.invars, mapped_invars)]\n for binder_aval, in_aval in zip(binder_avals, in_avals):\n typecheck_assert(typecompat(binder_aval, in_aval),\n f\"Call primitive {prim} passes operand {in_aval} \"\n f\"to jaxpr expecting {binder_aval}\")\n\n mapped_avals = [mapped_aval(axis_size, aval) if mapped else aval\n for aval, mapped in zip(in_avals, mapped_invars)]\n _check_jaxpr(call_jaxpr, mapped_avals)\n\n mapped_out_avals = [v.aval for v in call_jaxpr.outvars]\n out_avals = [unmapped_aval(axis_size, aval) for aval in mapped_out_avals]\n return out_avals\n\n\n# ------------------- Jaxpr printed representation -------------------\n\ndef pp_vars(vs: Sequence[Any], print_shapes: bool = False) -> str:\n if print_shapes:\n return ' '.join(f'{v}:{v.aval.str_short()}' for v in vs)\n else:\n return ' '.join(map(str, vs))\n\ndef pp_eqn_compact(primitive_name: str, params: Dict) -> PrettyPrint:\n filtered_params = {k: v for k, v in params.items()\n if (k != 'branches' and\n not isinstance(v, (Jaxpr, ClosedJaxpr)))}\n return pp(primitive_name) >> pp_kv_pairs(sorted(filtered_params.items()))\n\ndef pp_eqn(eqn: JaxprEqn, print_shapes: bool = False) -> PrettyPrint:\n lhs = pp_vars(eqn.outvars, print_shapes)\n pp_lhs = pp(f'{lhs} =')\n pp_rhs = (pp(eqn.primitive.name) >>\n pp_kv_pairs(sorted(eqn.params.items())) >> pp(' ') >>\n pp(pp_vars(eqn.invars, print_shapes)))\n if len(lhs) <= 6 or print_shapes:\n return pp_lhs >> pp(' ') >> pp_rhs\n else:\n return pp_lhs + pp_rhs.indent(2)\n\ndef pp_eqns(eqns: Sequence[JaxprEqn],\n source_info: bool = False) -> Sequence[PrettyPrint]:\n pps = map(pp_eqn, eqns)\n if source_info:\n l = max((i + len(s) for x in pps for i, s in x.lines), default=None)\n if l is not None:\n return [p.annotate(l, source_info_util.summarize(e.source_info))\n for e, p in zip(eqns, pps)]\n return pps\n\ndef pp_jaxpr(jaxpr: Jaxpr, source_info: bool = False) -> PrettyPrint:\n pps = pp_eqns(jaxpr.eqns, source_info=source_info)\n str_outvars = str(tuple(jaxpr.outvars))\n return (pp('{{ lambda {} ; {}.'.format(pp_vars(jaxpr.constvars),\n pp_vars(jaxpr.invars))) +\n ((pp('let ') >> vcat(pps))\n + pp('in {} }}'.format(str_outvars))).indent(2))\n\ndef pp_jaxpr_eqn_range(jaxpr: Jaxpr, lo: int, hi: int,\n source_info: bool = False) -> PrettyPrint:\n lo = max(lo, 0)\n hi = max(lo, min(hi, len(jaxpr.eqns)))\n eqns = jaxpr.eqns[lo:hi]\n pps = []\n if len(eqns) == 0 and len(jaxpr.eqns) != 0:\n pps.append(pp('...'))\n else:\n if lo != 0:\n pps.append(pp('...'))\n pps.extend(pp_eqns(eqns, source_info=source_info))\n if hi != len(jaxpr.eqns):\n pps.append(pp('...'))\n str_outvars = str(tuple(jaxpr.outvars))\n return (pp('{{ lambda {} ; {}.'.format(pp_vars(jaxpr.constvars),\n pp_vars(jaxpr.invars))) +\n ((pp('let ') >> vcat(pps))\n + pp('in {} }}'.format(str_outvars))).indent(2))\n\ndef pp_jaxprs(jaxprs) -> PrettyPrint:\n jaxprs = [j.jaxpr if isinstance(j, ClosedJaxpr) else j for j in jaxprs]\n return pp('( ') >> vcat(map(pp_jaxpr, jaxprs)) >> pp(' )')\n\ndef pp_kv_pair(k, v):\n if type(v) is tuple and all(isinstance(j, (Jaxpr, ClosedJaxpr)) for j in v):\n pp_v = pp_jaxprs(v)\n else:\n pp_v = pp(v)\n return pp(f'{k}=') >> pp_v\n\ndef pp_kv_pairs(kv_pairs):\n if kv_pairs:\n return pp('[ ') >> vcat([pp_kv_pair(k, v) for k, v in kv_pairs]) >> pp(' ]')\n else:\n return pp('')\n\[email protected]_omnistaging_disabler\ndef omnistaging_disabler() -> None:\n global thread_local_state, call_bind, 
find_top_trace, initial_style_staging, \\\n new_main, reset_trace_state, TraceStack, TraceState, extend_axis_env, \\\n eval_context\n\n class TraceStack:\n upward: List[MainTrace]\n downward: List[MainTrace]\n\n def __init__(self):\n self.upward = []\n self.downward = []\n\n def next_level(self, bottom: bool) -> int:\n if bottom:\n return - (len(self.downward) + 1)\n else:\n return len(self.upward)\n\n def push(self, main_trace: MainTrace, bottom: bool) -> None:\n if bottom:\n self.downward.append(main_trace)\n else:\n self.upward.append(main_trace)\n\n def pop(self, bottom: bool) -> None:\n if bottom:\n self.downward.pop()\n else:\n self.upward.pop()\n\n def __repr__(self) -> str:\n return 'Trace stack\\n{} ---\\n{}'.format(\n map(' {}\\n'.format, self.upward[::-1]),\n map(' {}\\n'.format, self.downward))\n\n def copy(self):\n new = TraceStack()\n new.upward = self.upward[:]\n new.downward = self.downward[:]\n return new\n\n class TraceState:\n trace_stack: TraceStack\n substack: List[Sublevel]\n initial_style: bool\n\n def __init__(self) -> None:\n self.trace_stack = TraceStack() # type: ignore\n self.substack = [Sublevel(0)]\n self.initial_style = False\n\n def copy(self):\n new = TraceState()\n new.trace_stack = self.trace_stack.copy()\n new.substack = self.substack[:]\n new.initial_style = self.initial_style\n return new\n\n thread_local_state = ThreadLocalState()\n\n def reset_trace_state() -> bool:\n \"Reset the global trace state and return True if it was already clean.\"\n if (thread_local_state.trace_state.substack != [Sublevel(0)] or\n thread_local_state.trace_state.trace_stack.downward or\n thread_local_state.trace_state.trace_stack.upward):\n thread_local_state.trace_state.__init__() # type: ignore\n return False\n else:\n return True\n\n @contextmanager\n def new_main(trace_type: Type[Trace], bottom=False) -> Generator[MainTrace, None, None]:\n level = thread_local_state.trace_state.trace_stack.next_level(bottom)\n main = MainTrace(level, trace_type)\n thread_local_state.trace_state.trace_stack.push(main, bottom)\n\n try:\n yield main\n finally:\n thread_local_state.trace_state.trace_stack.pop(bottom)\n\n if check_leaks:\n t = ref(main)\n del main\n if t() is not None:\n print(thread_local_state.trace_state.trace_stack)\n raise Exception('Leaked trace {}'.format(t()))\n\n def find_top_trace(xs) -> Optional[Trace]:\n top_trace = max((x._trace for x in xs if isinstance(x, Tracer)),\n key=attrgetter('level'), default=None)\n return top_trace and type(top_trace)(top_trace.main, cur_sublevel())\n\n @contextmanager\n def eval_context():\n yield # dummy implementation for forward compatibility\n\n def bind(self, *args, **kwargs):\n assert skip_checks or all(isinstance(arg, Tracer)\n or valid_jaxtype(arg) for arg in args), args\n top_trace = find_top_trace(args)\n if top_trace is None:\n return self.impl(*args, **kwargs)\n\n tracers = map(top_trace.full_raise, args)\n out_tracer = top_trace.process_primitive(self, tracers, kwargs)\n if self.multiple_results:\n return map(full_lower, out_tracer)\n else:\n return full_lower(out_tracer)\n Primitive.bind = bind # type: ignore\n\n def call_bind(primitive: Union['CallPrimitive', 'MapPrimitive'],\n fun: lu.WrappedFun, *args, **params):\n params_tuple = tuple(params.items())\n top_trace = find_top_trace(args)\n level = (thread_local_state.trace_state.trace_stack.next_level(True)\n if top_trace is None else top_trace.level)\n params_tuple = tuple(params.items())\n fun, env_trace_todo = process_env_traces(fun, primitive, level, params_tuple)\n 
if top_trace is None:\n with new_sublevel():\n outs = primitive.impl(fun, *args, **params)\n else:\n tracers = map(top_trace.full_raise, args)\n outs = primitive.process(top_trace, fun, tracers, params)\n return apply_todos(env_trace_todo(), map(full_lower, outs))\n\n @contextmanager\n def extend_axis_env(axis_name, size: int, tag: Any):\n yield\n\n @contextmanager\n def initial_style_staging():\n trace_state = thread_local_state.trace_state\n prev, trace_state.initial_style = trace_state.initial_style, True\n try:\n yield\n finally:\n trace_state.initial_style = prev\n\n# Casting float0 array to a float-valued zero array.\ndef zeros_like_float0(array, dtype=None):\n if not dtype:\n dtype = np.float\n return np.zeros(array.shape, dtype)\n" ]
[ [ "numpy.result_type", "numpy.dtype", "numpy.shape", "numpy.zeros" ] ]
douglatornell/xarray
[ "6d93a95d05bdbfc33fff24064f67d29dd891ab58" ]
[ "xarray/core/pdcompat.py" ]
[ "# The remove_unused_levels defined here was copied based on the source code\n# defined in pandas.core.indexes.muli.py\n\n# For reference, here is a copy of the pandas copyright notice:\n\n# (c) 2011-2012, Lambda Foundry, Inc. and PyData Development Team\n# All rights reserved.\n\n# Copyright (c) 2008-2011 AQR Capital Management, LLC\n# All rights reserved.\n\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n\n# * Neither the name of the copyright holder nor the names of any\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nimport numpy as np\n\n\n# for pandas 0.19\ndef remove_unused_levels(self):\n \"\"\"\n create a new MultiIndex from the current that removing\n unused levels, meaning that they are not expressed in the labels\n The resulting MultiIndex will have the same outward\n appearance, meaning the same .values and ordering. It will also\n be .equals() to the original.\n .. 
versionadded:: 0.20.0\n Returns\n -------\n MultiIndex\n Examples\n --------\n >>> i = pd.MultiIndex.from_product([range(2), list('ab')])\n MultiIndex(levels=[[0, 1], ['a', 'b']],\n labels=[[0, 0, 1, 1], [0, 1, 0, 1]])\n >>> i[2:]\n MultiIndex(levels=[[0, 1], ['a', 'b']],\n labels=[[1, 1], [0, 1]])\n The 0 from the first level is not represented\n and can be removed\n >>> i[2:].remove_unused_levels()\n MultiIndex(levels=[[1], ['a', 'b']],\n labels=[[0, 0], [0, 1]])\n \"\"\"\n import pandas.core.algorithms as algos\n\n new_levels = []\n new_labels = []\n\n changed = False\n for lev, lab in zip(self.levels, self.labels):\n\n # Since few levels are typically unused, bincount() is more\n # efficient than unique() - however it only accepts positive values\n # (and drops order):\n uniques = np.where(np.bincount(lab + 1) > 0)[0] - 1\n has_na = int(len(uniques) and (uniques[0] == -1))\n\n if len(uniques) != len(lev) + has_na:\n # We have unused levels\n changed = True\n\n # Recalculate uniques, now preserving order.\n # Can easily be cythonized by exploiting the already existing\n # \"uniques\" and stop parsing \"lab\" when all items are found:\n uniques = algos.unique(lab)\n if has_na:\n na_idx = np.where(uniques == -1)[0]\n # Just ensure that -1 is in first position:\n uniques[[0, na_idx[0]]] = uniques[[na_idx[0], 0]]\n\n # labels get mapped from uniques to 0:len(uniques)\n # -1 (if present) is mapped to last position\n label_mapping = np.zeros(len(lev) + has_na)\n # ... and reassigned value -1:\n label_mapping[uniques] = np.arange(len(uniques)) - has_na\n\n lab = label_mapping[lab]\n\n # new levels are simple\n lev = lev.take(uniques[has_na:])\n\n new_levels.append(lev)\n new_labels.append(lab)\n\n result = self._shallow_copy()\n\n if changed:\n result._reset_identity()\n result._set_levels(new_levels, validate=False)\n result._set_labels(new_labels, validate=False)\n\n return result\n" ]
[ [ "numpy.where", "numpy.bincount", "pandas.core.algorithms.unique" ] ]
kiukchung/Ax
[ "0f50d94056782d304e573c3c1dde567beb44b65a" ]
[ "ax/core/tests/test_experiment.py" ]
[ "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport logging\nfrom typing import Type\nfrom unittest.mock import patch\n\nimport pandas as pd\nfrom ax.core.arm import Arm\nfrom ax.core.base_trial import TrialStatus\nfrom ax.core.data import Data\nfrom ax.core.experiment import Experiment\nfrom ax.core.map_data import MapData\nfrom ax.core.map_metric import MapMetric\nfrom ax.core.metric import Metric\nfrom ax.core.parameter import FixedParameter, ParameterType\nfrom ax.core.search_space import SearchSpace\nfrom ax.exceptions.core import UnsupportedError\nfrom ax.metrics.branin import BraninMetric\nfrom ax.runners.synthetic import SyntheticRunner\nfrom ax.utils.common.constants import Keys, EXPERIMENT_IS_TEST_WARNING\nfrom ax.utils.common.testutils import TestCase\nfrom ax.utils.testing.core_stubs import (\n get_arm,\n get_branin_arms,\n get_branin_optimization_config,\n get_branin_search_space,\n get_branin_experiment,\n get_branin_experiment_with_timestamp_map_metric,\n get_data,\n get_experiment,\n get_experiment_with_map_data_type,\n get_optimization_config,\n get_search_space,\n get_sobol,\n get_status_quo,\n get_scalarized_outcome_constraint,\n)\n\nDUMMY_RUN_METADATA = {\"test_run_metadata_key\": \"test_run_metadata_value\"}\n\n\nclass ExperimentTest(TestCase):\n def setUp(self):\n self.experiment = get_experiment()\n\n def _setupBraninExperiment(self, n: int) -> Experiment:\n exp = Experiment(\n name=\"test3\",\n search_space=get_branin_search_space(),\n tracking_metrics=[BraninMetric(name=\"b\", param_names=[\"x1\", \"x2\"])],\n runner=SyntheticRunner(),\n )\n batch = exp.new_batch_trial()\n batch.add_arms_and_weights(arms=get_branin_arms(n=n, seed=0))\n batch.run()\n\n batch_2 = exp.new_batch_trial()\n batch_2.add_arms_and_weights(arms=get_branin_arms(n=3 * n, seed=1))\n batch_2.run()\n return exp\n\n def testExperimentInit(self):\n self.assertEqual(self.experiment.name, \"test\")\n self.assertEqual(self.experiment.description, \"test description\")\n self.assertEqual(self.experiment.name, \"test\")\n self.assertIsNotNone(self.experiment.time_created)\n self.assertEqual(self.experiment.experiment_type, None)\n self.assertEqual(self.experiment.num_abandoned_arms, 0)\n\n def testExperimentName(self):\n self.assertTrue(self.experiment.has_name)\n self.experiment.name = None\n self.assertFalse(self.experiment.has_name)\n with self.assertRaises(ValueError):\n self.experiment.name\n self.experiment.name = \"test\"\n\n def testExperimentType(self):\n self.experiment.experiment_type = \"test\"\n self.assertEqual(self.experiment.experiment_type, \"test\")\n\n def testEq(self):\n self.assertEqual(self.experiment, self.experiment)\n\n experiment2 = Experiment(\n name=\"test2\",\n search_space=get_search_space(),\n optimization_config=get_optimization_config(),\n status_quo=get_arm(),\n description=\"test description\",\n )\n self.assertNotEqual(self.experiment, experiment2)\n\n def testDBId(self):\n self.assertIsNone(self.experiment.db_id)\n some_id = 123456789\n self.experiment.db_id = some_id\n self.assertEqual(self.experiment.db_id, some_id)\n\n def testTrackingMetricsMerge(self):\n # Tracking and optimization metrics should get merged\n # m1 is on optimization_config while m3 is not\n exp = Experiment(\n name=\"test2\",\n search_space=get_search_space(),\n optimization_config=get_optimization_config(),\n 
tracking_metrics=[Metric(name=\"m1\"), Metric(name=\"m3\")],\n )\n self.assertEqual(len(exp.optimization_config.metrics) + 1, len(exp.metrics))\n\n def testBasicBatchCreation(self):\n batch = self.experiment.new_batch_trial()\n self.assertEqual(len(self.experiment.trials), 1)\n self.assertEqual(self.experiment.trials[0], batch)\n\n # Try (and fail) to re-attach batch\n with self.assertRaises(ValueError):\n self.experiment._attach_trial(batch)\n\n # Try (and fail) to attach batch to another experiment\n with self.assertRaises(ValueError):\n new_exp = get_experiment()\n new_exp._attach_trial(batch)\n\n def testRepr(self):\n self.assertEqual(\"Experiment(test)\", str(self.experiment))\n\n def testBasicProperties(self):\n self.assertEqual(self.experiment.status_quo, get_status_quo())\n self.assertEqual(self.experiment.search_space, get_search_space())\n self.assertEqual(self.experiment.optimization_config, get_optimization_config())\n self.assertEqual(self.experiment.is_test, True)\n\n def testMetricSetters(self):\n # Establish current metrics size\n self.assertEqual(\n len(get_optimization_config().metrics) + 1, len(self.experiment.metrics)\n )\n\n # Add optimization config with 1 different metric\n opt_config = get_optimization_config()\n opt_config.outcome_constraints[0].metric = Metric(name=\"m3\")\n self.experiment.optimization_config = opt_config\n\n # Verify total metrics size is the same.\n self.assertEqual(\n len(get_optimization_config().metrics) + 1, len(self.experiment.metrics)\n )\n\n # Add optimization config with 1 scalarized constraint composed of 2 metrics\n opt_config = get_optimization_config()\n opt_config.outcome_constraints = opt_config.outcome_constraints + [\n get_scalarized_outcome_constraint()\n ]\n self.experiment.optimization_config = opt_config\n\n # Verify total metrics size is the same.\n self.assertEqual(len(opt_config.metrics) + 1, len(self.experiment.metrics))\n self.assertEqual(\n len(get_optimization_config().metrics) + 3, len(self.experiment.metrics)\n )\n # set back\n self.experiment.optimization_config = get_optimization_config()\n\n # Test adding new tracking metric\n self.experiment.add_tracking_metric(Metric(name=\"m4\"))\n self.assertEqual(\n len(get_optimization_config().metrics) + 2, len(self.experiment.metrics)\n )\n\n # Test adding new tracking metrics\n self.experiment.add_tracking_metrics([Metric(name=\"z1\")])\n self.assertEqual(\n len(get_optimization_config().metrics) + 3, len(self.experiment.metrics)\n )\n\n # Verify update_tracking_metric updates the metric definition\n self.assertIsNone(self.experiment.metrics[\"m4\"].lower_is_better)\n self.experiment.update_tracking_metric(Metric(name=\"m4\", lower_is_better=True))\n self.assertTrue(self.experiment.metrics[\"m4\"].lower_is_better)\n\n # Verify unable to add existing metric\n with self.assertRaises(ValueError):\n self.experiment.add_tracking_metric(Metric(name=\"m4\"))\n\n # Verify unable to add existing metric\n with self.assertRaises(ValueError):\n self.experiment.add_tracking_metrics([Metric(name=\"z1\"), Metric(name=\"m4\")])\n\n # Verify unable to add metric in optimization config\n with self.assertRaises(ValueError):\n self.experiment.add_tracking_metric(Metric(name=\"m1\"))\n\n # Verify unable to add metric in optimization config\n with self.assertRaises(ValueError):\n self.experiment.add_tracking_metrics([Metric(name=\"z2\"), Metric(name=\"m1\")])\n\n # Cannot update metric not already on experiment\n with self.assertRaises(ValueError):\n 
self.experiment.update_tracking_metric(Metric(name=\"m5\"))\n\n # Cannot remove metric not already on experiment\n with self.assertRaises(ValueError):\n self.experiment.remove_tracking_metric(metric_name=\"m5\")\n\n def testSearchSpaceSetter(self):\n one_param_ss = SearchSpace(parameters=[get_search_space().parameters[\"w\"]])\n\n # Verify all search space ok with no trials\n self.experiment.search_space = one_param_ss\n self.assertEqual(len(self.experiment.parameters), 1)\n\n # Reset search space and add batch to trigger validations\n self.experiment.search_space = get_search_space()\n self.experiment.new_batch_trial()\n\n # Try search space with too few parameters\n with self.assertRaises(ValueError):\n self.experiment.search_space = one_param_ss\n\n # Try search space with different type\n bad_type_ss = get_search_space()\n bad_type_ss.parameters[\"x\"]._parameter_type = ParameterType.FLOAT\n with self.assertRaises(ValueError):\n self.experiment.search_space = bad_type_ss\n\n # Try search space with additional parameters\n extra_param_ss = get_search_space()\n extra_param_ss.add_parameter(FixedParameter(\"l\", ParameterType.FLOAT, 0.5))\n with self.assertRaises(ValueError):\n self.experiment.search_space = extra_param_ss\n\n def testStatusQuoSetter(self):\n sq_parameters = self.experiment.status_quo.parameters\n self.experiment.status_quo = None\n self.assertIsNone(self.experiment.status_quo)\n\n # Verify normal update\n sq_parameters[\"w\"] = 3.5\n self.experiment.status_quo = Arm(sq_parameters)\n self.assertEqual(self.experiment.status_quo.parameters[\"w\"], 3.5)\n self.assertEqual(self.experiment.status_quo.name, \"status_quo\")\n self.assertTrue(\"status_quo\" in self.experiment.arms_by_name)\n\n # Verify all None values\n self.experiment.status_quo = Arm({n: None for n in sq_parameters.keys()})\n self.assertIsNone(self.experiment.status_quo.parameters[\"w\"])\n\n # Try extra param\n sq_parameters[\"a\"] = 4\n with self.assertRaises(ValueError):\n self.experiment.status_quo = Arm(sq_parameters)\n\n # Try wrong type\n sq_parameters.pop(\"a\")\n sq_parameters[\"w\"] = \"hello\"\n with self.assertRaises(ValueError):\n self.experiment.status_quo = Arm(sq_parameters)\n\n # Verify arms_by_signature, arms_by_name only contains status_quo\n self.assertEqual(len(self.experiment.arms_by_signature), 1)\n self.assertEqual(len(self.experiment.arms_by_name), 1)\n\n # Change status quo, verify still just 1 arm\n sq_parameters[\"w\"] = 3.6\n self.experiment.status_quo = Arm(sq_parameters)\n self.assertEqual(len(self.experiment.arms_by_signature), 1)\n self.assertEqual(len(self.experiment.arms_by_name), 1)\n\n # Make a batch, add status quo to it, then change exp status quo, verify 2 arms\n batch = self.experiment.new_batch_trial()\n batch.set_status_quo_with_weight(self.experiment.status_quo, 1)\n sq_parameters[\"w\"] = 3.7\n self.experiment.status_quo = Arm(sq_parameters)\n self.assertEqual(len(self.experiment.arms_by_signature), 2)\n self.assertEqual(len(self.experiment.arms_by_name), 2)\n self.assertEqual(self.experiment.status_quo.name, \"status_quo_e0\")\n self.assertTrue(\"status_quo_e0\" in self.experiment.arms_by_name)\n\n # Try missing param\n sq_parameters.pop(\"w\")\n with self.assertRaises(ValueError):\n self.experiment.status_quo = Arm(sq_parameters)\n\n # Actually name the status quo.\n exp = Experiment(\n name=\"test3\",\n search_space=get_branin_search_space(),\n tracking_metrics=[BraninMetric(name=\"b\", param_names=[\"x1\", \"x2\"])],\n runner=SyntheticRunner(),\n )\n batch = 
exp.new_batch_trial()\n arms = get_branin_arms(n=1, seed=0)\n batch.add_arms_and_weights(arms=arms)\n self.assertIsNone(exp.status_quo)\n exp.status_quo = arms[0]\n self.assertEqual(exp.status_quo.name, \"0_0\")\n\n # Try setting sq to existing arm with different name\n with self.assertRaises(ValueError):\n exp.status_quo = Arm(arms[0].parameters, name=\"new_name\")\n\n def testRegisterArm(self):\n # Create a new arm, register on experiment\n parameters = self.experiment.status_quo.parameters\n parameters[\"w\"] = 3.5\n arm = Arm(name=\"my_arm_name\", parameters=parameters)\n self.experiment._register_arm(arm)\n self.assertEqual(self.experiment.arms_by_name[arm.name], arm)\n self.assertEqual(self.experiment.arms_by_signature[arm.signature], arm)\n\n def testFetchAndStoreData(self):\n n = 10\n exp = self._setupBraninExperiment(n)\n batch = exp.trials[0]\n batch.mark_completed()\n\n # Test fetch data\n batch_data = batch.fetch_data()\n self.assertEqual(len(batch_data.df), n)\n\n exp_data = exp.fetch_data()\n exp_data2 = exp.metrics[\"b\"].fetch_experiment_data(exp)\n self.assertEqual(len(exp_data2.df), 4 * n)\n self.assertEqual(len(exp_data.df), 4 * n)\n self.assertEqual(len(exp.arms_by_name), 4 * n)\n\n # Verify that `metrics` kwarg to `experiment.fetch_data` is respected.\n exp.add_tracking_metric(Metric(name=\"not_yet_on_experiment\"))\n exp.attach_data(\n Data(\n df=pd.DataFrame.from_records(\n [\n {\n \"arm_name\": \"0_0\",\n \"metric_name\": \"not_yet_on_experiment\",\n \"mean\": 3,\n \"sem\": 0,\n \"trial_index\": 0,\n }\n ]\n )\n )\n )\n self.assertEqual(\n set(\n exp.fetch_data(metrics=[Metric(name=\"not_yet_on_experiment\")])\n .df[\"metric_name\"]\n .values\n ),\n {\"not_yet_on_experiment\"},\n )\n\n # Verify data lookup includes trials attached from `fetch_data`.\n self.assertEqual(len(exp.lookup_data_for_trial(1)[0].df), 30)\n\n # Test local storage\n t1 = exp.attach_data(batch_data)\n t2 = exp.attach_data(exp_data)\n\n full_dict = exp.data_by_trial\n self.assertEqual(len(full_dict), 2) # data for 2 trials\n self.assertEqual(len(full_dict[0]), 5) # 5 data objs for batch 0\n\n # Test retrieving original batch 0 data\n self.assertEqual(len(exp.lookup_data_for_ts(t1).df), n)\n self.assertEqual(len(exp.lookup_data_for_trial(0)[0].df), n)\n\n # Test retrieving full exp data\n self.assertEqual(len(exp.lookup_data_for_ts(t2).df), 4 * n)\n\n with self.assertRaisesRegex(ValueError, \".* for metric\"):\n exp.attach_data(batch_data, combine_with_last_data=True)\n\n self.assertEqual(len(full_dict[0]), 5) # 5 data objs for batch 0\n new_data = Data(\n df=pd.DataFrame.from_records(\n [\n {\n \"arm_name\": \"0_0\",\n \"metric_name\": \"z\",\n \"mean\": 3,\n \"sem\": 0,\n \"trial_index\": 0,\n }\n ]\n )\n )\n t3 = exp.attach_data(new_data, combine_with_last_data=True)\n # still 5 data objs, since we combined last one\n self.assertEqual(len(full_dict[0]), 5)\n self.assertIn(\"z\", exp.lookup_data_for_ts(t3).df[\"metric_name\"].tolist())\n\n # Verify we don't get the data if the trial is abandoned\n batch._status = TrialStatus.ABANDONED\n self.assertEqual(len(batch.fetch_data().df), 0)\n self.assertEqual(len(exp.fetch_data().df), 3 * n)\n\n # Verify we do get the stored data if there are an unimplemented metrics.\n del exp._data_by_trial[0][t3] # Remove attached data for nonexistent metric.\n # Remove implemented metric that is `available_while_running`\n # (and therefore not pulled from cache).\n exp.remove_tracking_metric(metric_name=\"b\")\n exp.add_tracking_metric(Metric(name=\"b\")) # Add 
unimplemented metric.\n batch._status = TrialStatus.COMPLETED\n # Data should be getting looked up now.\n self.assertEqual(batch.fetch_data(), exp.lookup_data_for_ts(t1))\n self.assertEqual(exp.fetch_data(), exp.lookup_data_for_ts(t1))\n metrics_in_data = set(batch.fetch_data().df[\"metric_name\"].values)\n # Data for metric \"z\" should no longer be present since we removed it.\n self.assertEqual(metrics_in_data, {\"b\"})\n\n # Verify that `metrics` kwarg to `experiment.fetch_data` is respected\n # when pulling looked-up data.\n self.assertEqual(\n exp.fetch_data(metrics=[Metric(name=\"not_on_experiment\")]), Data()\n )\n\n def testOverwriteExistingData(self):\n n = 10\n exp = self._setupBraninExperiment(n)\n\n # automatically attaches data\n data = exp.fetch_data()\n\n # can't set both combine_with_last_data and overwrite_existing_data\n with self.assertRaises(UnsupportedError):\n exp.attach_data(\n data, combine_with_last_data=True, overwrite_existing_data=True\n )\n\n # data exists for two trials\n # data has been attached once for each trial\n self.assertEqual(len(exp._data_by_trial), 2)\n self.assertEqual(len(exp._data_by_trial[0]), 1)\n self.assertEqual(len(exp._data_by_trial[1]), 1)\n\n exp.attach_data(data)\n # data has been attached twice for each trial\n self.assertEqual(len(exp._data_by_trial), 2)\n self.assertEqual(len(exp._data_by_trial[0]), 2)\n self.assertEqual(len(exp._data_by_trial[1]), 2)\n\n ts = exp.attach_data(data, overwrite_existing_data=True)\n # previous two attachment are overwritten,\n # now only one data (most recent one) per trial\n self.assertEqual(len(exp._data_by_trial), 2)\n self.assertEqual(len(exp._data_by_trial[0]), 1)\n self.assertEqual(len(exp._data_by_trial[1]), 1)\n self.assertTrue(ts in exp._data_by_trial[0])\n self.assertTrue(ts in exp._data_by_trial[1])\n\n def testEmptyMetrics(self):\n empty_experiment = Experiment(\n name=\"test_experiment\", search_space=get_search_space()\n )\n self.assertEqual(empty_experiment.num_trials, 0)\n with self.assertRaises(ValueError):\n empty_experiment.fetch_data()\n batch = empty_experiment.new_batch_trial()\n batch.mark_running(no_runner_required=True)\n self.assertEqual(empty_experiment.num_trials, 1)\n with self.assertRaises(ValueError):\n batch.fetch_data()\n empty_experiment.add_tracking_metric(Metric(name=\"ax_test_metric\"))\n self.assertTrue(empty_experiment.fetch_data().df.empty)\n empty_experiment.attach_data(get_data())\n batch.mark_completed()\n self.assertFalse(empty_experiment.fetch_data().df.empty)\n\n def testNumArmsNoDeduplication(self):\n exp = Experiment(name=\"test_experiment\", search_space=get_search_space())\n arm = get_arm()\n exp.new_batch_trial().add_arm(arm)\n trial = exp.new_batch_trial().add_arm(arm)\n self.assertEqual(exp.sum_trial_sizes, 2)\n self.assertEqual(len(exp.arms_by_name), 1)\n trial.mark_arm_abandoned(trial.arms[0].name)\n self.assertEqual(exp.num_abandoned_arms, 1)\n\n def testExperimentWithoutName(self):\n exp = Experiment(\n search_space=get_branin_search_space(),\n tracking_metrics=[BraninMetric(name=\"b\", param_names=[\"x1\", \"x2\"])],\n runner=SyntheticRunner(),\n )\n self.assertEqual(\"Experiment(None)\", str(exp))\n batch = exp.new_batch_trial()\n batch.add_arms_and_weights(arms=get_branin_arms(n=5, seed=0))\n batch.run()\n self.assertEqual(batch.run_metadata, {\"name\": \"0\"})\n\n def testExperimentRunner(self):\n original_runner = SyntheticRunner()\n self.experiment.runner = original_runner\n batch = self.experiment.new_batch_trial()\n batch.run()\n 
self.assertEqual(batch.runner, original_runner)\n\n # Simulate a failed run/deployment, in which the runner is attached\n # but the actual run fails, and so the trial remains CANDIDATE.\n candidate_batch = self.experiment.new_batch_trial()\n candidate_batch.run()\n candidate_batch._status = TrialStatus.CANDIDATE\n self.assertEqual(self.experiment.trials_expecting_data, [batch])\n tbs = self.experiment.trials_by_status # All statuses should be present\n self.assertEqual(len(tbs), len(TrialStatus))\n self.assertEqual(tbs[TrialStatus.RUNNING], [batch])\n self.assertEqual(tbs[TrialStatus.CANDIDATE], [candidate_batch])\n tibs = self.experiment.trial_indices_by_status\n self.assertEqual(len(tibs), len(TrialStatus))\n self.assertEqual(tibs[TrialStatus.RUNNING], {0})\n self.assertEqual(tibs[TrialStatus.CANDIDATE], {1})\n\n identifier = {\"new_runner\": True}\n new_runner = SyntheticRunner(dummy_metadata=identifier)\n\n self.experiment.reset_runners(new_runner)\n # Don't update trials that have been run.\n self.assertEqual(batch.runner, original_runner)\n # Update default runner\n self.assertEqual(self.experiment.runner, new_runner)\n # Update candidate trial runners.\n self.assertEqual(self.experiment.trials[1].runner, new_runner)\n\n def testFetchTrialsData(self):\n exp = self._setupBraninExperiment(n=5)\n batch_0 = exp.trials[0]\n batch_1 = exp.trials[1]\n batch_0.mark_completed()\n batch_1.mark_completed()\n batch_0_data = exp.fetch_trials_data(trial_indices=[0])\n self.assertEqual(set(batch_0_data.df[\"trial_index\"].values), {0})\n self.assertEqual(\n set(batch_0_data.df[\"arm_name\"].values), {a.name for a in batch_0.arms}\n )\n batch_1_data = exp.fetch_trials_data(trial_indices=[1])\n self.assertEqual(set(batch_1_data.df[\"trial_index\"].values), {1})\n self.assertEqual(\n set(batch_1_data.df[\"arm_name\"].values), {a.name for a in batch_1.arms}\n )\n self.assertEqual(\n exp.fetch_trials_data(trial_indices=[0, 1]),\n Data.from_multiple_data([batch_0_data, batch_1_data]),\n )\n\n # Since NoisyFunction metric has overwrite_existing_data = False,\n # we should have two dfs per trial now\n self.assertEqual(len(exp.data_by_trial[0]), 2)\n\n with self.assertRaisesRegex(ValueError, \".* not associated .*\"):\n exp.fetch_trials_data(trial_indices=[2])\n # Try to fetch data when there are only metrics and no attached data.\n exp.remove_tracking_metric(metric_name=\"b\") # Remove implemented metric.\n exp.add_tracking_metric(Metric(name=\"b\")) # Add unimplemented metric.\n self.assertEqual(len(exp.fetch_trials_data(trial_indices=[0]).df), 5)\n # Try fetching attached data.\n exp.attach_data(batch_0_data)\n exp.attach_data(batch_1_data)\n self.assertEqual(exp.fetch_trials_data(trial_indices=[0]), batch_0_data)\n self.assertEqual(exp.fetch_trials_data(trial_indices=[1]), batch_1_data)\n self.assertEqual(set(batch_0_data.df[\"trial_index\"].values), {0})\n self.assertEqual(\n set(batch_0_data.df[\"arm_name\"].values), {a.name for a in batch_0.arms}\n )\n\n def test_immutable_search_space_and_opt_config(self):\n mutable_exp = self._setupBraninExperiment(n=5)\n self.assertFalse(mutable_exp.immutable_search_space_and_opt_config)\n immutable_exp = Experiment(\n name=\"test4\",\n search_space=get_branin_search_space(),\n tracking_metrics=[BraninMetric(name=\"b\", param_names=[\"x1\", \"x2\"])],\n optimization_config=get_branin_optimization_config(),\n runner=SyntheticRunner(),\n properties={Keys.IMMUTABLE_SEARCH_SPACE_AND_OPT_CONF: True},\n )\n 
self.assertTrue(immutable_exp.immutable_search_space_and_opt_config)\n immutable_exp.new_batch_trial()\n with self.assertRaises(UnsupportedError):\n immutable_exp.optimization_config = get_branin_optimization_config()\n with self.assertRaises(UnsupportedError):\n immutable_exp.search_space = get_branin_search_space()\n\n # Check that passing the property as just a string is processed\n # correctly.\n immutable_exp_2 = Experiment(\n name=\"test4\",\n search_space=get_branin_search_space(),\n tracking_metrics=[BraninMetric(name=\"b\", param_names=[\"x1\", \"x2\"])],\n runner=SyntheticRunner(),\n properties={Keys.IMMUTABLE_SEARCH_SPACE_AND_OPT_CONF.value: True},\n )\n self.assertTrue(immutable_exp_2.immutable_search_space_and_opt_config)\n\n def test_fetch_as_class(self):\n class MyMetric(Metric):\n @property\n def fetch_multi_group_by_metric(self) -> Type[Metric]:\n return Metric\n\n m = MyMetric(name=\"test_metric\")\n exp = Experiment(\n name=\"test\",\n search_space=get_branin_search_space(),\n tracking_metrics=[m],\n runner=SyntheticRunner(),\n )\n self.assertEqual(exp._metrics_by_class(), {Metric: [m]})\n\n @patch(\n # No-op mock just to record calls to `fetch_experiment_data_multi`.\n f\"{BraninMetric.__module__}.BraninMetric.fetch_experiment_data_multi\",\n side_effect=BraninMetric.fetch_experiment_data_multi,\n )\n def test_prefer_lookup_where_possible(self, mock_fetch_exp_data_multi):\n # By default, `BraninMetric` is available while trial is running.\n exp = self._setupBraninExperiment(n=5)\n exp.fetch_data()\n # Since metric is available while trial is running, we should be\n # refetching the data and no data should be attached to experiment.\n mock_fetch_exp_data_multi.assert_called_once()\n self.assertEqual(len(exp._data_by_trial), 2)\n\n with patch(\n f\"{BraninMetric.__module__}.BraninMetric.is_available_while_running\",\n return_value=False,\n ):\n exp = self._setupBraninExperiment(n=5)\n exp.fetch_data()\n # 1. No completed trials => no fetch case.\n mock_fetch_exp_data_multi.reset_mock()\n dat = exp.fetch_data()\n mock_fetch_exp_data_multi.assert_not_called()\n # Data should be empty since there are no completed trials.\n self.assertTrue(dat.df.empty)\n\n # 2. Newly completed trials => fetch case.\n mock_fetch_exp_data_multi.reset_mock()\n exp.trials.get(0).mark_completed()\n exp.trials.get(1).mark_completed()\n dat = exp.fetch_data()\n # `fetch_experiment_data_multi` should be called N=number of trials times.\n self.assertEqual(len(mock_fetch_exp_data_multi.call_args_list), 2)\n # Data should no longer be empty since there are completed trials.\n self.assertFalse(dat.df.empty)\n # Data for two trials should get attached.\n self.assertEqual(len(exp._data_by_trial), 2)\n\n # 3. 
Previously fetched => look up in cache case.\n mock_fetch_exp_data_multi.reset_mock()\n # All fetched data should get cached, so no fetch should happen next time.\n exp.fetch_data()\n mock_fetch_exp_data_multi.assert_not_called()\n\n def testWarmStartFromOldExperiment(self):\n # create old_experiment\n len_old_trials = 5\n i_failed_trial = 3\n old_experiment = get_branin_experiment()\n for i_old_trial in range(len_old_trials):\n sobol_run = get_sobol(search_space=old_experiment.search_space).gen(n=1)\n trial = old_experiment.new_trial(generator_run=sobol_run)\n trial.mark_running(no_runner_required=True)\n if i_old_trial == i_failed_trial:\n trial.mark_failed()\n else:\n trial.mark_completed()\n # make metric noiseless for exact reproducibility\n old_experiment.optimization_config.objective.metric.noise_sd = 0\n old_experiment.fetch_data()\n\n # should fail if new_experiment has trials\n new_experiment = get_branin_experiment(with_trial=True)\n with self.assertRaisesRegex(ValueError, \"Experiment.*has.*trials\"):\n new_experiment.warm_start_from_old_experiment(old_experiment=old_experiment)\n\n # should fail if search spaces are different\n with self.assertRaisesRegex(ValueError, \"mismatch in search space parameters\"):\n self.experiment.warm_start_from_old_experiment(\n old_experiment=old_experiment\n )\n\n # check that all non-failed trials are copied to new_experiment\n new_experiment = get_branin_experiment()\n # make metric noiseless for exact reproducibility\n new_experiment.optimization_config.objective.metric.noise_sd = 0\n for _, trial in old_experiment.trials.items():\n trial._run_metadata = DUMMY_RUN_METADATA\n new_experiment.warm_start_from_old_experiment(\n old_experiment=old_experiment, copy_run_metadata=True\n )\n self.assertEqual(len(new_experiment.trials), len(old_experiment.trials) - 1)\n i_old_trial = 0\n for _, trial in new_experiment.trials.items():\n # skip failed trial\n i_old_trial += i_old_trial == i_failed_trial\n self.assertEqual(\n trial.arm.parameters, old_experiment.trials[i_old_trial].arm.parameters\n )\n self.assertRegex(\n trial._properties[\"source\"], \"Warm start.*Experiment.*trial\"\n )\n self.assertDictEqual(trial.run_metadata, DUMMY_RUN_METADATA)\n i_old_trial += 1\n\n # Check that the data was attached for correct trials\n old_df = old_experiment.fetch_data().df\n new_df = new_experiment.fetch_data().df\n\n self.assertEqual(len(new_df), len_old_trials - 1)\n pd.testing.assert_frame_equal(\n old_df.drop([\"arm_name\", \"trial_index\"], axis=1),\n new_df.drop([\"arm_name\", \"trial_index\"], axis=1),\n )\n\n def test_is_test_warning(self):\n experiments_module = \"ax.core.experiment\"\n with self.subTest(\"it warns on construction for a test\"):\n with self.assertLogs(experiments_module, level=logging.INFO) as logger:\n exp = Experiment(\n search_space=get_search_space(),\n is_test=True,\n )\n self.assertIn(\n f\"INFO:{experiments_module}:{EXPERIMENT_IS_TEST_WARNING}\",\n logger.output,\n )\n\n with self.subTest(\"it does not warn on construction for a non test\"):\n with self.assertLogs(experiments_module, level=logging.INFO) as logger:\n logging.getLogger(experiments_module).info(\n \"there must be at least one log or the assertLogs statement fails\"\n )\n exp = Experiment(\n search_space=get_search_space(),\n is_test=False,\n )\n self.assertNotIn(\n f\"INFO:{experiments_module}:{EXPERIMENT_IS_TEST_WARNING}\",\n logger.output,\n )\n\n with self.subTest(\"it warns on setting is_test to True\"):\n with self.assertLogs(experiments_module, 
level=logging.INFO) as logger:\n exp.is_test = True\n self.assertIn(\n f\"INFO:{experiments_module}:{EXPERIMENT_IS_TEST_WARNING}\",\n logger.output,\n )\n\n with self.subTest(\"it does not warn on setting is_test to False\"):\n with self.assertLogs(experiments_module, level=logging.INFO) as logger:\n logging.getLogger(experiments_module).info(\n \"there must be at least one log or the assertLogs statement fails\"\n )\n exp.is_test = False\n self.assertNotIn(\n f\"INFO:{experiments_module}:{EXPERIMENT_IS_TEST_WARNING}\",\n logger.output,\n )\n\n\nclass ExperimentWithMapDataTest(TestCase):\n def setUp(self):\n self.experiment = get_experiment_with_map_data_type()\n\n def _setupBraninExperiment(self, n: int, incremental: bool = False) -> Experiment:\n exp = get_branin_experiment_with_timestamp_map_metric(incremental=incremental)\n batch = exp.new_batch_trial()\n batch.add_arms_and_weights(arms=get_branin_arms(n=n, seed=0))\n batch.run()\n\n batch_2 = exp.new_batch_trial()\n batch_2.add_arms_and_weights(arms=get_branin_arms(n=3 * n, seed=1))\n batch_2.run()\n return exp\n\n def testFetchDataWithMapData(self):\n evaluations = {\n \"0_0\": [\n ({\"epoch\": 1}, {\"no_fetch_impl_metric\": (3.7, 0.5)}),\n ({\"epoch\": 2}, {\"no_fetch_impl_metric\": (3.8, 0.5)}),\n ({\"epoch\": 3}, {\"no_fetch_impl_metric\": (3.9, 0.5)}),\n ({\"epoch\": 4}, {\"no_fetch_impl_metric\": (4.0, 0.5)}),\n ],\n }\n\n self.experiment.add_tracking_metric(\n metric=MapMetric(name=\"no_fetch_impl_metric\")\n )\n self.experiment.new_trial()\n self.experiment.trials[0].mark_running(no_runner_required=True)\n first_epoch = MapData.from_map_evaluations(\n evaluations={\n arm_name: partial_results[0:1]\n for arm_name, partial_results in evaluations.items()\n },\n trial_index=0,\n )\n self.experiment.attach_data(first_epoch)\n remaining_epochs = MapData.from_map_evaluations(\n evaluations={\n arm_name: partial_results[1:4]\n for arm_name, partial_results in evaluations.items()\n },\n trial_index=0,\n )\n self.experiment.attach_data(remaining_epochs)\n self.experiment.trials[0].mark_completed()\n\n expected_data = remaining_epochs\n actual_data = self.experiment.lookup_data()\n self.assertEqual(expected_data, actual_data)\n\n def testFetchTrialsData(self):\n exp = self._setupBraninExperiment(n=5)\n batch_0 = exp.trials[0]\n batch_1 = exp.trials[1]\n batch_0.mark_completed()\n batch_1.mark_completed()\n batch_0_data = exp.fetch_trials_data(trial_indices=[0])\n self.assertEqual(set(batch_0_data.df[\"trial_index\"].values), {0})\n self.assertEqual(\n set(batch_0_data.df[\"arm_name\"].values), {a.name for a in batch_0.arms}\n )\n batch_1_data = exp.fetch_trials_data(trial_indices=[1])\n self.assertEqual(set(batch_1_data.df[\"trial_index\"].values), {1})\n self.assertEqual(\n set(batch_1_data.df[\"arm_name\"].values), {a.name for a in batch_1.arms}\n )\n self.assertEqual(\n exp.fetch_trials_data(trial_indices=[0, 1]),\n MapData.from_multiple_data([batch_0_data, batch_1_data]),\n )\n\n # Since NoisyFunctionMap metric has overwrite_existing_data = True,\n # we should only have one df per trial now\n self.assertEqual(len(exp.data_by_trial[0]), 1)\n\n with self.assertRaisesRegex(ValueError, \".* not associated .*\"):\n exp.fetch_trials_data(trial_indices=[2])\n # Try to fetch data when there are only metrics and no attached data.\n exp.remove_tracking_metric(metric_name=\"b\") # Remove implemented metric.\n exp.add_tracking_metric(MapMetric(name=\"b\")) # Add unimplemented metric.\n 
self.assertEqual(len(exp.fetch_trials_data(trial_indices=[0]).df), 30)\n # Try fetching attached data.\n exp.attach_data(batch_0_data)\n exp.attach_data(batch_1_data)\n self.assertEqual(exp.fetch_trials_data(trial_indices=[0]), batch_0_data)\n self.assertEqual(exp.fetch_trials_data(trial_indices=[1]), batch_1_data)\n self.assertEqual(set(batch_0_data.df[\"trial_index\"].values), {0})\n self.assertEqual(\n set(batch_0_data.df[\"arm_name\"].values), {a.name for a in batch_0.arms}\n )\n\n def testFetchTrialsDataIncremental(self):\n exp = self._setupBraninExperiment(n=5, incremental=True)\n\n first_data = exp.fetch_trials_data(trial_indices=[0])\n self.assertEqual(set(first_data.df[\"timestamp\"].values), {0})\n\n more_data = exp.fetch_trials_data(trial_indices=[0])\n self.assertEqual(set(more_data.df[\"timestamp\"].values), {1})\n\n # Since we're using BraninIncrementalTimestampMetric,\n # which has combine_with_last_data = True,\n # the cached data should be merged and contain both timestamps\n self.assertEqual(len(exp.data_by_trial[0]), 1)\n looked_up_data = exp.lookup_data()\n self.assertEqual(set(looked_up_data.df[\"timestamp\"].values), {0, 1})\n" ]
[ [ "pandas.DataFrame.from_records" ] ]
samqws-marketing/electronicarts_ava-capture
[ "a04e5f9a7ee817317d0d58ce800eefc6bf4bd150" ]
[ "capture-node/raw_file_format_readers.py" ]
[ "# Copyright (C) 2019 Electronic Arts Inc. All rights reserved.\n\nimport os\nimport struct\nimport cv2\nimport numpy as np\n\nimport lz4.block as lz4block\n\n'''\n Example Usage:\n \n # This script extracts all frames of the recorded file test.ava and outputs them as JPG and TIF images.\n\n from raw_file_format_readers import AvaSequenceFileReader\n\n reader = AvaSequenceFileReader('test.ava')\n for i in range(reader.frame_count()):\n \n img = reader.frame_as_cv2_sRGB_8bit(i)\n cv2.imwrite('test_%04d.jpg' % i, img) # Write 8bit sRGB image as JPG\n\n img = reader.frame_as_cv2_LinearRGB_16bit(i)\n cv2.imwrite('test_%04d.tif' % i, img) # Write 16bit Linear RGB image as TIF\n\n\n'''\n\ndef Linear_to_sRGB(image):\n ''' Image has to be in the 16 bit range (0-65535.0) '''\n a = 0.055\n x = image / 65535.0\n return (np.where(x <= 0.00313066844, 0, (1 + a) * cv2.pow(x, 1 / 2.4) - a) ) * 65535.0\n\ndef resize_image(img, width=None, height=None, max_side=None, interpolation=cv2.INTER_AREA):\n original_height, original_width = img.shape[:2]\n if max_side:\n if original_height>original_width:\n height = max_side\n width = None\n else:\n width = max_side\n height = None\n\n if width and height:\n # Set width and height\n return cv2.resize(img, (width, height), interpolation=interpolation)\n if width:\n # Set width and preserve ratio\n return cv2.resize(img, (width, width*original_height//original_width), interpolation=interpolation)\n if height:\n # Set height and preserve ratio\n return cv2.resize(img, (height*original_width//original_height, height), interpolation=interpolation)\n\ndef rotate_img(img, angle):\n if angle == 180:\n return cv2.flip(cv2.flip(img,0),1)\n elif angle == 90:\n return cv2.flip(cv2.transpose(img),1)\n elif angle == -90 or angle == 270:\n return cv2.transpose(cv2.flip(img,1))\n return img\n\ndef raw_processing_to_float32_linear(raw_img, bayer, blacklevel, bitcount, kB, kG, kR, resize_max_side=None):\n\n img = raw_img\n\n # Debayer\n if bayer == 'BGGR':\n img = cv2.cvtColor(img, cv2.COLOR_BAYER_RG2RGB)\n elif bayer == 'RGGB':\n img = cv2.cvtColor(img, cv2.COLOR_BAYER_BG2RGB)\n elif bayer == 'GBRG':\n img = cv2.cvtColor(img, cv2.COLOR_BAYER_GR2RGB)\n elif bayer == 'GRBG':\n img = cv2.cvtColor(img, cv2.COLOR_BAYER_GB2RGB)\n\n # Resize Image\n if resize_max_side:\n img = resize_image(img, max_side=resize_max_side)\n\n # Black point correction\n image_bpp = (8 if img.dtype==np.uint8 else 16)\n max_value = (2 ** image_bpp - 1)\n img = np.clip(img, blacklevel, max_value) - blacklevel\n\n # # 10,12,14 bit images need to be moved from LSB to MSB\n if bitcount > 8:\n if np.uint16 != img.dtype:\n raise Exception('Images with bitcount higher than 8 should be stored as 16bit')\n img = img << (16 - bitcount)\n\n # Convert image to Float32\n if img.dtype == np.uint8 or img.dtype == np.uint16:\n img = img.astype(np.float32) / float(max_value)\n else:\n raise Exception('Unknown input image format')\n\n # Color Correction\n if len(img.shape)>2:\n # COLOR\n\n mat = np.diag(np.array([kB,kG,kR])).astype(np.float32)\n #mat = mat / np.max(mat)\n img = np.matmul(img, mat)\n\n # Image is in Linear RGB, always 32 bit float\n return img # img_float32_linearRGB\n\ndef raw_processing_to_16bit_linear(raw_img, bayer, blacklevel, bitcount, kB, kG, kR, resize_max_side=None):\n\n img = raw_img\n\n # Debayer\n if bayer == 'BGGR':\n img = cv2.cvtColor(img, cv2.COLOR_BAYER_RG2RGB)\n elif bayer == 'RGGB':\n img = cv2.cvtColor(img, cv2.COLOR_BAYER_BG2RGB)\n elif bayer == 'GBRG':\n img = cv2.cvtColor(img, 
cv2.COLOR_BAYER_GR2RGB)\n elif bayer == 'GRBG':\n img = cv2.cvtColor(img, cv2.COLOR_BAYER_GB2RGB)\n\n # Resize Image\n if resize_max_side:\n img = resize_image(img, max_side=resize_max_side)\n\n # Black point correction\n image_bpp = (8 if img.dtype==np.uint8 else 16)\n max_value = (2 ** image_bpp - 1)\n img = np.clip(img, blacklevel, max_value) - blacklevel\n\n # # 10,12,14 bit images need to be moved from LSB to MSB\n if bitcount > 8:\n if np.uint16 != img.dtype:\n raise Exception('Images with bitcount higher than 8 should be stored as 16bit')\n img = img << (16 - bitcount)\n\n # Color Correction\n if len(img.shape)>2:\n # COLOR\n # integer color balance\n # the image always gets upgraded to 16 bit for color correction\n if np.uint8 == img.dtype:\n # input is 8 bit color\n mat = np.diag(np.array([255*kB,255*kG,255*kR])).astype(np.uint32)\n img = np.matmul(img.astype(np.uint32), mat)\n img = np.clip(img,0,65535).astype(np.uint16)\n elif np.uint16 == img.dtype:\n # input is 16 bit color\n mat = np.diag(np.array([65535*kB,65535*kG,65535*kR])).astype(np.uint32)\n img = np.matmul(img.astype(np.uint32), mat)\n img = np.clip(img >> 16,0,65535).astype(np.uint16)\n else:\n raise Exception('Invalid bit depth for raw image')\n else:\n # GRAYSCALE\n # upgrade image to 16 bit\n if np.uint8 == img.dtype:\n img = img.astype(np.uint16) << 8\n\n # Image is in Linear RGB, always 16 bit\n return img # img_uint16_linearRGB\n\nclass AvaSequenceFileReader():\n\n def __init__(self, filename, raise_error_on_missing_frame=False):\n\n self._f = None\n self._raise_error_on_missing_frame = raise_error_on_missing_frame\n self.filename = filename\n with open(self.filename, 'rb') as f:\n\n self.file_size = os.fstat(f.fileno()).st_size\n\n # File Header\n # unsigned char magic; // 0xED\n # unsigned char version; // 1\n # unsigned char channels; // 1 or 3\n # unsigned char bitcount; // 8..16\n # unsigned int width;\n # unsigned int height;\n # unsigned int blacklevel;\n # unsigned char bayer0; // first row, first pixel\n # unsigned char bayer1; // first row, second pixel\n # unsigned char bayer2; // second row, first pixel\n # unsigned char bayer3; // second row, second pixel\n # float kR;\n # float kG;\n # float kB;\n # char compression[4];\n # unsigned long long index_start_offset; // offset un bytes from the start of the file where the index will start\n\n header_format = 'BBBBiii4sfff4sQ'\n header_size = struct.calcsize(header_format)\n\n # Read Header\n header_buffer = f.read(header_size)\n magic,version,channels,self.bitcount,self.width,self.height,self.blacklevel,self.bayer,self.kR,self.kG,self.kB,compression,index_offset = struct.unpack(header_format, header_buffer)\n\n self.bayer = self.bayer.decode(\"utf-8\")\n\n if magic != 0xED:\n raise Exception('Invalid Ava Sequence file (magic)')\n if version != 1:\n raise Exception('Invalid Ava Sequence file (version)')\n if compression.decode('utf-8')[:3] != 'LZ4':\n raise Exception('Invalid Ava Sequence file (unknown compression)')\n\n self._frame_count = (self.file_size - index_offset)//8\n self.index_offset = index_offset\n\n # Read Frame index\n index_size = self.file_size - index_offset\n f.seek(index_offset)\n self._frame_indices = np.frombuffer(f.read(index_size), dtype=np.uint64)\n\n byteperpixel = 2 if self.bitcount > 8 else 1\n self._img_data_size = byteperpixel*self.width*self.height\n\n if self._frame_indices.shape[0] != self._frame_count:\n raise Exception('Invalid Ava Sequence file (invalid index size)')\n\n def frame_count(self):\n return 
self._frame_count\n\n def _get_frame_offset_skip(self, frame_index, is_backward=True):\n while frame_index < self._frame_count and not self._frame_indices[frame_index]:\n frame_index = frame_index + (-1 if is_backward else 1)\n if frame_index < 0 or frame_index >= self._frame_count:\n return None\n return int(self._frame_indices[frame_index])\n\n def _compute_frame_size(self, frame_index):\n # Compute size of one frame, by looking at the index of the next frame (or the index if this is the last frame)\n offset_of_next_frame = 0\n if frame_index<self._frame_count-1:\n offset_of_next_frame = self._get_frame_offset_skip(frame_index+1, is_backward=False)\n if not offset_of_next_frame:\n offset_of_next_frame = self.index_offset\n return offset_of_next_frame - self._get_frame_offset_skip(frame_index)\n\n def _read_frame(self, index):\n\n # open .ava file if needed\n if not self._f:\n self._f = open(self.filename, 'rb', 32*1024*1024)\n\n # Read one frame from .ava file\n frame_offset = self._get_frame_offset_skip(index)\n self._f.seek(frame_offset)\n buf = self._f.read(self._compute_frame_size(index))\n\n return buf\n\n def _read_one_frame_16bit_linear(self, frame_index, resize_max_side):\n\n if frame_index<0 or frame_index>=self._frame_count:\n raise Exception('Invalid frame index %s' % frame_index)\n\n if self._raise_error_on_missing_frame and not self._frame_indices[frame_index]:\n raise Exception('Missing frame index %s' % frame_index)\n\n compressed_buffer = self._read_frame(frame_index)\n\n buffer = lz4block.decompress(compressed_buffer, uncompressed_size=self._img_data_size)\n raw_img = np.fromstring(buffer, np.uint8 if self.bitcount==8 else np.uint16).reshape((self.height,self.width))\n return raw_processing_to_16bit_linear(raw_img, self.bayer, self.blacklevel, self.bitcount, self.kB, self.kG, self.kR, resize_max_side=resize_max_side)\n\n def frame_as_cv2_sRGB_8bit(self, frame_index, resize_max_side=None, rotation_angle=0):\n img_16bit_linear = self._read_one_frame_16bit_linear(frame_index, resize_max_side=resize_max_side)\n return rotate_img((np.clip(Linear_to_sRGB(img_16bit_linear).astype(np.uint16),0,65535) >> 8).astype(np.uint8), rotation_angle)\n\n def frame_as_cv2_LinearRGB_16bit(self, frame_index, resize_max_side=None, rotation_angle=0):\n return rotate_img(self._read_one_frame_16bit_linear(frame_index, resize_max_side=resize_max_side), rotation_angle)\n\nclass AvaRawImageFileReader():\n def __init__(self, filename):\n self.filename = filename\n with open(self.filename, 'rb') as f:\n buffer = f.read()\n\n # unsigned char magic; // 0xED\n # unsigned char version; // 1\n # unsigned char channels; // 1 or 3\n # unsigned char bitcount; // 8..16\n # unsigned int width;\n # unsigned int height;\n # unsigned int blacklevel;\n # unsigned char bayer0; // first row, first pixel\n # unsigned char bayer1; // first row, second pixel\n # unsigned char bayer2; // second row, first pixel\n # unsigned char bayer3; // second row, second pixel\n # float kR;\n # float kG;\n # float kB;\n\n footer_format = 'BBBBiii4sfff'\n raw_footer_size = struct.calcsize(footer_format)\n magic,version,channels,bitcount,width,height,blacklevel,bayer,kR,kG,kB = struct.unpack(footer_format, buffer[-raw_footer_size:])\n\n bayer = bayer.decode(\"utf-8\")\n\n if magic != 0xED:\n raise Exception('Invalid Ava RAW file (magic)')\n if version != 1:\n raise Exception('Invalid Ava RAW file (version)')\n if channels != 1 and channels != 3:\n raise Exception('Invalid Ava RAW file (channels)')\n\n tif_data = 
buffer[:len(buffer)-raw_footer_size]\n img = cv2.imdecode(np.asarray(bytearray(tif_data), dtype=np.uint8), cv2.IMREAD_UNCHANGED)\n\n # RAW Processing\n self.img_uint16_linearRGB = raw_processing_to_16bit_linear(img,\n bayer,blacklevel,bitcount,kB,kG,kR)\n self.img_float32_linearRGB = raw_processing_to_float32_linear(img,\n bayer,blacklevel,bitcount,kB,kG,kR)\n\n def as_cv2_sRGB_8bit(self, rotation_angle=0):\n return rotate_img((np.clip(Linear_to_sRGB(self.img_uint16_linearRGB).astype(np.uint16),0,65535) >> 8).astype(np.uint8), rotation_angle)\n\n def as_cv2_LinearRGB_16bit(self, rotation_angle=0):\n return rotate_img(self.img_uint16_linearRGB, rotation_angle)\n\n def as_cv2_LinearRGB_float32(self, rotation_angle=0):\n return rotate_img(self.img_float32_linearRGB, rotation_angle)\n" ]
[ [ "numpy.array", "numpy.matmul", "numpy.clip", "numpy.fromstring" ] ]
arielsho/Table-Fact-Checking
[ "afbf987fcaa6cc002655d3fa38f95d88e2ec4f75" ]
[ "code/run.py" ]
[ "# encoding=utf8\nimport json\nimport pandas\nimport numpy\nfrom beam_search import dynamic_programming\nfrom multiprocessing import Pool\nimport multiprocessing\nimport sys\nimport time\nimport argparse\nimport os\nfrom APIs import *\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--synthesize\", default=False, action=\"store_true\", help=\"whether to synthesize data\")\nparser.add_argument(\"--sequential\", default=False, action=\"store_true\", help=\"Whether to use sequential or distributed\")\nparser.add_argument(\"--debug\", default=False, action=\"store_true\", help=\"Whether to use debugging mode\")\nparser.add_argument(\"--part\", type=int, default=0, help=\"choose a part\")\nparser.add_argument(\"--split\", type=int, default=1, help=\"how many splits\")\nparser.add_argument(\"--output\", type=str, default=\"../all_programs\", help=\"which folder to store the results\")\nargs = parser.parse_args()\n\nwith open('../tokenized_data/full_cleaned.json') as f:\n data = json.load(f)\n\nmonths_a = ['january', 'february', 'march', 'april', 'may', 'june',\n 'july', 'august', 'september', 'october', 'november', 'december']\nmonths_b = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec']\n\n\ndef isnumber(string):\n return string in [numpy.dtype('int64'), numpy.dtype('int32'), numpy.dtype('float32'), numpy.dtype('float64')]\n\n\ndef list2tuple(inputs):\n mem = []\n for s in inputs:\n mem.append(tuple(s))\n return mem\n\n\ndef split(string, option):\n if option == \"row\":\n return string.split(',')[0]\n else:\n return string.split(',')[1]\n\n\nif not args.synthesize:\n count = 0\n preprocessed = []\n for idx, table_name in enumerate(data):\n t = pandas.read_csv('../data/all_csv/{}'.format(table_name), delimiter=\"#\")\n cols = t.columns\n mapping = {i: \"num\" if isnumber(t) else \"str\" for i, t in enumerate(t.dtypes)}\n entry = data[table_name]\n caption = entry[3].split(' ')\n\n for sent, label, pos_tag in zip(entry[0], entry[1], entry[2]):\n count += 1\n inside = False\n position = False\n masked_sent = ''\n position_buf, mention_buf = '', ''\n mem_num, head_num, mem_str, head_str = [], [], [], []\n ent_index = 0\n for n in range(len(sent)):\n if sent[n] == '#':\n if position:\n if position_buf.startswith('0'):\n idx = int(split(position_buf, \"col\"))\n if mapping[idx] == 'num':\n if cols[idx] not in head_num:\n head_num.append(cols[idx])\n else:\n if cols[idx] not in head_str:\n head_str.append(cols[idx])\n else:\n row = int(split(position_buf, \"row\"))\n idx = int(split(position_buf, \"col\"))\n if idx == -1:\n pass\n else:\n if mapping[idx] == 'num':\n if mention_buf.isdigit():\n mention_buf = int(mention_buf)\n else:\n try:\n mention_buf = float(mention_buf)\n except Exception:\n import pdb\n pdb.set_trace()\n val = (cols[idx], mention_buf)\n if val not in mem_num:\n mem_num.append(val)\n else:\n if len(fuzzy_match(t, cols[idx], mention_buf)) == 0:\n val = (cols[idx], mention_buf)\n else:\n val = (cols[idx], mention_buf)\n if val not in mem_str:\n mem_str.append(val)\n masked_sent += \"<ENTITY{}>\".format(ent_index)\n ent_index += 1\n position_buf = \"\"\n mention_buf = \"\"\n inside = False\n position = False\n else:\n inside = True\n elif sent[n] == ';':\n position = True\n else:\n if position:\n position_buf += sent[n]\n elif inside:\n mention_buf += sent[n]\n else:\n masked_sent += sent[n]\n\n tokens = masked_sent.split()\n i = 0\n while i < len(tokens):\n _ = tokens[i]\n if i + 1 < len(tokens):\n if _.isdigit() and (tokens[i + 1] 
not in [\"thousand\", \"hundred\"]):\n num = int(_)\n i += 1\n elif _.isdigit() and tokens[i + 1] in [\"thousand\", \"hundred\"]:\n if tokens[i + 1] == \"thousand\":\n num = int(_) * 1000\n i += 2\n elif tokens[i + 1] == \"hundred\":\n num = int(_) * 100\n i += 2\n elif _ == \"a\" and tokens[i + 1] in [\"thousand\", \"hundred\"]:\n if tokens[i + 1] == \"thousand\":\n num = 1000\n i += 2\n elif tokens[i + 1] == \"hundred\":\n num = 100\n i += 2\n elif '.' in tokens[i]:\n try:\n num = float(_)\n i += 1\n except Exception:\n i += 1\n continue\n else:\n i += 1\n continue\n else:\n if _.isdigit():\n num = int(_)\n i += 1\n elif '.' in tokens[i]:\n try:\n num = float(_)\n i += 1\n except Exception:\n i += 1\n continue\n else:\n i += 1\n continue\n\n features = []\n\n if tokens[i - 2] in months_b + months_a:\n features.append(-6)\n else:\n features.append(0)\n\n if any([_ in tokens for _ in [\"than\", \"over\", \"more\", \"less\"]]):\n features.append(2)\n else:\n features.append(0)\n\n if any([_ in pos_tag for _ in [\"RBR\", \"JJR\"]]):\n features.append(1)\n else:\n features.append(0)\n\n if num > 50:\n if num > 1900 and num < 2020:\n features.append(-4)\n else:\n features.append(2)\n else:\n if num > len(t):\n features.append(2)\n else:\n features.append(0)\n\n if len(head_num) > 0:\n features.append(1)\n else:\n features.append(0)\n\n flag = False\n for h in head_num:\n if h not in map(lambda x: x[0], mem_num):\n flag = True\n\n if flag:\n features.append(2)\n else:\n features.append(0)\n\n if sum(features) >= 3:\n for h in head_num:\n if any([_ == h for _ in mem_num]):\n continue\n else:\n mem_num.append((h, num))\n elif sum(features) >= 0:\n mem_num.append((\"tmp_input\", num))\n\n for k, v in mem_num:\n if k not in head_num and k != \"tmp_input\":\n head_num.append(k)\n\n for k, v in mem_str:\n if k not in head_str:\n head_str.append(k)\n\n preprocessed.append((table_name, sent, pos_tag, masked_sent, mem_str,\n mem_num, head_str, head_num, \"nt-{}\".format(len(preprocessed)), label))\n\n length = len(preprocessed) // args.split\n for i in range(args.split):\n with open('../preprocessed_data_program/preprocessed.json'.format(i), 'w') as f:\n if i == args.split - 1:\n json.dump(preprocessed[i * length:], f, indent=2)\n else:\n json.dump(preprocessed[i * length: (i + 1) * length], f, indent=2)\n\nelse:\n with open('../preprocessed_data_program/preprocessed.json'.format(args.part), 'r') as f:\n data = json.load(f)\n\n with open('../data/complex_ids.json') as f:\n complex_ids = json.load(f)\n\n if not os.path.exists(args.output):\n os.mkdir(args.output)\n\n def func(inputs):\n table_name, sent, pos_tag, masked_sent, mem_str, mem_num, head_str, head_num, idx, labels = inputs\n t = pandas.read_csv('../data/all_csv/{}'.format(table_name), delimiter=\"#\", encoding='utf-8')\n t.fillna('')\n if args.sequential:\n res = dynamic_programming(table_name, t, sent, masked_sent, pos_tag, mem_str,\n mem_num, head_str, head_num, labels, 5, debug=True)\n print(idx, res[:-1])\n for r in res[-1]:\n print(r)\n else:\n try:\n if not os.path.exists('{}/{}.json'.format(args.output, idx)):\n res = dynamic_programming(table_name, t, sent, masked_sent, pos_tag,\n mem_str, mem_num, head_str, head_num, labels, 7)\n with open('{}/{}.json'.format(args.output, idx), 'w') as f:\n json.dump(res, f, indent=2)\n except Exception:\n print(\"failed {}, {}\".format(table_name, idx))\n\n table_name = [_[0] for _ in data]\n sent = [_[1] for _ in data]\n pos_tag = [_[2] for _ in data]\n masked_sent = [_[3] for _ in data]\n mem_str = 
[list2tuple(_[4]) for _ in data]\n mem_num = [list2tuple(_[5]) for _ in data]\n head_str = [_[6] for _ in data]\n head_num = [_[7] for _ in data]\n idxes = [_[8] for _ in data]\n labels = [_[9] for _ in data]\n\n if args.sequential:\n for arg in zip(table_name, sent, pos_tag, masked_sent, mem_str, mem_num, head_str, head_num, idxes, labels):\n if arg[8] in [\"nt-56710\"]:\n func(arg)\n else:\n cores = multiprocessing.cpu_count()\n print(\"Using {} cores\".format(cores))\n pool = Pool(cores)\n res = pool.map(func, zip(table_name, sent, pos_tag, masked_sent,\n mem_str, mem_num, head_str, head_num, idxes, labels))\n\n pool.close()\n pool.join()\n" ]
[ [ "numpy.dtype" ] ]
cnvrg/Blueprints
[ "e8574063605a2dd7a4c2f4d2cc18458edb2886be" ]
[ "Recommenders/recommenders_data_validation/data_validation.py" ]
[ "import argparse\nimport pandas as pd\nimport psutil\nimport time\nfrom cnvrg import Experiment\n\ntic=time.time()\nparser = argparse.ArgumentParser(description=\"\"\"Preprocessor\"\"\")\nparser.add_argument('-f','--filename', action='store', dest='filename', default='/data/movies_rec_sys/ratings_2.csv', required=True, help=\"\"\"string. csv topics data file\"\"\")\n# parser.add_argument('--project_dir', action='store', dest='project_dir',\n# help=\"\"\"--- For inner use of cnvrg.io ---\"\"\")\n# parser.add_argument('--output_dir', action='store', dest='output_dir',\n# help=\"\"\"--- For inner use of cnvrg.io ---\"\"\")\nargs = parser.parse_args()\nFILENAME = args.filename\ndf = pd.read_csv(FILENAME)\n#if len(df['rating'].unique()) == 2:\n# df['rating'].replace(to_replace=1,value=2,inplace=True)\n# df['rating'].replace(to_replace=0,value=1,inplace=True)\n# print(\"Changed\")\n############## check column headings #############\nheaders=['user_id','item_id']\nif not all([i in df.columns for i in headers]):\n raise Exception('Data must contain |user_id|item_id| columns!')\n\nif 'rating' in df.columns: # EXPLICIT\n print('Data is in Explicit format!')\n print(df.head())\nelse: # IMPLICIT\n print('Data is in Implicit format!')\n print(df.head())\n df['rating'] = 1\n unique_users = df['user_id'].unique()\n unique_items = df['item_id'].unique()\n for user in unique_users:\n for item in unique_items:\n if not ((df['user_id'] == user) & (df['item_id'] == item)).any(): # add negative rows\n df2 = pd.DataFrame({'user_id': [user], 'item_id': [item], 'rating': [0]})\n df = pd.concat([df, df2], ignore_index=True)\n\n\n# if(all(df.columns==headers)==False):\n#\n# # raise(\"Column headings not correct!\")\n#################### CHECK NAN #############\ndf=df.dropna()\n#################### CHECK ratings are either integers or floats #############\ntry:\n df['rating']=df['rating'].astype('float')\nexcept:\n print(\"Ratings have to be either integers or floats\")\n raise()\n########## Convert user and item ids to strings ##########\n\ndf['user_id']=df['user_id'].astype('str')\n\ndf['item_id']=df['item_id'].astype('str')\n\n#################### CHECK ratings are between -10 and 10 #############\n\nif(min(df['rating'])<-10 or max(df['rating'])>10):\n print(\"ratings have to be positive\")\n raise()\n\n##########normalize the ratings globally######### \nprint('RAM GB used:', psutil.virtual_memory()[3]/(1024 * 1024 * 1024))\n \n#Create two dataframe mapping original user id and item id to internal representation and one dataframe of the original translated ratings frame\nprocessed_dataframe=pd.DataFrame(columns=['user_id','item_id','rating'])\n\ncurrent_u_index = 0\ncurrent_i_index = 0\n\nuser = []\nitem = []\nrating = []\nraw2inner_id_users = {}\nraw2inner_id_items = {}\n# user raw id, item raw id, rating\nfor urid, irid, r in df.itertuples(index=False):\n try:\n uid = raw2inner_id_users[urid]\n except KeyError:\n uid = current_u_index\n raw2inner_id_users[urid] = current_u_index\n current_u_index += 1\n try:\n iid = raw2inner_id_items[irid]\n except KeyError:\n iid = current_i_index\n raw2inner_id_items[irid] = current_i_index\n current_i_index += 1\n \n user.append(uid)\n item.append(iid)\n rating.append(r)\ndata={'originaluser_id':raw2inner_id_users.keys(),'user_id':raw2inner_id_users.values()}\nconvertuser=pd.DataFrame(data)\n###########Total input size###########\nprint('RAM GB used:', psutil.virtual_memory()[3]/(1024 * 1024 * 1024))\n\nprint(\"number of 
users:\",len(data))\n\ndata={'originalitem_id':raw2inner_id_items.keys(),'item_id':raw2inner_id_items.values()}\nconvertitem=pd.DataFrame(data)\n\nprint(\"number of items:\",len(data))\n\ndata={'user_id':user,'item_id':item,'rating':rating}\nprocessed_dataframe=pd.DataFrame(data) ####create a ready to use dataframe with converted values###### \n\n\nfull = \"ratingstranslated.csv\"\nitemdict = \"itemdict.csv\" \nuserdict = \"userdict.csv\" \nprocessed_dataframe.to_csv(\"/cnvrg/{}\".format(full), index=False)\nconvertitem.to_csv(\"/cnvrg/{}\".format(itemdict), index=False)\nconvertuser.to_csv(\"/cnvrg/{}\".format(userdict), index=False)\nconvertitem.to_csv('/cnvrg/itemdict_1.csv')\nconvertuser.to_csv('/cnvrg/userdict_1.csv')\n\nprint('RAM GB used:', psutil.virtual_memory()[3]/(1024 * 1024 * 1024))\ntoc=time.time()\nprint(\"time taken:\",toc-tic)\ne = Experiment()\ne.log_param(\"dataval_ram\", psutil.virtual_memory()[3]/(1024 * 1024 * 1024))\ne.log_param(\"dataval_time\", toc-tic)" ]
[ [ "pandas.read_csv", "pandas.DataFrame", "pandas.concat" ] ]
xinxin342/mmdetection-mini
[ "dad8367880a4e321b8ac64ee95d712da44d232d9" ]
[ "mmdet/apis/inference.py" ]
[ "import matplotlib.pyplot as plt\nfrom mmdet import cv_core\nimport numpy as np\nimport torch\nfrom mmdet.cv_core.parallel import collate\nfrom mmdet.cv_core.runner import load_checkpoint\n\nfrom mmdet.datasets.pipelines import Compose\nfrom mmdet.models import build_detector\nfrom mmdet.datasets import build_dataset\n\n\ndef init_detector(config, checkpoint=None, device='cuda:0'):\n \"\"\"Initialize a detector from config file.\n\n Args:\n config (str or :obj:`mmdet.cv_core.Config`): Config file path or the config\n object.\n checkpoint (str, optional): Checkpoint path. If left as None, the model\n will not load any weights.\n\n Returns:\n nn.Module: The constructed detector.\n \"\"\"\n if isinstance(config, str):\n config = cv_core.Config.fromfile(config)\n elif not isinstance(config, cv_core.Config):\n raise TypeError('config must be a filename or Config object, '\n f'but got {type(config)}')\n config.model.pretrained = None\n model = build_detector(config.model, test_cfg=config.test_cfg)\n if checkpoint is not None:\n map_loc = 'cpu' if device == 'cpu' else None\n checkpoint = load_checkpoint(model, checkpoint, map_location=map_loc)\n if 'meta' in checkpoint and 'CLASSES' in checkpoint['meta']:\n model.CLASSES = checkpoint['meta']['CLASSES']\n else:\n dataset = build_dataset(config.data.test)\n model.CLASSES = dataset.CLASSES\n\n model.cfg = config # save the config in the model for convenience\n model.to(device)\n model.eval()\n return model\n\n\nclass LoadImage(object):\n \"\"\"A simple pipeline to load image.\"\"\"\n\n def __call__(self, results):\n \"\"\"Call function to load images into results.\n\n Args:\n results (dict): A result dict contains the file name\n of the image to be read.\n\n Returns:\n dict: ``results`` will be returned containing loaded image.\n \"\"\"\n if isinstance(results['img'], str):\n results['filename'] = results['img']\n results['ori_filename'] = results['img']\n else:\n results['filename'] = None\n results['ori_filename'] = None\n img = cv_core.imread(results['img'])\n results['img'] = img\n results['img_fields'] = ['img']\n results['img_shape'] = img.shape\n results['ori_shape'] = img.shape\n return results\n\n\ndef inference_detector(model, img):\n \"\"\"Inference image(s) with the detector.\n\n Args:\n model (nn.Module): The loaded detector.\n imgs (str/ndarray or list[str/ndarray]): Either image files or loaded\n images.\n\n Returns:\n If imgs is a str, a generator will be returned, otherwise return the\n detection results directly.\n \"\"\"\n cfg = model.cfg\n data = dict(img_info=dict(filename=img), img_prefix=None)\n # build the data pipeline\n test_pipeline = Compose(cfg.data.test.pipeline)\n data = test_pipeline(data)\n data = collate([data], samples_per_gpu=1)\n if next(model.parameters()).is_cuda:\n data['img'][0] = data['img'][0].cuda()\n data['img_metas'] = data['img_metas'][0].data\n else:\n # just get the actual data from DataContainer\n data['img_metas'] = data['img_metas'][0].data\n\n # forward the model\n with torch.no_grad():\n result = model(return_loss=False, rescale=True, **data)[0]\n return result\n\n\ndef show_result_pyplot(model, img, result, score_thr=0.3, fig_size=(15, 10)):\n \"\"\"Visualize the detection results on the image.\n\n Args:\n model (nn.Module): The loaded detector.\n img (str or np.ndarray): Image filename or loaded image.\n result (tuple[list] or list): The detection result, can be either\n (bbox, segm) or just bbox.\n score_thr (float): The threshold to visualize the bboxes and masks.\n fig_size (tuple): Figure 
size of the pyplot figure.\n \"\"\"\n if hasattr(model, 'module'):\n model = model.module\n img = model.show_result(img, result, score_thr=score_thr, show=False)\n cv_core.imshow(img)\n # plt.figure(figsize=fig_size)\n # plt.imshow(cv_core.bgr2rgb(img))\n # plt.show()\n" ]
[ [ "torch.no_grad" ] ]
orivej/tensorflow
[ "5ed2fc046a9e59d7fcffc1bc7202465805618aca" ]
[ "tensorflow/python/data/ops/dataset_ops.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Python wrappers for Datasets.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\nimport functools\nimport sys\nimport threading\nimport warnings\nimport weakref\n\nimport numpy as np\nimport six\nfrom six.moves import queue as Queue # pylint: disable=redefined-builtin\n\nfrom tensorflow.core.framework import dataset_options_pb2\nfrom tensorflow.core.framework import graph_pb2\nfrom tensorflow.python import tf2\nfrom tensorflow.python.compat import compat as tf_compat\nfrom tensorflow.python.data.experimental.ops import distribute_options\nfrom tensorflow.python.data.experimental.ops import optimization_options\nfrom tensorflow.python.data.experimental.ops import stats_options\nfrom tensorflow.python.data.experimental.ops import threading_options\nfrom tensorflow.python.data.ops import iterator_ops\nfrom tensorflow.python.data.util import convert\nfrom tensorflow.python.data.util import nest\nfrom tensorflow.python.data.util import options as options_lib\nfrom tensorflow.python.data.util import random_seed\nfrom tensorflow.python.data.util import structure\nfrom tensorflow.python.data.util import traverse\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.eager import function as eager_function\nfrom tensorflow.python.framework import auto_control_deps\nfrom tensorflow.python.framework import auto_control_deps_utils as acd_utils\nfrom tensorflow.python.framework import composite_tensor\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import function\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import random_seed as core_random_seed\nfrom tensorflow.python.framework import smart_cond\nfrom tensorflow.python.framework import sparse_tensor as sparse_tensor_lib\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_spec\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.framework import type_spec\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import gen_dataset_ops\nfrom tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops\nfrom tensorflow.python.ops import gen_io_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import script_ops\nfrom tensorflow.python.ops import string_ops\nfrom tensorflow.python.ops.ragged import ragged_tensor\nfrom tensorflow.python.training.tracking import base as tracking_base\nfrom tensorflow.python.training.tracking import tracking\nfrom tensorflow.python.util import deprecation\nfrom tensorflow.python.util import 
function_utils\nfrom tensorflow.python.util import lazy_loader\nfrom tensorflow.python.util import nest as tf_nest\nfrom tensorflow.python.util.compat import collections_abc\nfrom tensorflow.python.util.tf_export import tf_export\n\n# Loaded lazily due to a circular dependency (roughly\n# tf.function->wrap_function->dataset->autograph->tf.function).\n# TODO(b/133251390): Use a regular import.\nwrap_function = lazy_loader.LazyLoader(\n \"wrap_function\", globals(),\n \"tensorflow.python.eager.wrap_function\")\n# TODO(mdan): Create a public API for this.\nautograph_ctx = lazy_loader.LazyLoader(\n \"autograph_ctx\", globals(),\n \"tensorflow.python.autograph.core.ag_ctx\")\nautograph = lazy_loader.LazyLoader(\n \"autograph\", globals(),\n \"tensorflow.python.autograph.impl.api\")\n\nops.NotDifferentiable(\"ReduceDataset\")\n\n# A constant that can be used to enable auto-tuning.\nAUTOTUNE = -1\ntf_export(\"data.AUTOTUNE\").export_constant(__name__, \"AUTOTUNE\")\n# TODO(b/168128531): Deprecate and remove this symbol.\ntf_export(\"data.experimental.AUTOTUNE\").export_constant(__name__, \"AUTOTUNE\")\n\n# Constants representing infinite and unknown cardinalities.\nINFINITE = -1\nUNKNOWN = -2\ntf_export(\"data.INFINITE_CARDINALITY\").export_constant(__name__, \"INFINITE\")\ntf_export(\"data.UNKNOWN_CARDINALITY\").export_constant(__name__, \"UNKNOWN\")\n\n\n@tf_export(\"data.Dataset\", v1=[])\[email protected]_metaclass(abc.ABCMeta)\nclass DatasetV2(collections_abc.Iterable, tracking_base.Trackable,\n composite_tensor.CompositeTensor):\n \"\"\"Represents a potentially large set of elements.\n\n The `tf.data.Dataset` API supports writing descriptive and efficient input\n pipelines. `Dataset` usage follows a common pattern:\n\n 1. Create a source dataset from your input data.\n 2. Apply dataset transformations to preprocess the data.\n 3. Iterate over the dataset and process the elements.\n\n Iteration happens in a streaming fashion, so the full dataset does not need to\n fit into memory.\n\n Source Datasets:\n\n The simplest way to create a dataset is to create it from a python `list`:\n\n >>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])\n >>> for element in dataset:\n ... print(element)\n tf.Tensor(1, shape=(), dtype=int32)\n tf.Tensor(2, shape=(), dtype=int32)\n tf.Tensor(3, shape=(), dtype=int32)\n\n To process lines from files, use `tf.data.TextLineDataset`:\n\n >>> dataset = tf.data.TextLineDataset([\"file1.txt\", \"file2.txt\"])\n\n To process records written in the `TFRecord` format, use `TFRecordDataset`:\n\n >>> dataset = tf.data.TFRecordDataset([\"file1.tfrecords\", \"file2.tfrecords\"])\n\n To create a dataset of all files matching a pattern, use\n `tf.data.Dataset.list_files`:\n\n ```python\n dataset = tf.data.Dataset.list_files(\"/path/*.txt\")\n ```\n\n See `tf.data.FixedLengthRecordDataset` and `tf.data.Dataset.from_generator`\n for more ways to create datasets.\n\n Transformations:\n\n Once you have a dataset, you can apply transformations to prepare the data for\n your model:\n\n >>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])\n >>> dataset = dataset.map(lambda x: x*2)\n >>> list(dataset.as_numpy_iterator())\n [2, 4, 6]\n\n Common Terms:\n\n **Element**: A single output from calling `next()` on a dataset iterator.\n Elements may be nested structures containing multiple components. For\n example, the element `(1, (3, \"apple\"))` has one tuple nested in another\n tuple. 
The components are `1`, `3`, and `\"apple\"`.\n\n **Component**: The leaf in the nested structure of an element.\n\n Supported types:\n\n Elements can be nested structures of tuples, named tuples, and dictionaries.\n Note that Python lists are *not* treated as nested structures of components.\n Instead, lists are converted to tensors and treated as components. For\n example, the element `(1, [1, 2, 3])` has only two components; the tensor `1`\n and the tensor `[1, 2, 3]`. Element components can be of any type\n representable by `tf.TypeSpec`, including `tf.Tensor`, `tf.data.Dataset`,\n `tf.sparse.SparseTensor`, `tf.RaggedTensor`, and `tf.TensorArray`.\n\n ```python\n a = 1 # Integer element\n b = 2.0 # Float element\n c = (1, 2) # Tuple element with 2 components\n d = {\"a\": (2, 2), \"b\": 3} # Dict element with 3 components\n Point = collections.namedtuple(\"Point\", [\"x\", \"y\"])\n e = Point(1, 2) # Named tuple\n f = tf.data.Dataset.range(10) # Dataset element\n ```\n\n For more information,\n read [this guide](https://www.tensorflow.org/guide/data).\n \"\"\"\n\n def __init__(self, variant_tensor):\n \"\"\"Creates a DatasetV2 object.\n\n This is a difference between DatasetV1 and DatasetV2. DatasetV1 does not\n take anything in its constructor whereas in the DatasetV2, we expect\n subclasses to create a variant_tensor and pass it in to the super() call.\n\n Args:\n variant_tensor: A DT_VARIANT tensor that represents the dataset.\n \"\"\"\n self._variant_tensor_attr = variant_tensor\n weak_self = weakref.proxy(self)\n self._variant_tracker = self._track_trackable(\n _VariantTracker(\n self._variant_tensor,\n # _trace_variant_creation only works when executing eagerly, so we\n # don't want to run it immediately. We also want the _VariantTracker\n # to have a weak reference to the Dataset to avoid creating\n # reference cycles and making work for the garbage collector.\n lambda: weak_self._trace_variant_creation()()), # pylint: disable=unnecessary-lambda,protected-access\n name=\"_variant_tracker\")\n self._graph_attr = ops.get_default_graph()\n\n # Initialize the options for this dataset and its inputs.\n self._options_attr = Options()\n for input_dataset in self._inputs():\n input_options = input_dataset.options()\n if input_options is not None:\n self._options_attr = self._options_attr.merge(input_options)\n self._options_attr._set_mutable(False) # pylint: disable=protected-access\n\n @property\n def _variant_tensor(self):\n return self._variant_tensor_attr\n\n @_variant_tensor.setter\n def _variant_tensor(self, _):\n raise ValueError(\"The _variant_tensor property is read-only\")\n\n @deprecation.deprecated_args(None, \"Use external_state_policy instead\",\n \"allow_stateful\")\n def _as_serialized_graph(\n self,\n allow_stateful=None,\n strip_device_assignment=None,\n external_state_policy=distribute_options.ExternalStatePolicy.WARN):\n \"\"\"Produces serialized graph representation of the dataset.\n\n Args:\n allow_stateful: If true, we allow stateful ops to be present in the graph\n def. In that case, the state in these ops would be thrown away.\n strip_device_assignment: If true, non-local (i.e. job and task) device\n assignment is stripped from ops in the serialized graph.\n external_state_policy: The ExternalStatePolicy enum that determines how we\n handle input pipelines that depend on external state. 
By default, its\n set to WARN.\n\n Returns:\n A scalar `tf.Tensor` of `tf.string` type, representing this dataset as a\n serialized graph.\n \"\"\"\n if external_state_policy:\n policy = external_state_policy.value\n return gen_dataset_ops.dataset_to_graph_v2(\n self._variant_tensor,\n external_state_policy=policy,\n strip_device_assignment=strip_device_assignment)\n if strip_device_assignment:\n return gen_dataset_ops.dataset_to_graph(\n self._variant_tensor,\n allow_stateful=allow_stateful,\n strip_device_assignment=strip_device_assignment)\n return gen_dataset_ops.dataset_to_graph(\n self._variant_tensor, allow_stateful=allow_stateful)\n\n def _trace_variant_creation(self):\n \"\"\"Traces a function which outputs a variant `tf.Tensor` for this dataset.\n\n Note that creating this function involves evaluating an op, and is currently\n only supported when executing eagerly.\n\n Returns:\n A zero-argument `ConcreteFunction` which outputs a variant `tf.Tensor`.\n \"\"\"\n variant = self._variant_tensor\n if not isinstance(variant, ops.EagerTensor):\n raise NotImplementedError(\n \"Can only export Datasets which were created executing eagerly. \"\n \"Please file a feature request if this is important to you.\")\n with context.eager_mode(), ops.device(\"CPU\"):\n # pylint: disable=protected-access\n graph_def = graph_pb2.GraphDef().FromString(\n self._as_serialized_graph(external_state_policy=distribute_options\n .ExternalStatePolicy.FAIL).numpy())\n output_node_name = None\n for node in graph_def.node:\n if node.op == \"_Retval\":\n if output_node_name is not None:\n raise AssertionError(\n \"Found multiple return values from the dataset's graph, expected \"\n \"only one.\")\n output_node_name, = node.input\n if output_node_name is None:\n raise AssertionError(\"Could not find the dataset's output node.\")\n # Add functions used in this Dataset to the function's graph, since they\n # need to follow it around (and for example be added to a SavedModel which\n # references the dataset).\n variant_function = wrap_function.function_from_graph_def(\n graph_def, inputs=[], outputs=output_node_name + \":0\")\n for used_function in self._functions():\n used_function.function.add_to_graph(variant_function.graph)\n return variant_function\n\n @abc.abstractmethod\n def _inputs(self):\n \"\"\"Returns a list of the input datasets of the dataset.\"\"\"\n\n raise NotImplementedError(\"Dataset._inputs\")\n\n @property\n def _graph(self):\n return self._graph_attr\n\n @_graph.setter\n def _graph(self, _):\n raise ValueError(\"The _graph property is read-only\")\n\n def _has_captured_ref(self):\n \"\"\"Whether this dataset uses a function that captures ref variables.\n\n Returns:\n A boolean, which if true indicates that the dataset or one of its inputs\n uses a function that captures ref variables.\n \"\"\"\n if context.executing_eagerly():\n # RefVariables are not supported in eager mode\n return False\n\n def is_tensor_or_parent_ref(tensor):\n if tensor.dtype._is_ref_dtype: # pylint: disable=protected-access\n return True\n # If the captured tensor is an eager tensor, we cannot trace its inputs.\n if isinstance(tensor, ops._EagerTensorBase): # pylint: disable=protected-access\n return False\n return any(is_tensor_or_parent_ref(x) for x in tensor.op.inputs)\n\n for fn in self._functions():\n if any(is_tensor_or_parent_ref(t) for t in fn.function.captured_inputs):\n return True\n\n return any(\n [input_dataset._has_captured_ref() for input_dataset in self._inputs()]) # pylint: disable=protected-access\n\n # 
TODO(jsimsa): Change this to be the transitive closure of functions used\n # by this dataset and its inputs.\n def _functions(self):\n \"\"\"Returns a list of functions associated with this dataset.\n\n Returns:\n A list of `StructuredFunctionWrapper` objects.\n \"\"\"\n return []\n\n def options(self):\n \"\"\"Returns the options for this dataset and its inputs.\n\n Returns:\n A `tf.data.Options` object representing the dataset options.\n \"\"\"\n return self._options_attr\n\n def _apply_options(self):\n \"\"\"Apply options, such as optimization configuration, to the dataset.\"\"\"\n\n dataset = self\n options = self.options()\n\n # (1) Apply threading options\n if options.experimental_threading is not None:\n t_options = options.experimental_threading\n if t_options.max_intra_op_parallelism is not None:\n dataset = _MaxIntraOpParallelismDataset(\n dataset, t_options.max_intra_op_parallelism)\n if t_options.private_threadpool_size is not None:\n dataset = _PrivateThreadPoolDataset(dataset,\n t_options.private_threadpool_size)\n\n # (2) Apply autotune options\n autotune, algorithm, cpu_budget, ram_budget = options._autotune_settings() # pylint: disable=protected-access\n if autotune:\n dataset = _ModelDataset(dataset, algorithm, cpu_budget, ram_budget)\n\n # (3) Apply graph rewrite options\n # pylint: disable=protected-access\n graph_rewrites = options._graph_rewrites()\n graph_rewrite_configs = options._graph_rewrite_configs(autotune)\n # pylint: enable=protected-access\n if self._has_captured_ref():\n if graph_rewrites.enabled or graph_rewrites.default:\n warnings.warn(\n \"tf.data graph rewrites are not compatible with tf.Variable. \"\n \"The following rewrites will be disabled: %s. To enable \"\n \"rewrites, use resource variables instead by calling \"\n \"`tf.enable_resource_variables()` at the start of the program.\" %\n \", \".join(graph_rewrites.enabled + graph_rewrites.default))\n elif (graph_rewrites.enabled or graph_rewrites.default or\n (options.experimental_optimization.apply_default_optimizations # pylint: disable=g-bool-id-comparison\n is not False)):\n dataset = _OptimizeDataset(dataset, graph_rewrites.enabled,\n graph_rewrites.disabled,\n graph_rewrites.default, graph_rewrite_configs)\n\n # (4) Apply stats aggregator options\n if options.experimental_stats and options.experimental_stats.aggregator: # pylint: disable=line-too-long\n dataset = _SetStatsAggregatorDataset( # pylint: disable=protected-access\n dataset, options.experimental_stats.aggregator,\n options.experimental_stats.prefix,\n options.experimental_stats.counter_prefix)\n return dataset\n\n def __iter__(self):\n \"\"\"Creates an iterator for elements of this dataset.\n\n The returned iterator implements the Python Iterator protocol.\n\n Returns:\n An `tf.data.Iterator` for the elements of this dataset.\n\n Raises:\n RuntimeError: If not inside of tf.function and not executing eagerly.\n \"\"\"\n if context.executing_eagerly() or ops.inside_function():\n with ops.colocate_with(self._variant_tensor):\n return iterator_ops.OwnedIterator(self)\n else:\n raise RuntimeError(\"__iter__() is only supported inside of tf.function \"\n \"or when eager execution is enabled.\")\n\n def __bool__(self):\n return True # Required as __len__ is defined\n\n __nonzero__ = __bool__ # Python 2 backward compatibility\n\n def __len__(self):\n \"\"\"Returns the length of the dataset if it is known and finite.\n\n This method requires that you are running in eager mode, and that the\n length of the dataset is known and non-infinite. 
When the length may be\n unknown or infinite, or if you are running in graph mode, use\n `tf.data.Dataset.cardinality` instead.\n\n Returns:\n An integer representing the length of the dataset.\n\n Raises:\n RuntimeError: If the dataset length is unknown or infinite, or if eager\n execution is not enabled.\n \"\"\"\n if not context.executing_eagerly():\n raise TypeError(\"__len__() is not supported while tracing functions. \"\n \"Use `tf.data.Dataset.cardinality` instead.\")\n length = self.cardinality()\n if length.numpy() == INFINITE:\n raise TypeError(\"dataset length is infinite.\")\n if length.numpy() == UNKNOWN:\n raise TypeError(\"dataset length is unknown.\")\n return length\n\n @abc.abstractproperty\n def element_spec(self):\n \"\"\"The type specification of an element of this dataset.\n\n >>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])\n >>> dataset.element_spec\n TensorSpec(shape=(), dtype=tf.int32, name=None)\n\n For more information,\n read [this guide](https://www.tensorflow.org/guide/data#dataset_structure).\n\n Returns:\n A (nested) structure of `tf.TypeSpec` objects matching the structure of an\n element of this dataset and specifying the type of individual components.\n \"\"\"\n raise NotImplementedError(\"Dataset.element_spec\")\n\n def __repr__(self):\n output_shapes = nest.map_structure(str, get_legacy_output_shapes(self))\n output_shapes = str(output_shapes).replace(\"'\", \"\")\n output_types = nest.map_structure(repr, get_legacy_output_types(self))\n output_types = str(output_types).replace(\"'\", \"\")\n return (\"<%s shapes: %s, types: %s>\" % (type(self).__name__, output_shapes,\n output_types))\n\n def as_numpy_iterator(self):\n \"\"\"Returns an iterator which converts all elements of the dataset to numpy.\n\n Use `as_numpy_iterator` to inspect the content of your dataset. To see\n element shapes and types, print dataset elements directly instead of using\n `as_numpy_iterator`.\n\n >>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])\n >>> for element in dataset:\n ... print(element)\n tf.Tensor(1, shape=(), dtype=int32)\n tf.Tensor(2, shape=(), dtype=int32)\n tf.Tensor(3, shape=(), dtype=int32)\n\n This method requires that you are running in eager mode and the dataset's\n element_spec contains only `TensorSpec` components.\n\n >>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])\n >>> for element in dataset.as_numpy_iterator():\n ... print(element)\n 1\n 2\n 3\n\n >>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])\n >>> print(list(dataset.as_numpy_iterator()))\n [1, 2, 3]\n\n `as_numpy_iterator()` will preserve the nested structure of dataset\n elements.\n\n >>> dataset = tf.data.Dataset.from_tensor_slices({'a': ([1, 2], [3, 4]),\n ... 'b': [5, 6]})\n >>> list(dataset.as_numpy_iterator()) == [{'a': (1, 3), 'b': 5},\n ... 
{'a': (2, 4), 'b': 6}]\n True\n\n Returns:\n An iterable over the elements of the dataset, with their tensors converted\n to numpy arrays.\n\n Raises:\n TypeError: if an element contains a non-`Tensor` value.\n RuntimeError: if eager execution is not enabled.\n \"\"\"\n if not context.executing_eagerly():\n raise RuntimeError(\"as_numpy_iterator() is not supported while tracing \"\n \"functions\")\n for component_spec in nest.flatten(self.element_spec):\n if not isinstance(\n component_spec,\n (tensor_spec.TensorSpec, ragged_tensor.RaggedTensorSpec)):\n raise TypeError(\n \"Dataset.as_numpy_iterator() does not support datasets containing \"\n + str(component_spec.value_type))\n\n return _NumpyIterator(self)\n\n @property\n def _flat_shapes(self):\n \"\"\"Returns a list `tf.TensorShapes`s for the element tensor representation.\n\n Returns:\n A list `tf.TensorShapes`s for the element tensor representation.\n \"\"\"\n return structure.get_flat_tensor_shapes(self.element_spec)\n\n @property\n def _flat_types(self):\n \"\"\"Returns a list `tf.DType`s for the element tensor representation.\n\n Returns:\n A list `tf.DType`s for the element tensor representation.\n \"\"\"\n return structure.get_flat_tensor_types(self.element_spec)\n\n @property\n def _flat_structure(self):\n \"\"\"Helper for setting `output_shapes` and `output_types` attrs of an op.\n\n Most dataset op constructors expect `output_shapes` and `output_types`\n arguments that represent the flattened structure of an element. This helper\n function generates these attrs as a keyword argument dictionary, allowing\n `Dataset._variant_tensor` implementations to pass `**self._flat_structure`\n to the op constructor.\n\n Returns:\n A dictionary of keyword arguments that can be passed to a dataset op\n constructor.\n \"\"\"\n return {\n \"output_shapes\": self._flat_shapes,\n \"output_types\": self._flat_types,\n }\n\n @property\n def _type_spec(self):\n return DatasetSpec(self.element_spec)\n\n @staticmethod\n def from_tensors(tensors):\n \"\"\"Creates a `Dataset` with a single element, comprising the given tensors.\n\n `from_tensors` produces a dataset containing only a single element. To slice\n the input tensor into multiple elements, use `from_tensor_slices` instead.\n\n >>> dataset = tf.data.Dataset.from_tensors([1, 2, 3])\n >>> list(dataset.as_numpy_iterator())\n [array([1, 2, 3], dtype=int32)]\n >>> dataset = tf.data.Dataset.from_tensors(([1, 2, 3], 'A'))\n >>> list(dataset.as_numpy_iterator())\n [(array([1, 2, 3], dtype=int32), b'A')]\n\n >>> # You can use `from_tensors` to produce a dataset which repeats\n >>> # the same example many times.\n >>> example = tf.constant([1,2,3])\n >>> dataset = tf.data.Dataset.from_tensors(example).repeat(2)\n >>> list(dataset.as_numpy_iterator())\n [array([1, 2, 3], dtype=int32), array([1, 2, 3], dtype=int32)]\n\n Note that if `tensors` contains a NumPy array, and eager execution is not\n enabled, the values will be embedded in the graph as one or more\n `tf.constant` operations. For large datasets (> 1 GB), this can waste\n memory and run into byte limits of graph serialization. If `tensors`\n contains one or more large NumPy arrays, consider the alternative described\n in [this\n guide](https://tensorflow.org/guide/data#consuming_numpy_arrays).\n\n Args:\n tensors: A dataset \"element\". 
Supported values are documented\n [here](https://www.tensorflow.org/guide/data#dataset_structure).\n\n Returns:\n Dataset: A `Dataset`.\n \"\"\"\n return TensorDataset(tensors)\n\n @staticmethod\n def from_tensor_slices(tensors):\n \"\"\"Creates a `Dataset` whose elements are slices of the given tensors.\n\n The given tensors are sliced along their first dimension. This operation\n preserves the structure of the input tensors, removing the first dimension\n of each tensor and using it as the dataset dimension. All input tensors\n must have the same size in their first dimensions.\n\n >>> # Slicing a 1D tensor produces scalar tensor elements.\n >>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])\n >>> list(dataset.as_numpy_iterator())\n [1, 2, 3]\n\n >>> # Slicing a 2D tensor produces 1D tensor elements.\n >>> dataset = tf.data.Dataset.from_tensor_slices([[1, 2], [3, 4]])\n >>> list(dataset.as_numpy_iterator())\n [array([1, 2], dtype=int32), array([3, 4], dtype=int32)]\n\n >>> # Slicing a tuple of 1D tensors produces tuple elements containing\n >>> # scalar tensors.\n >>> dataset = tf.data.Dataset.from_tensor_slices(([1, 2], [3, 4], [5, 6]))\n >>> list(dataset.as_numpy_iterator())\n [(1, 3, 5), (2, 4, 6)]\n\n >>> # Dictionary structure is also preserved.\n >>> dataset = tf.data.Dataset.from_tensor_slices({\"a\": [1, 2], \"b\": [3, 4]})\n >>> list(dataset.as_numpy_iterator()) == [{'a': 1, 'b': 3},\n ... {'a': 2, 'b': 4}]\n True\n\n >>> # Two tensors can be combined into one Dataset object.\n >>> features = tf.constant([[1, 3], [2, 1], [3, 3]]) # ==> 3x2 tensor\n >>> labels = tf.constant(['A', 'B', 'A']) # ==> 3x1 tensor\n >>> dataset = Dataset.from_tensor_slices((features, labels))\n >>> # Both the features and the labels tensors can be converted\n >>> # to a Dataset object separately and combined after.\n >>> features_dataset = Dataset.from_tensor_slices(features)\n >>> labels_dataset = Dataset.from_tensor_slices(labels)\n >>> dataset = Dataset.zip((features_dataset, labels_dataset))\n >>> # A batched feature and label set can be converted to a Dataset\n >>> # in similar fashion.\n >>> batched_features = tf.constant([[[1, 3], [2, 3]],\n ... [[2, 1], [1, 2]],\n ... [[3, 3], [3, 2]]], shape=(3, 2, 2))\n >>> batched_labels = tf.constant([['A', 'A'],\n ... ['B', 'B'],\n ... ['A', 'B']], shape=(3, 2, 1))\n >>> dataset = Dataset.from_tensor_slices((batched_features, batched_labels))\n >>> for element in dataset.as_numpy_iterator():\n ... print(element)\n (array([[1, 3],\n [2, 3]], dtype=int32), array([[b'A'],\n [b'A']], dtype=object))\n (array([[2, 1],\n [1, 2]], dtype=int32), array([[b'B'],\n [b'B']], dtype=object))\n (array([[3, 3],\n [3, 2]], dtype=int32), array([[b'A'],\n [b'B']], dtype=object))\n\n Note that if `tensors` contains a NumPy array, and eager execution is not\n enabled, the values will be embedded in the graph as one or more\n `tf.constant` operations. For large datasets (> 1 GB), this can waste\n memory and run into byte limits of graph serialization. If `tensors`\n contains one or more large NumPy arrays, consider the alternative described\n in [this guide](\n https://tensorflow.org/guide/data#consuming_numpy_arrays).\n\n Args:\n tensors: A dataset element, whose components have the same first\n dimension. 
Supported values are documented\n [here](https://www.tensorflow.org/guide/data#dataset_structure).\n\n Returns:\n Dataset: A `Dataset`.\n \"\"\"\n return TensorSliceDataset(tensors)\n\n class _GeneratorState(object):\n \"\"\"Stores outstanding iterators created from a Python generator.\n\n This class keeps track of potentially multiple iterators that may have\n been created from a generator, e.g. in the case that the dataset is\n repeated, or nested within a parallel computation.\n \"\"\"\n\n def __init__(self, generator):\n self._generator = generator\n self._lock = threading.Lock()\n self._next_id = 0 # GUARDED_BY(self._lock)\n self._args = {}\n self._iterators = {}\n\n def get_next_id(self, *args):\n with self._lock:\n ret = self._next_id\n self._next_id += 1\n self._args[ret] = args\n # NOTE(mrry): Explicitly create an array of `np.int64` because implicit\n # casting in `py_func()` will create an array of `np.int32` on Windows,\n # leading to a runtime error.\n return np.array(ret, dtype=np.int64)\n\n def get_iterator(self, iterator_id):\n try:\n return self._iterators[iterator_id]\n except KeyError:\n iterator = iter(self._generator(*self._args.pop(iterator_id)))\n self._iterators[iterator_id] = iterator\n return iterator\n\n def iterator_completed(self, iterator_id):\n del self._iterators[iterator_id]\n\n @staticmethod\n @deprecation.deprecated_args(None, \"Use output_signature instead\",\n \"output_types\", \"output_shapes\")\n def from_generator(generator,\n output_types=None,\n output_shapes=None,\n args=None,\n output_signature=None):\n \"\"\"Creates a `Dataset` whose elements are generated by `generator`.\n\n The `generator` argument must be a callable object that returns\n an object that supports the `iter()` protocol (e.g. a generator function).\n\n The elements generated by `generator` must be compatible with either the\n given `output_signature` argument or with the given `output_types` and\n (optionally) `output_shapes` arguments, whichever was specified.\n\n The recommended way to call `from_generator` is to use the\n `output_signature` argument. In this case the output will be assumed to\n consist of objects with the classes, shapes and types defined by\n `tf.TypeSpec` objects from `output_signature` argument:\n\n >>> def gen():\n ... ragged_tensor = tf.ragged.constant([[1, 2], [3]])\n ... yield 42, ragged_tensor\n >>>\n >>> dataset = tf.data.Dataset.from_generator(\n ... gen,\n ... output_signature=(\n ... tf.TensorSpec(shape=(), dtype=tf.int32),\n ... tf.RaggedTensorSpec(shape=(2, None), dtype=tf.int32)))\n >>>\n >>> list(dataset.take(1))\n [(<tf.Tensor: shape=(), dtype=int32, numpy=42>,\n <tf.RaggedTensor [[1, 2], [3]]>)]\n\n There is also a deprecated way to call `from_generator` by either with\n `output_types` argument alone or together with `output_shapes` argument.\n In this case the output of the function will be assumed to consist of\n `tf.Tensor` objects with the types defined by `output_types` and with the\n shapes which are either unknown or defined by `output_shapes`.\n\n Note: The current implementation of `Dataset.from_generator()` uses\n `tf.numpy_function` and inherits the same constraints. In particular, it\n requires the dataset and iterator related operations to be placed\n on a device in the same process as the Python program that called\n `Dataset.from_generator()`. 
The body of `generator` will not be\n serialized in a `GraphDef`, and you should not use this method if you\n need to serialize your model and restore it in a different environment.\n\n Note: If `generator` depends on mutable global variables or other external\n state, be aware that the runtime may invoke `generator` multiple times\n (in order to support repeating the `Dataset`) and at any time\n between the call to `Dataset.from_generator()` and the production of the\n first element from the generator. Mutating global variables or external\n state can cause undefined behavior, and we recommend that you explicitly\n cache any external state in `generator` before calling\n `Dataset.from_generator()`.\n\n Args:\n generator: A callable object that returns an object that supports the\n `iter()` protocol. If `args` is not specified, `generator` must take no\n arguments; otherwise it must take as many arguments as there are values\n in `args`.\n output_types: (Optional.) A (nested) structure of `tf.DType` objects\n corresponding to each component of an element yielded by `generator`.\n output_shapes: (Optional.) A (nested) structure of `tf.TensorShape`\n objects corresponding to each component of an element yielded by\n `generator`.\n args: (Optional.) A tuple of `tf.Tensor` objects that will be evaluated\n and passed to `generator` as NumPy-array arguments.\n output_signature: (Optional.) A (nested) structure of `tf.TypeSpec`\n objects corresponding to each component of an element yielded by\n `generator`.\n\n Returns:\n Dataset: A `Dataset`.\n \"\"\"\n if not callable(generator):\n raise TypeError(\"`generator` must be callable.\")\n\n if output_signature is not None:\n if output_types is not None:\n raise TypeError(\"`output_types` can not be used together with \"\n \"`output_signature`\")\n if output_shapes is not None:\n raise TypeError(\"`output_shapes` can not be used together with \"\n \"`output_signature`\")\n if not all(\n isinstance(_, type_spec.TypeSpec)\n for _ in nest.flatten(output_signature)):\n raise TypeError(\"All the elements of `output_signature` must be \"\n \"`tf.TypeSpec` objects.\")\n else:\n if output_types is None:\n raise TypeError(\"Either `output_signature` or `output_types` must \"\n \"be specified\")\n\n if output_signature is None:\n if output_shapes is None:\n output_shapes = nest.map_structure(\n lambda _: tensor_shape.TensorShape(None), output_types)\n else:\n output_shapes = nest.map_structure_up_to(output_types,\n tensor_shape.as_shape,\n output_shapes)\n output_signature = nest.map_structure_up_to(output_types,\n tensor_spec.TensorSpec,\n output_shapes, output_types)\n if all(\n isinstance(x, tensor_spec.TensorSpec)\n for x in nest.flatten(output_signature)):\n output_types = nest.pack_sequence_as(\n output_signature, [x.dtype for x in nest.flatten(output_signature)])\n output_shapes = nest.pack_sequence_as(\n output_signature, [x.shape for x in nest.flatten(output_signature)])\n\n if args is None:\n args = ()\n else:\n args = tuple(ops.convert_n_to_tensor(args, name=\"args\"))\n\n generator_state = DatasetV2._GeneratorState(generator)\n\n def get_iterator_id_fn(unused_dummy):\n \"\"\"Creates a unique `iterator_id` for each pass over the dataset.\n\n The returned `iterator_id` disambiguates between multiple concurrently\n existing iterators.\n\n Args:\n unused_dummy: Ignored value.\n\n Returns:\n A `tf.int64` tensor whose value uniquely identifies an iterator in\n `generator_state`.\n \"\"\"\n return script_ops.numpy_function(generator_state.get_next_id, 
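# `get_next_id` deliberately returns an `np.int64` (see the NOTE in\n # `_GeneratorState.get_next_id` about implicit `np.int32` casting on Windows),\n # so `dtypes.int64` is the matching output type for this `numpy_function`.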
args,\n dtypes.int64)\n\n def generator_next_fn(iterator_id_t):\n \"\"\"Generates the next element from iterator with ID `iterator_id_t`.\n\n We map this function across an infinite repetition of the\n `iterator_id_t`, and raise `StopIteration` to terminate the iteration.\n\n Args:\n iterator_id_t: A `tf.int64` tensor whose value uniquely identifies the\n iterator in `generator_state` from which to generate an element.\n\n Returns:\n The next element to generate from the iterator.\n \"\"\"\n if output_types and output_shapes:\n flattened_types = [\n dtypes.as_dtype(dt) for dt in nest.flatten(output_types)\n ]\n flattened_shapes = nest.flatten(output_shapes)\n\n def generator_py_func(iterator_id):\n \"\"\"A `py_func` that will be called to invoke the iterator.\"\"\"\n # `next()` raises `StopIteration` when there are no more\n # elements remaining to be generated.\n values = next(generator_state.get_iterator(iterator_id))\n\n # Use the same _convert function from the py_func() implementation to\n # convert the returned values to arrays early, so that we can inspect\n # their values.\n try:\n flattened_values = nest.flatten_up_to(output_types, values)\n except (TypeError, ValueError):\n six.reraise(\n TypeError,\n TypeError(\n \"`generator` yielded an element that did not match the \"\n \"expected structure. The expected structure was %s, but \"\n \"the yielded element was %s.\" % (output_types, values)),\n sys.exc_info()[2])\n ret_arrays = []\n for ret, dtype in zip(flattened_values, flattened_types):\n try:\n ret_arrays.append(\n script_ops.FuncRegistry._convert( # pylint: disable=protected-access\n ret,\n dtype=dtype.as_numpy_dtype))\n except (TypeError, ValueError):\n six.reraise(\n TypeError,\n TypeError(\n \"`generator` yielded an element that could not be \"\n \"converted to the expected type. 
The expected type was \"\n \"%s, but the yielded element was %s.\" %\n (dtype.name, ret)),\n sys.exc_info()[2])\n\n # Additional type and shape checking to ensure that the components of\n # the generated element match the `output_types` and `output_shapes`\n # arguments.\n for (ret_array, expected_dtype,\n expected_shape) in zip(ret_arrays, flattened_types,\n flattened_shapes):\n if ret_array.dtype != expected_dtype.as_numpy_dtype:\n raise TypeError(\n \"`generator` yielded an element of type %s where an element \"\n \"of type %s was expected.\" %\n (ret_array.dtype, expected_dtype.as_numpy_dtype))\n if not expected_shape.is_compatible_with(ret_array.shape):\n raise ValueError(\n \"`generator` yielded an element of shape %s where an element \"\n \"of shape %s was expected.\" %\n (ret_array.shape, expected_shape))\n\n return ret_arrays\n\n flat_values = script_ops.numpy_function(generator_py_func,\n [iterator_id_t],\n flattened_types)\n\n # The `py_func()` op drops the inferred shapes, so we add them back in\n # here.\n if output_shapes is not None:\n for ret_t, shape in zip(flat_values, flattened_shapes):\n ret_t.set_shape(shape)\n\n return nest.pack_sequence_as(output_types, flat_values)\n else:\n flat_output_types = structure.get_flat_tensor_types(output_signature)\n\n def generator_py_func(iterator_id):\n \"\"\"A `py_func` that will be called to invoke the iterator.\"\"\"\n # `next()` raises `StopIteration` when there are no more\n # elements remaining to be generated.\n values = next(generator_state.get_iterator(iterator_id.numpy()))\n\n try:\n values = structure.normalize_element(values, output_signature)\n except (TypeError, ValueError):\n six.reraise(\n TypeError,\n TypeError(\n \"`generator` yielded an element that did not match the \"\n \"expected structure. 
The expected structure was %s, but \"\n \"the yielded element was %s.\" % (output_signature, values)),\n sys.exc_info()[2])\n\n values_spec = structure.type_spec_from_value(values)\n\n if not structure.are_compatible(values_spec, output_signature):\n raise TypeError(\n \"`generator` yielded an element of %s where an element \"\n \"of %s was expected.\" % (values_spec, output_signature))\n\n return structure.to_tensor_list(output_signature, values)\n\n return script_ops._eager_py_func( # pylint: disable=protected-access\n generator_py_func,\n inp=[iterator_id_t],\n Tout=flat_output_types,\n use_tape_cache=False)\n\n def finalize_fn(iterator_id_t):\n \"\"\"Releases host-side state for the iterator with ID `iterator_id_t`.\"\"\"\n\n def finalize_py_func(iterator_id):\n generator_state.iterator_completed(iterator_id)\n # We return a dummy value so that the `finalize_fn` has a valid\n # signature.\n # NOTE(mrry): Explicitly create an array of `np.int64` because implicit\n # casting in `py_func()` will create an array of `np.int32` on Windows,\n # leading to a runtime error.\n return np.array(0, dtype=np.int64)\n\n return script_ops.numpy_function(finalize_py_func, [iterator_id_t],\n dtypes.int64)\n\n # This function associates each traversal of `generator` with a unique\n # iterator ID.\n def flat_map_fn(dummy_arg):\n # The `get_iterator_id_fn` gets a unique ID for the current instance of\n # of the generator.\n # The `generator_next_fn` gets the next element from the iterator with the\n # given ID, and raises StopIteration when that iterator contains no\n # more elements.\n return _GeneratorDataset(dummy_arg, get_iterator_id_fn, generator_next_fn,\n finalize_fn, output_signature)\n\n # A single-element dataset that, each time it is evaluated, contains a\n # freshly-generated and unique (for the returned dataset) int64\n # ID that will be used to identify the appropriate Python state, which\n # is encapsulated in `generator_state`, and captured in\n # `get_iterator_id_map_fn`.\n dummy = 0\n id_dataset = Dataset.from_tensors(dummy)\n\n # A dataset that contains all of the elements generated by a\n # single iterator created from `generator`, identified by the\n # iterator ID contained in `id_dataset`. Lifting the iteration\n # into a flat_map here enables multiple repetitions and/or nested\n # versions of the returned dataset to be created, because it forces\n # the generation of a new ID for each version.\n return id_dataset.flat_map(flat_map_fn)\n\n @staticmethod\n def range(*args, **kwargs):\n \"\"\"Creates a `Dataset` of a step-separated range of values.\n\n >>> list(Dataset.range(5).as_numpy_iterator())\n [0, 1, 2, 3, 4]\n >>> list(Dataset.range(2, 5).as_numpy_iterator())\n [2, 3, 4]\n >>> list(Dataset.range(1, 5, 2).as_numpy_iterator())\n [1, 3]\n >>> list(Dataset.range(1, 5, -2).as_numpy_iterator())\n []\n >>> list(Dataset.range(5, 1).as_numpy_iterator())\n []\n >>> list(Dataset.range(5, 1, -2).as_numpy_iterator())\n [5, 3]\n >>> list(Dataset.range(2, 5, output_type=tf.int32).as_numpy_iterator())\n [2, 3, 4]\n >>> list(Dataset.range(1, 5, 2, output_type=tf.float32).as_numpy_iterator())\n [1.0, 3.0]\n\n Args:\n *args: follows the same semantics as python's xrange.\n len(args) == 1 -> start = 0, stop = args[0], step = 1.\n len(args) == 2 -> start = args[0], stop = args[1], step = 1.\n len(args) == 3 -> start = args[0], stop = args[1], step = args[2].\n **kwargs:\n - output_type: Its expected dtype. 
(Optional, default: `tf.int64`).\n\n Returns:\n Dataset: A `RangeDataset`.\n\n Raises:\n ValueError: if len(args) == 0.\n \"\"\"\n return RangeDataset(*args, **kwargs)\n\n @staticmethod\n def zip(datasets):\n \"\"\"Creates a `Dataset` by zipping together the given datasets.\n\n This method has similar semantics to the built-in `zip()` function\n in Python, with the main difference being that the `datasets`\n argument can be a (nested) structure of `Dataset` objects. The supported\n nesting mechanisms are documented\n [here] (https://www.tensorflow.org/guide/data#dataset_structure).\n\n >>> # The nested structure of the `datasets` argument determines the\n >>> # structure of elements in the resulting dataset.\n >>> a = tf.data.Dataset.range(1, 4) # ==> [ 1, 2, 3 ]\n >>> b = tf.data.Dataset.range(4, 7) # ==> [ 4, 5, 6 ]\n >>> ds = tf.data.Dataset.zip((a, b))\n >>> list(ds.as_numpy_iterator())\n [(1, 4), (2, 5), (3, 6)]\n >>> ds = tf.data.Dataset.zip((b, a))\n >>> list(ds.as_numpy_iterator())\n [(4, 1), (5, 2), (6, 3)]\n >>>\n >>> # The `datasets` argument may contain an arbitrary number of datasets.\n >>> c = tf.data.Dataset.range(7, 13).batch(2) # ==> [ [7, 8],\n ... # [9, 10],\n ... # [11, 12] ]\n >>> ds = tf.data.Dataset.zip((a, b, c))\n >>> for element in ds.as_numpy_iterator():\n ... print(element)\n (1, 4, array([7, 8]))\n (2, 5, array([ 9, 10]))\n (3, 6, array([11, 12]))\n >>>\n >>> # The number of elements in the resulting dataset is the same as\n >>> # the size of the smallest dataset in `datasets`.\n >>> d = tf.data.Dataset.range(13, 15) # ==> [ 13, 14 ]\n >>> ds = tf.data.Dataset.zip((a, d))\n >>> list(ds.as_numpy_iterator())\n [(1, 13), (2, 14)]\n\n Args:\n datasets: A (nested) structure of datasets.\n\n Returns:\n Dataset: A `Dataset`.\n \"\"\"\n return ZipDataset(datasets)\n\n def concatenate(self, dataset):\n \"\"\"Creates a `Dataset` by concatenating the given dataset with this dataset.\n\n >>> a = tf.data.Dataset.range(1, 4) # ==> [ 1, 2, 3 ]\n >>> b = tf.data.Dataset.range(4, 8) # ==> [ 4, 5, 6, 7 ]\n >>> ds = a.concatenate(b)\n >>> list(ds.as_numpy_iterator())\n [1, 2, 3, 4, 5, 6, 7]\n >>> # The input dataset and dataset to be concatenated should have\n >>> # compatible element specs.\n >>> c = tf.data.Dataset.zip((a, b))\n >>> a.concatenate(c)\n Traceback (most recent call last):\n TypeError: Two datasets to concatenate have different types\n <dtype: 'int64'> and (tf.int64, tf.int64)\n >>> d = tf.data.Dataset.from_tensor_slices([\"a\", \"b\", \"c\"])\n >>> a.concatenate(d)\n Traceback (most recent call last):\n TypeError: Two datasets to concatenate have different types\n <dtype: 'int64'> and <dtype: 'string'>\n\n Args:\n dataset: `Dataset` to be concatenated.\n\n Returns:\n Dataset: A `Dataset`.\n \"\"\"\n return ConcatenateDataset(self, dataset)\n\n def prefetch(self, buffer_size):\n \"\"\"Creates a `Dataset` that prefetches elements from this dataset.\n\n Most dataset input pipelines should end with a call to `prefetch`. This\n allows later elements to be prepared while the current element is being\n processed. This often improves latency and throughput, at the cost of\n using additional memory to store prefetched elements.\n\n Note: Like other `Dataset` methods, prefetch operates on the\n elements of the input dataset. It has no concept of examples vs. 
batches.\n `examples.prefetch(2)` will prefetch two elements (2 examples),\n while `examples.batch(20).prefetch(2)` will prefetch 2 elements\n (2 batches, of 20 examples each).\n\n >>> dataset = tf.data.Dataset.range(3)\n >>> dataset = dataset.prefetch(2)\n >>> list(dataset.as_numpy_iterator())\n [0, 1, 2]\n\n Args:\n buffer_size: A `tf.int64` scalar `tf.Tensor`, representing the maximum\n number of elements that will be buffered when prefetching. If the value\n `tf.data.AUTOTUNE` is used, then the buffer size is dynamically tuned.\n Returns:\n Dataset: A `Dataset`.\n \"\"\"\n return PrefetchDataset(self, buffer_size)\n\n @staticmethod\n def list_files(file_pattern, shuffle=None, seed=None):\n \"\"\"A dataset of all files matching one or more glob patterns.\n\n The `file_pattern` argument should be a small number of glob patterns.\n If your filenames have already been globbed, use\n `Dataset.from_tensor_slices(filenames)` instead, as re-globbing every\n filename with `list_files` may result in poor performance with remote\n storage systems.\n\n Note: The default behavior of this method is to return filenames in\n a non-deterministic random shuffled order. Pass a `seed` or `shuffle=False`\n to get results in a deterministic order.\n\n Example:\n If we had the following files on our filesystem:\n\n - /path/to/dir/a.txt\n - /path/to/dir/b.py\n - /path/to/dir/c.py\n\n If we pass \"/path/to/dir/*.py\" as the directory, the dataset\n would produce:\n\n - /path/to/dir/b.py\n - /path/to/dir/c.py\n\n Args:\n file_pattern: A string, a list of strings, or a `tf.Tensor` of string type\n (scalar or vector), representing the filename glob (i.e. shell wildcard)\n pattern(s) that will be matched.\n shuffle: (Optional.) If `True`, the file names will be shuffled randomly.\n Defaults to `True`.\n seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the random\n seed that will be used to create the distribution. See\n `tf.random.set_seed` for behavior.\n\n Returns:\n Dataset: A `Dataset` of strings corresponding to file names.\n \"\"\"\n with ops.name_scope(\"list_files\"):\n if shuffle is None:\n shuffle = True\n file_pattern = ops.convert_to_tensor(\n file_pattern, dtype=dtypes.string, name=\"file_pattern\")\n matching_files = gen_io_ops.matching_files(file_pattern)\n\n # Raise an exception if `file_pattern` does not match any files.\n condition = math_ops.greater(array_ops.shape(matching_files)[0], 0,\n name=\"match_not_empty\")\n\n message = math_ops.add(\n \"No files matched pattern: \",\n string_ops.reduce_join(file_pattern, separator=\", \"), name=\"message\")\n\n assert_not_empty = control_flow_ops.Assert(\n condition, [message], summarize=1, name=\"assert_not_empty\")\n with ops.control_dependencies([assert_not_empty]):\n matching_files = array_ops.identity(matching_files)\n\n dataset = Dataset.from_tensor_slices(matching_files)\n if shuffle:\n # NOTE(mrry): The shuffle buffer size must be greater than zero, but the\n # list of files might be empty.\n buffer_size = math_ops.maximum(\n array_ops.shape(matching_files, out_type=dtypes.int64)[0], 1)\n dataset = dataset.shuffle(buffer_size, seed=seed)\n return dataset\n\n def repeat(self, count=None):\n \"\"\"Repeats this dataset so each original value is seen `count` times.\n\n >>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])\n >>> dataset = dataset.repeat(3)\n >>> list(dataset.as_numpy_iterator())\n [1, 2, 3, 1, 2, 3, 1, 2, 3]\n\n Note: If this dataset is a function of global state (e.g. 
a random number\n generator), then different repetitions may produce different elements.\n\n Args:\n count: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the\n number of times the dataset should be repeated. The default behavior (if\n `count` is `None` or `-1`) is for the dataset be repeated indefinitely.\n\n Returns:\n Dataset: A `Dataset`.\n \"\"\"\n return RepeatDataset(self, count)\n\n def enumerate(self, start=0):\n \"\"\"Enumerates the elements of this dataset.\n\n It is similar to python's `enumerate`.\n\n >>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])\n >>> dataset = dataset.enumerate(start=5)\n >>> for element in dataset.as_numpy_iterator():\n ... print(element)\n (5, 1)\n (6, 2)\n (7, 3)\n\n >>> # The (nested) structure of the input dataset determines the\n >>> # structure of elements in the resulting dataset.\n >>> dataset = tf.data.Dataset.from_tensor_slices([(7, 8), (9, 10)])\n >>> dataset = dataset.enumerate()\n >>> for element in dataset.as_numpy_iterator():\n ... print(element)\n (0, array([7, 8], dtype=int32))\n (1, array([ 9, 10], dtype=int32))\n\n Args:\n start: A `tf.int64` scalar `tf.Tensor`, representing the start value for\n enumeration.\n\n Returns:\n Dataset: A `Dataset`.\n \"\"\"\n\n max_value = np.iinfo(dtypes.int64.as_numpy_dtype).max\n return Dataset.zip((Dataset.range(start, max_value), self))\n\n def shuffle(self, buffer_size, seed=None, reshuffle_each_iteration=None):\n \"\"\"Randomly shuffles the elements of this dataset.\n\n This dataset fills a buffer with `buffer_size` elements, then randomly\n samples elements from this buffer, replacing the selected elements with new\n elements. For perfect shuffling, a buffer size greater than or equal to the\n full size of the dataset is required.\n\n For instance, if your dataset contains 10,000 elements but `buffer_size` is\n set to 1,000, then `shuffle` will initially select a random element from\n only the first 1,000 elements in the buffer. Once an element is selected,\n its space in the buffer is replaced by the next (i.e. 1,001-st) element,\n maintaining the 1,000 element buffer.\n\n `reshuffle_each_iteration` controls whether the shuffle order should be\n different for each epoch. In TF 1.X, the idiomatic way to create epochs\n was through the `repeat` transformation:\n\n ```python\n dataset = tf.data.Dataset.range(3)\n dataset = dataset.shuffle(3, reshuffle_each_iteration=True)\n dataset = dataset.repeat(2)\n # [1, 0, 2, 1, 2, 0]\n\n dataset = tf.data.Dataset.range(3)\n dataset = dataset.shuffle(3, reshuffle_each_iteration=False)\n dataset = dataset.repeat(2)\n # [1, 0, 2, 1, 0, 2]\n ```\n\n In TF 2.0, `tf.data.Dataset` objects are Python iterables which makes it\n possible to also create epochs through Python iteration:\n\n ```python\n dataset = tf.data.Dataset.range(3)\n dataset = dataset.shuffle(3, reshuffle_each_iteration=True)\n list(dataset.as_numpy_iterator())\n # [1, 0, 2]\n list(dataset.as_numpy_iterator())\n # [1, 2, 0]\n ```\n\n ```python\n dataset = tf.data.Dataset.range(3)\n dataset = dataset.shuffle(3, reshuffle_each_iteration=False)\n list(dataset.as_numpy_iterator())\n # [1, 0, 2]\n list(dataset.as_numpy_iterator())\n # [1, 0, 2]\n ```\n\n Args:\n buffer_size: A `tf.int64` scalar `tf.Tensor`, representing the number of\n elements from this dataset from which the new dataset will sample.\n seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the random\n seed that will be used to create the distribution. 
See\n `tf.random.set_seed` for behavior.\n reshuffle_each_iteration: (Optional.) A boolean, which if true indicates\n that the dataset should be pseudorandomly reshuffled each time it is\n iterated over. (Defaults to `True`.)\n\n Returns:\n Dataset: A `Dataset`.\n \"\"\"\n return ShuffleDataset(self, buffer_size, seed, reshuffle_each_iteration)\n\n def cache(self, filename=\"\"):\n \"\"\"Caches the elements in this dataset.\n\n The first time the dataset is iterated over, its elements will be cached\n either in the specified file or in memory. Subsequent iterations will\n use the cached data.\n\n Note: For the cache to be finalized, the input dataset must be iterated\n through in its entirety. Otherwise, subsequent iterations will not use\n cached data.\n\n >>> dataset = tf.data.Dataset.range(5)\n >>> dataset = dataset.map(lambda x: x**2)\n >>> dataset = dataset.cache()\n >>> # The first time reading through the data will generate the data using\n >>> # `range` and `map`.\n >>> list(dataset.as_numpy_iterator())\n [0, 1, 4, 9, 16]\n >>> # Subsequent iterations read from the cache.\n >>> list(dataset.as_numpy_iterator())\n [0, 1, 4, 9, 16]\n\n When caching to a file, the cached data will persist across runs. Even the\n first iteration through the data will read from the cache file. Changing\n the input pipeline before the call to `.cache()` will have no effect until\n the cache file is removed or the filename is changed.\n\n ```python\n dataset = tf.data.Dataset.range(5)\n dataset = dataset.cache(\"/path/to/file\")\n list(dataset.as_numpy_iterator())\n # [0, 1, 2, 3, 4]\n dataset = tf.data.Dataset.range(10)\n dataset = dataset.cache(\"/path/to/file\") # Same file!\n list(dataset.as_numpy_iterator())\n # [0, 1, 2, 3, 4]\n ```\n\n Note: `cache` will produce exactly the same elements during each iteration\n through the dataset. If you wish to randomize the iteration order, make sure\n to call `shuffle` *after* calling `cache`.\n\n Args:\n filename: A `tf.string` scalar `tf.Tensor`, representing the name of a\n directory on the filesystem to use for caching elements in this Dataset.\n If a filename is not provided, the dataset will be cached in memory.\n\n Returns:\n Dataset: A `Dataset`.\n \"\"\"\n return CacheDataset(self, filename)\n\n def take(self, count):\n \"\"\"Creates a `Dataset` with at most `count` elements from this dataset.\n\n >>> dataset = tf.data.Dataset.range(10)\n >>> dataset = dataset.take(3)\n >>> list(dataset.as_numpy_iterator())\n [0, 1, 2]\n\n Args:\n count: A `tf.int64` scalar `tf.Tensor`, representing the number of\n elements of this dataset that should be taken to form the new dataset.\n If `count` is -1, or if `count` is greater than the size of this\n dataset, the new dataset will contain all elements of this dataset.\n\n Returns:\n Dataset: A `Dataset`.\n \"\"\"\n return TakeDataset(self, count)\n\n def skip(self, count):\n \"\"\"Creates a `Dataset` that skips `count` elements from this dataset.\n\n >>> dataset = tf.data.Dataset.range(10)\n >>> dataset = dataset.skip(7)\n >>> list(dataset.as_numpy_iterator())\n [7, 8, 9]\n\n Args:\n count: A `tf.int64` scalar `tf.Tensor`, representing the number of\n elements of this dataset that should be skipped to form the new dataset.\n If `count` is greater than the size of this dataset, the new dataset\n will contain no elements. 
If `count` is -1, skips the entire dataset.\n\n Returns:\n Dataset: A `Dataset`.\n \"\"\"\n return SkipDataset(self, count)\n\n def shard(self, num_shards, index):\n \"\"\"Creates a `Dataset` that includes only 1/`num_shards` of this dataset.\n\n `shard` is deterministic. The Dataset produced by `A.shard(n, i)` will\n contain all elements of A whose index mod n = i.\n\n >>> A = tf.data.Dataset.range(10)\n >>> B = A.shard(num_shards=3, index=0)\n >>> list(B.as_numpy_iterator())\n [0, 3, 6, 9]\n >>> C = A.shard(num_shards=3, index=1)\n >>> list(C.as_numpy_iterator())\n [1, 4, 7]\n >>> D = A.shard(num_shards=3, index=2)\n >>> list(D.as_numpy_iterator())\n [2, 5, 8]\n\n This dataset operator is very useful when running distributed training, as\n it allows each worker to read a unique subset.\n\n When reading a single input file, you can shard elements as follows:\n\n ```python\n d = tf.data.TFRecordDataset(input_file)\n d = d.shard(num_workers, worker_index)\n d = d.repeat(num_epochs)\n d = d.shuffle(shuffle_buffer_size)\n d = d.map(parser_fn, num_parallel_calls=num_map_threads)\n ```\n\n Important caveats:\n\n - Be sure to shard before you use any randomizing operator (such as\n shuffle).\n - Generally it is best if the shard operator is used early in the dataset\n pipeline. For example, when reading from a set of TFRecord files, shard\n before converting the dataset to input samples. This avoids reading every\n file on every worker. The following is an example of an efficient\n sharding strategy within a complete pipeline:\n\n ```python\n d = Dataset.list_files(pattern)\n d = d.shard(num_workers, worker_index)\n d = d.repeat(num_epochs)\n d = d.shuffle(shuffle_buffer_size)\n d = d.interleave(tf.data.TFRecordDataset,\n cycle_length=num_readers, block_length=1)\n d = d.map(parser_fn, num_parallel_calls=num_map_threads)\n ```\n\n Args:\n num_shards: A `tf.int64` scalar `tf.Tensor`, representing the number of\n shards operating in parallel.\n index: A `tf.int64` scalar `tf.Tensor`, representing the worker index.\n\n Returns:\n Dataset: A `Dataset`.\n\n Raises:\n InvalidArgumentError: if `num_shards` or `index` are illegal values.\n\n Note: error checking is done on a best-effort basis, and errors aren't\n guaranteed to be caught upon dataset creation. (e.g. providing in a\n placeholder tensor bypasses the early checking, and will instead result\n in an error during a session.run call.)\n \"\"\"\n return ShardDataset(self, num_shards, index)\n\n def batch(self,\n batch_size,\n drop_remainder=False,\n num_parallel_calls=None,\n deterministic=None):\n \"\"\"Combines consecutive elements of this dataset into batches.\n\n >>> dataset = tf.data.Dataset.range(8)\n >>> dataset = dataset.batch(3)\n >>> list(dataset.as_numpy_iterator())\n [array([0, 1, 2]), array([3, 4, 5]), array([6, 7])]\n\n >>> dataset = tf.data.Dataset.range(8)\n >>> dataset = dataset.batch(3, drop_remainder=True)\n >>> list(dataset.as_numpy_iterator())\n [array([0, 1, 2]), array([3, 4, 5])]\n\n The components of the resulting element will have an additional outer\n dimension, which will be `batch_size` (or `N % batch_size` for the last\n element if `batch_size` does not divide the number of input elements `N`\n evenly and `drop_remainder` is `False`). 
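As a rough sketch of the resulting per-batch shapes, for a seven-element dataset\n batched by four:\n\n >>> dataset = tf.data.Dataset.range(7).batch(4)\n >>> [batch.shape for batch in dataset]\n [TensorShape([4]), TensorShape([3])]\n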
If your program depends on the\n batches having the same outer dimension, you should set the `drop_remainder`\n argument to `True` to prevent the smaller batch from being produced.\n\n Args:\n batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of\n consecutive elements of this dataset to combine in a single batch.\n drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing\n whether the last batch should be dropped in the case it has fewer than\n `batch_size` elements; the default behavior is not to drop the smaller\n batch.\n num_parallel_calls: (Optional.) A `tf.int64` scalar `tf.Tensor`,\n representing the number of batches to compute asynchronously in\n parallel.\n If not specified, batches will be computed sequentially. If the value\n `tf.data.AUTOTUNE` is used, then the number of parallel\n calls is set dynamically based on available resources.\n deterministic: (Optional.) When `num_parallel_calls` is specified, if this\n boolean is specified (`True` or `False`), it controls the order in which\n the transformation produces elements. If set to `False`, the\n transformation is allowed to yield elements out of order to trade\n determinism for performance. If not specified, the\n `tf.data.Options.experimental_deterministic` option\n (`True` by default) controls the behavior.\n\n Returns:\n Dataset: A `Dataset`.\n \"\"\"\n if num_parallel_calls is None:\n if deterministic is not None:\n warnings.warn(\"The `deterministic` argument has no effect unless the \"\n \"`num_parallel_calls` argument is specified.\")\n return BatchDataset(self, batch_size, drop_remainder)\n else:\n return ParallelBatchDataset(self, batch_size, drop_remainder,\n num_parallel_calls, deterministic)\n\n def padded_batch(self,\n batch_size,\n padded_shapes=None,\n padding_values=None,\n drop_remainder=False):\n \"\"\"Combines consecutive elements of this dataset into padded batches.\n\n This transformation combines multiple consecutive elements of the input\n dataset into a single element.\n\n Like `tf.data.Dataset.batch`, the components of the resulting element will\n have an additional outer dimension, which will be `batch_size` (or\n `N % batch_size` for the last element if `batch_size` does not divide the\n number of input elements `N` evenly and `drop_remainder` is `False`). If\n your program depends on the batches having the same outer dimension, you\n should set the `drop_remainder` argument to `True` to prevent the smaller\n batch from being produced.\n\n Unlike `tf.data.Dataset.batch`, the input elements to be batched may have\n different shapes, and this transformation will pad each component to the\n respective shape in `padded_shapes`. The `padded_shapes` argument\n determines the resulting shape for each dimension of each component in an\n output element:\n\n * If the dimension is a constant, the component will be padded out to that\n length in that dimension.\n * If the dimension is unknown, the component will be padded out to the\n maximum length of all elements in that dimension.\n\n >>> A = (tf.data.Dataset\n ... .range(1, 5, output_type=tf.int32)\n ... .map(lambda x: tf.fill([x], x)))\n >>> # Pad to the smallest per-batch size that fits all elements.\n >>> B = A.padded_batch(2)\n >>> for element in B.as_numpy_iterator():\n ... print(element)\n [[1 0]\n [2 2]]\n [[3 3 3 0]\n [4 4 4 4]]\n >>> # Pad to a fixed size.\n >>> C = A.padded_batch(2, padded_shapes=5)\n >>> for element in C.as_numpy_iterator():\n ... 
print(element)\n [[1 0 0 0 0]\n [2 2 0 0 0]]\n [[3 3 3 0 0]\n [4 4 4 4 0]]\n >>> # Pad with a custom value.\n >>> D = A.padded_batch(2, padded_shapes=5, padding_values=-1)\n >>> for element in D.as_numpy_iterator():\n ... print(element)\n [[ 1 -1 -1 -1 -1]\n [ 2 2 -1 -1 -1]]\n [[ 3 3 3 -1 -1]\n [ 4 4 4 4 -1]]\n >>> # Components of nested elements can be padded independently.\n >>> elements = [([1, 2, 3], [10]),\n ... ([4, 5], [11, 12])]\n >>> dataset = tf.data.Dataset.from_generator(\n ... lambda: iter(elements), (tf.int32, tf.int32))\n >>> # Pad the first component of the tuple to length 4, and the second\n >>> # component to the smallest size that fits.\n >>> dataset = dataset.padded_batch(2,\n ... padded_shapes=([4], [None]),\n ... padding_values=(-1, 100))\n >>> list(dataset.as_numpy_iterator())\n [(array([[ 1, 2, 3, -1], [ 4, 5, -1, -1]], dtype=int32),\n array([[ 10, 100], [ 11, 12]], dtype=int32))]\n >>> # Pad with a single value and multiple components.\n >>> E = tf.data.Dataset.zip((A, A)).padded_batch(2, padding_values=-1)\n >>> for element in E.as_numpy_iterator():\n ... print(element)\n (array([[ 1, -1],\n [ 2, 2]], dtype=int32), array([[ 1, -1],\n [ 2, 2]], dtype=int32))\n (array([[ 3, 3, 3, -1],\n [ 4, 4, 4, 4]], dtype=int32), array([[ 3, 3, 3, -1],\n [ 4, 4, 4, 4]], dtype=int32))\n\n See also `tf.data.experimental.dense_to_sparse_batch`, which combines\n elements that may have different shapes into a `tf.sparse.SparseTensor`.\n\n Args:\n batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of\n consecutive elements of this dataset to combine in a single batch.\n padded_shapes: (Optional.) A (nested) structure of `tf.TensorShape` or\n `tf.int64` vector tensor-like objects representing the shape to which\n the respective component of each input element should be padded prior\n to batching. Any unknown dimensions will be padded to the maximum size\n of that dimension in each batch. If unset, all dimensions of all\n components are padded to the maximum size in the batch. `padded_shapes`\n must be set if any component has an unknown rank.\n padding_values: (Optional.) A (nested) structure of scalar-shaped\n `tf.Tensor`, representing the padding values to use for the respective\n components. None represents that the (nested) structure should be padded\n with default values. Defaults are `0` for numeric types and the empty\n string for string types. The `padding_values` should have the same\n (nested) structure as the input dataset. If `padding_values` is a single\n element and the input dataset has multiple components, then the same\n `padding_values` will be used to pad every component of the dataset.\n If `padding_values` is a scalar, then its value will be broadcasted\n to match the shape of each component.\n drop_remainder: (Optional.) 
A `tf.bool` scalar `tf.Tensor`, representing\n whether the last batch should be dropped in the case it has fewer than\n `batch_size` elements; the default behavior is not to drop the smaller\n batch.\n\n Returns:\n Dataset: A `Dataset`.\n\n Raises:\n ValueError: If a component has an unknown rank, and the `padded_shapes`\n argument is not set.\n \"\"\"\n if padded_shapes is None:\n padded_shapes = get_legacy_output_shapes(self)\n # A `tf.TensorShape` is only false if its *rank* is unknown:\n # bool(tf.TensorShape(None)) is False\n if not all(nest.flatten(padded_shapes)):\n raise ValueError(\"You must set the `padded_shapes` argument to \"\n \"`Dataset.padded_batch` if any component of its \"\n \"input has an unknown rank\")\n return PaddedBatchDataset(self, batch_size, padded_shapes, padding_values,\n drop_remainder)\n\n def map(self, map_func, num_parallel_calls=None, deterministic=None):\n \"\"\"Maps `map_func` across the elements of this dataset.\n\n This transformation applies `map_func` to each element of this dataset, and\n returns a new dataset containing the transformed elements, in the same\n order as they appeared in the input. `map_func` can be used to change both\n the values and the structure of a dataset's elements. Supported structure\n constructs are documented\n [here](https://www.tensorflow.org/guide/data#dataset_structure).\n\n For example, `map` can be used for adding 1 to each element, or projecting a\n subset of element components.\n\n >>> dataset = Dataset.range(1, 6) # ==> [ 1, 2, 3, 4, 5 ]\n >>> dataset = dataset.map(lambda x: x + 1)\n >>> list(dataset.as_numpy_iterator())\n [2, 3, 4, 5, 6]\n\n The input signature of `map_func` is determined by the structure of each\n element in this dataset.\n\n >>> dataset = Dataset.range(5)\n >>> # `map_func` takes a single argument of type `tf.Tensor` with the same\n >>> # shape and dtype.\n >>> result = dataset.map(lambda x: x + 1)\n\n >>> # Each element is a tuple containing two `tf.Tensor` objects.\n >>> elements = [(1, \"foo\"), (2, \"bar\"), (3, \"baz\")]\n >>> dataset = tf.data.Dataset.from_generator(\n ... lambda: elements, (tf.int32, tf.string))\n >>> # `map_func` takes two arguments of type `tf.Tensor`. This function\n >>> # projects out just the first component.\n >>> result = dataset.map(lambda x_int, y_str: x_int)\n >>> list(result.as_numpy_iterator())\n [1, 2, 3]\n\n >>> # Each element is a dictionary mapping strings to `tf.Tensor` objects.\n >>> elements = ([{\"a\": 1, \"b\": \"foo\"},\n ... {\"a\": 2, \"b\": \"bar\"},\n ... {\"a\": 3, \"b\": \"baz\"}])\n >>> dataset = tf.data.Dataset.from_generator(\n ... lambda: elements, {\"a\": tf.int32, \"b\": tf.string})\n >>> # `map_func` takes a single argument of type `dict` with the same keys\n >>> # as the elements.\n >>> result = dataset.map(lambda d: str(d[\"a\"]) + d[\"b\"])\n\n The value or values returned by `map_func` determine the structure of each\n element in the returned dataset.\n\n >>> dataset = tf.data.Dataset.range(3)\n >>> # `map_func` returns two `tf.Tensor` objects.\n >>> def g(x):\n ... return tf.constant(37.0), tf.constant([\"Foo\", \"Bar\", \"Baz\"])\n >>> result = dataset.map(g)\n >>> result.element_spec\n (TensorSpec(shape=(), dtype=tf.float32, name=None), TensorSpec(shape=(3,), \\\ndtype=tf.string, name=None))\n >>> # Python primitives, lists, and NumPy arrays are implicitly converted to\n >>> # `tf.Tensor`.\n >>> def h(x):\n ... 
return 37.0, [\"Foo\", \"Bar\"], np.array([1.0, 2.0], dtype=np.float64)\n >>> result = dataset.map(h)\n >>> result.element_spec\n (TensorSpec(shape=(), dtype=tf.float32, name=None), TensorSpec(shape=(2,), \\\ndtype=tf.string, name=None), TensorSpec(shape=(2,), dtype=tf.float64, \\\nname=None))\n >>> # `map_func` can return nested structures.\n >>> def i(x):\n ... return (37.0, [42, 16]), \"foo\"\n >>> result = dataset.map(i)\n >>> result.element_spec\n ((TensorSpec(shape=(), dtype=tf.float32, name=None),\n TensorSpec(shape=(2,), dtype=tf.int32, name=None)),\n TensorSpec(shape=(), dtype=tf.string, name=None))\n\n `map_func` can accept as arguments and return any type of dataset element.\n\n Note that irrespective of the context in which `map_func` is defined (eager\n vs. graph), tf.data traces the function and executes it as a graph. To use\n Python code inside of the function you have a few options:\n\n 1) Rely on AutoGraph to convert Python code into an equivalent graph\n computation. The downside of this approach is that AutoGraph can convert\n some but not all Python code.\n\n 2) Use `tf.py_function`, which allows you to write arbitrary Python code but\n will generally result in worse performance than 1). For example:\n\n >>> d = tf.data.Dataset.from_tensor_slices(['hello', 'world'])\n >>> # transform a string tensor to upper case string using a Python function\n >>> def upper_case_fn(t: tf.Tensor):\n ... return t.numpy().decode('utf-8').upper()\n >>> d = d.map(lambda x: tf.py_function(func=upper_case_fn,\n ... inp=[x], Tout=tf.string))\n >>> list(d.as_numpy_iterator())\n [b'HELLO', b'WORLD']\n\n 3) Use `tf.numpy_function`, which also allows you to write arbitrary\n Python code. Note that `tf.py_function` accepts `tf.Tensor` whereas\n `tf.numpy_function` accepts numpy arrays and returns only numpy arrays.\n For example:\n\n >>> d = tf.data.Dataset.from_tensor_slices(['hello', 'world'])\n >>> def upper_case_fn(t: np.ndarray):\n ... return t.decode('utf-8').upper()\n >>> d = d.map(lambda x: tf.numpy_function(func=upper_case_fn,\n ... inp=[x], Tout=tf.string))\n >>> list(d.as_numpy_iterator())\n [b'HELLO', b'WORLD']\n\n Note that the use of `tf.numpy_function` and `tf.py_function`\n in general precludes the possibility of executing user-defined\n transformations in parallel (because of Python GIL).\n\n Performance can often be improved by setting `num_parallel_calls` so that\n `map` will use multiple threads to process elements. If deterministic order\n isn't required, it can also improve performance to set\n `deterministic=False`.\n\n >>> dataset = Dataset.range(1, 6) # ==> [ 1, 2, 3, 4, 5 ]\n >>> dataset = dataset.map(lambda x: x + 1,\n ... num_parallel_calls=tf.data.AUTOTUNE,\n ... deterministic=False)\n\n The order of elements yielded by this transformation is deterministic if\n `deterministic=True`. If `map_func` contains stateful operations and\n `num_parallel_calls > 1`, the order in which that state is accessed is\n undefined, so the values of output elements may not be deterministic\n regardless of the `deterministic` flag value.\n\n Args:\n map_func: A function mapping a dataset element to another dataset element.\n num_parallel_calls: (Optional.) A `tf.int64` scalar `tf.Tensor`,\n representing the number elements to process asynchronously in parallel.\n If not specified, elements will be processed sequentially. If the value\n `tf.data.AUTOTUNE` is used, then the number of parallel\n calls is set dynamically based on available CPU.\n deterministic: (Optional.) 
When `num_parallel_calls` is specified, if this\n boolean is specified (`True` or `False`), it controls the order in which\n the transformation produces elements. If set to `False`, the\n transformation is allowed to yield elements out of order to trade\n determinism for performance. If not specified, the\n `tf.data.Options.experimental_deterministic` option\n (`True` by default) controls the behavior.\n\n Returns:\n Dataset: A `Dataset`.\n \"\"\"\n if num_parallel_calls is None:\n if deterministic is not None:\n warnings.warn(\"The `deterministic` argument has no effect unless the \"\n \"`num_parallel_calls` argument is specified.\")\n return MapDataset(self, map_func, preserve_cardinality=True)\n else:\n return ParallelMapDataset(\n self,\n map_func,\n num_parallel_calls,\n deterministic,\n preserve_cardinality=True)\n\n def flat_map(self, map_func):\n \"\"\"Maps `map_func` across this dataset and flattens the result.\n\n Use `flat_map` if you want to make sure that the order of your dataset\n stays the same. For example, to flatten a dataset of batches into a\n dataset of their elements:\n\n >>> dataset = tf.data.Dataset.from_tensor_slices(\n ... [[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n >>> dataset = dataset.flat_map(lambda x: Dataset.from_tensor_slices(x))\n >>> list(dataset.as_numpy_iterator())\n [1, 2, 3, 4, 5, 6, 7, 8, 9]\n\n `tf.data.Dataset.interleave()` is a generalization of `flat_map`, since\n `flat_map` produces the same output as\n `tf.data.Dataset.interleave(cycle_length=1)`\n\n Args:\n map_func: A function mapping a dataset element to a dataset.\n\n Returns:\n Dataset: A `Dataset`.\n \"\"\"\n return FlatMapDataset(self, map_func)\n\n def interleave(self,\n map_func,\n cycle_length=None,\n block_length=None,\n num_parallel_calls=None,\n deterministic=None):\n \"\"\"Maps `map_func` across this dataset, and interleaves the results.\n\n For example, you can use `Dataset.interleave()` to process many input files\n concurrently:\n\n >>> # Preprocess 4 files concurrently, and interleave blocks of 16 records\n >>> # from each file.\n >>> filenames = [\"/var/data/file1.txt\", \"/var/data/file2.txt\",\n ... \"/var/data/file3.txt\", \"/var/data/file4.txt\"]\n >>> dataset = tf.data.Dataset.from_tensor_slices(filenames)\n >>> def parse_fn(filename):\n ... return tf.data.Dataset.range(10)\n >>> dataset = dataset.interleave(lambda x:\n ... tf.data.TextLineDataset(x).map(parse_fn, num_parallel_calls=1),\n ... cycle_length=4, block_length=16)\n\n The `cycle_length` and `block_length` arguments control the order in which\n elements are produced. `cycle_length` controls the number of input elements\n that are processed concurrently. If you set `cycle_length` to 1, this\n transformation will handle one input element at a time, and will produce\n identical results to `tf.data.Dataset.flat_map`. In general,\n this transformation will apply `map_func` to `cycle_length` input elements,\n open iterators on the returned `Dataset` objects, and cycle through them\n producing `block_length` consecutive elements from each iterator, and\n consuming the next input element each time it reaches the end of an\n iterator.\n\n For example:\n\n >>> dataset = Dataset.range(1, 6) # ==> [ 1, 2, 3, 4, 5 ]\n >>> # NOTE: New lines indicate \"block\" boundaries.\n >>> dataset = dataset.interleave(\n ... lambda x: Dataset.from_tensors(x).repeat(6),\n ... 
cycle_length=2, block_length=4)\n >>> list(dataset.as_numpy_iterator())\n [1, 1, 1, 1,\n 2, 2, 2, 2,\n 1, 1,\n 2, 2,\n 3, 3, 3, 3,\n 4, 4, 4, 4,\n 3, 3,\n 4, 4,\n 5, 5, 5, 5,\n 5, 5]\n\n Note: The order of elements yielded by this transformation is\n deterministic, as long as `map_func` is a pure function and\n `deterministic=True`. If `map_func` contains any stateful operations, the\n order in which that state is accessed is undefined.\n\n Performance can often be improved by setting `num_parallel_calls` so that\n `interleave` will use multiple threads to fetch elements. If determinism\n isn't required, it can also improve performance to set\n `deterministic=False`.\n\n >>> filenames = [\"/var/data/file1.txt\", \"/var/data/file2.txt\",\n ... \"/var/data/file3.txt\", \"/var/data/file4.txt\"]\n >>> dataset = tf.data.Dataset.from_tensor_slices(filenames)\n >>> dataset = dataset.interleave(lambda x: tf.data.TFRecordDataset(x),\n ... cycle_length=4, num_parallel_calls=tf.data.AUTOTUNE,\n ... deterministic=False)\n\n Args:\n map_func: A function mapping a dataset element to a dataset.\n cycle_length: (Optional.) The number of input elements that will be\n processed concurrently. If not set, the tf.data runtime decides what it\n should be based on available CPU. If `num_parallel_calls` is set to\n `tf.data.AUTOTUNE`, the `cycle_length` argument identifies\n the maximum degree of parallelism.\n block_length: (Optional.) The number of consecutive elements to produce\n from each input element before cycling to another input element. If not\n set, defaults to 1.\n num_parallel_calls: (Optional.) If specified, the implementation creates a\n threadpool, which is used to fetch inputs from cycle elements\n asynchronously and in parallel. The default behavior is to fetch inputs\n from cycle elements synchronously with no parallelism. If the value\n `tf.data.AUTOTUNE` is used, then the number of parallel\n calls is set dynamically based on available CPU.\n deterministic: (Optional.) When `num_parallel_calls` is specified, if this\n boolean is specified (`True` or `False`), it controls the order in which\n the transformation produces elements. If set to `False`, the\n transformation is allowed to yield elements out of order to trade\n determinism for performance. If not specified, the\n `tf.data.Options.experimental_deterministic` option\n (`True` by default) controls the behavior.\n\n Returns:\n Dataset: A `Dataset`.\n \"\"\"\n if block_length is None:\n block_length = 1\n\n if cycle_length is None:\n cycle_length = AUTOTUNE\n\n if num_parallel_calls is None:\n if deterministic is not None:\n warnings.warn(\"The `deterministic` argument has no effect unless the \"\n \"`num_parallel_calls` argument is specified.\")\n return InterleaveDataset(self, map_func, cycle_length, block_length)\n else:\n return ParallelInterleaveDataset(\n self,\n map_func,\n cycle_length,\n block_length,\n num_parallel_calls,\n deterministic=deterministic)\n\n def filter(self, predicate):\n \"\"\"Filters this dataset according to `predicate`.\n\n >>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])\n >>> dataset = dataset.filter(lambda x: x < 3)\n >>> list(dataset.as_numpy_iterator())\n [1, 2]\n >>> # `tf.math.equal(x, y)` is required for equality comparison\n >>> def filter_fn(x):\n ... 
return tf.math.equal(x, 1)\n >>> dataset = dataset.filter(filter_fn)\n >>> list(dataset.as_numpy_iterator())\n [1]\n\n Args:\n predicate: A function mapping a dataset element to a boolean.\n\n Returns:\n Dataset: The `Dataset` containing the elements of this dataset for which\n `predicate` is `True`.\n \"\"\"\n return FilterDataset(self, predicate)\n\n def apply(self, transformation_func):\n \"\"\"Applies a transformation function to this dataset.\n\n `apply` enables chaining of custom `Dataset` transformations, which are\n represented as functions that take one `Dataset` argument and return a\n transformed `Dataset`.\n\n >>> dataset = tf.data.Dataset.range(100)\n >>> def dataset_fn(ds):\n ... return ds.filter(lambda x: x < 5)\n >>> dataset = dataset.apply(dataset_fn)\n >>> list(dataset.as_numpy_iterator())\n [0, 1, 2, 3, 4]\n\n Args:\n transformation_func: A function that takes one `Dataset` argument and\n returns a `Dataset`.\n\n Returns:\n Dataset: The `Dataset` returned by applying `transformation_func` to this\n dataset.\n \"\"\"\n dataset = transformation_func(self)\n if not isinstance(dataset, DatasetV2):\n raise TypeError(\n \"`transformation_func` must return a Dataset. Got {}.\".format(\n dataset))\n dataset._input_datasets = [self] # pylint: disable=protected-access\n return dataset\n\n def window(self, size, shift=None, stride=1, drop_remainder=False):\n \"\"\"Combines (nests of) input elements into a dataset of (nests of) windows.\n\n A \"window\" is a finite dataset of flat elements of size `size` (or possibly\n fewer if there are not enough input elements to fill the window and\n `drop_remainder` evaluates to `False`).\n\n The `shift` argument determines the number of input elements by which the\n window moves on each iteration. If windows and elements are both numbered\n starting at 0, the first element in window `k` will be element `k * shift`\n of the input dataset. In particular, the first element of the first window\n will always be the first element of the input dataset.\n\n The `stride` argument determines the stride of the input elements, and the\n `shift` argument determines the shift of the window.\n\n For example:\n\n >>> dataset = tf.data.Dataset.range(7).window(2)\n >>> for window in dataset:\n ... print(list(window.as_numpy_iterator()))\n [0, 1]\n [2, 3]\n [4, 5]\n [6]\n >>> dataset = tf.data.Dataset.range(7).window(3, 2, 1, True)\n >>> for window in dataset:\n ... print(list(window.as_numpy_iterator()))\n [0, 1, 2]\n [2, 3, 4]\n [4, 5, 6]\n >>> dataset = tf.data.Dataset.range(7).window(3, 1, 2, True)\n >>> for window in dataset:\n ... print(list(window.as_numpy_iterator()))\n [0, 2, 4]\n [1, 3, 5]\n [2, 4, 6]\n\n Note that when the `window` transformation is applied to a dataset of\n nested elements, it produces a dataset of nested windows.\n\n >>> nested = ([1, 2, 3, 4], [5, 6, 7, 8])\n >>> dataset = tf.data.Dataset.from_tensor_slices(nested).window(2)\n >>> for window in dataset:\n ... def to_numpy(ds):\n ... return list(ds.as_numpy_iterator())\n ... print(tuple(to_numpy(component) for component in window))\n ([1, 2], [5, 6])\n ([3, 4], [7, 8])\n\n >>> dataset = tf.data.Dataset.from_tensor_slices({'a': [1, 2, 3, 4]})\n >>> dataset = dataset.window(2)\n >>> for window in dataset:\n ... def to_numpy(ds):\n ... return list(ds.as_numpy_iterator())\n ... print({'a': to_numpy(window['a'])})\n {'a': [1, 2]}\n {'a': [3, 4]}\n\n Args:\n size: A `tf.int64` scalar `tf.Tensor`, representing the number of elements\n of the input dataset to combine into a window. 
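# --- Illustrative sketch (not part of the original source), assuming the
# public TF 2.x API: chaining a reusable transformation with `apply`, and
# `filter` with a `tf.math` comparison as recommended above.
import tensorflow as tf

def small_and_even(ds):
  # A custom transformation: takes one `Dataset`, returns one `Dataset`.
  return ds.filter(lambda x: x < 10).filter(lambda x: tf.math.equal(x % 2, 0))

result = tf.data.Dataset.range(100).apply(small_and_even)
print(list(result.as_numpy_iterator()))  # [0, 2, 4, 6, 8]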
Must be positive.\n shift: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the\n number of input elements by which the window moves in each iteration.\n Defaults to `size`. Must be positive.\n stride: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the\n stride of the input elements in the sliding window. Must be positive.\n The default value of 1 means \"retain every input element\".\n drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing\n whether the last windows should be dropped if their size is smaller than\n `size`.\n\n Returns:\n Dataset: A `Dataset` of (nests of) windows -- a finite datasets of flat\n elements created from the (nests of) input elements.\n\n \"\"\"\n if shift is None:\n shift = size\n return WindowDataset(self, size, shift, stride, drop_remainder)\n\n def reduce(self, initial_state, reduce_func):\n \"\"\"Reduces the input dataset to a single element.\n\n The transformation calls `reduce_func` successively on every element of\n the input dataset until the dataset is exhausted, aggregating information in\n its internal state. The `initial_state` argument is used for the initial\n state and the final state is returned as the result.\n\n >>> tf.data.Dataset.range(5).reduce(np.int64(0), lambda x, _: x + 1).numpy()\n 5\n >>> tf.data.Dataset.range(5).reduce(np.int64(0), lambda x, y: x + y).numpy()\n 10\n\n Args:\n initial_state: An element representing the initial state of the\n transformation.\n reduce_func: A function that maps `(old_state, input_element)` to\n `new_state`. It must take two arguments and return a new element\n The structure of `new_state` must match the structure of\n `initial_state`.\n\n Returns:\n A dataset element corresponding to the final state of the transformation.\n\n \"\"\"\n\n with ops.name_scope(\"initial_state\"):\n initial_state = structure.normalize_element(initial_state)\n state_structure = structure.type_spec_from_value(initial_state)\n\n # Iteratively rerun the reduce function until reaching a fixed point on\n # `state_structure`.\n need_to_rerun = True\n while need_to_rerun:\n\n wrapped_func = StructuredFunctionWrapper(\n reduce_func,\n \"reduce()\",\n input_structure=(state_structure, self.element_spec),\n add_to_graph=False)\n\n # Extract and validate class information from the returned values.\n output_classes = wrapped_func.output_classes\n state_classes = nest.map_structure(\n lambda component_spec: component_spec._to_legacy_output_classes(), # pylint: disable=protected-access\n state_structure)\n for new_state_class, state_class in zip(\n nest.flatten(output_classes), nest.flatten(state_classes)):\n if not issubclass(new_state_class, state_class):\n raise TypeError(\n \"The element classes for the new state must match the initial \"\n \"state. Expected %s; got %s.\" %\n (state_classes, wrapped_func.output_classes))\n\n # Extract and validate type information from the returned values.\n output_types = wrapped_func.output_types\n state_types = nest.map_structure(\n lambda component_spec: component_spec._to_legacy_output_types(), # pylint: disable=protected-access\n state_structure)\n for new_state_type, state_type in zip(\n nest.flatten(output_types), nest.flatten(state_types)):\n if new_state_type != state_type:\n raise TypeError(\n \"The element types for the new state must match the initial \"\n \"state. 
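# --- Illustrative sketch (not part of the original source), assuming the
# public TF 2.x API: a common follow-up to `window` -- each window is itself
# a small `Dataset`, so it is usually batched back into a dense tensor.
import tensorflow as tf

ds = tf.data.Dataset.range(7)
windows = ds.window(3, shift=1, drop_remainder=True)
batched = windows.flat_map(lambda w: w.batch(3))
print(list(batched.as_numpy_iterator()))
# [array([0, 1, 2]), array([1, 2, 3]), array([2, 3, 4]),
#  array([3, 4, 5]), array([4, 5, 6])]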
Expected %s; got %s.\" %\n (state_types, wrapped_func.output_types))\n\n # Extract shape information from the returned values.\n output_shapes = wrapped_func.output_shapes\n state_shapes = nest.map_structure(\n lambda component_spec: component_spec._to_legacy_output_shapes(), # pylint: disable=protected-access\n state_structure)\n flat_state_shapes = nest.flatten(state_shapes)\n flat_new_state_shapes = nest.flatten(output_shapes)\n weakened_state_shapes = [\n original.most_specific_compatible_shape(new)\n for original, new in zip(flat_state_shapes, flat_new_state_shapes)\n ]\n\n need_to_rerun = False\n for original_shape, weakened_shape in zip(flat_state_shapes,\n weakened_state_shapes):\n if original_shape.ndims is not None and (\n weakened_shape.ndims is None or\n original_shape.as_list() != weakened_shape.as_list()):\n need_to_rerun = True\n break\n\n if need_to_rerun:\n # TODO(b/110122868): Support a \"most specific compatible structure\"\n # method for combining structures, to avoid using legacy structures\n # here.\n state_structure = structure.convert_legacy_structure(\n state_types,\n nest.pack_sequence_as(state_shapes, weakened_state_shapes),\n state_classes)\n\n reduce_func = wrapped_func.function\n reduce_func.add_to_graph(ops.get_default_graph())\n\n dataset = self._apply_options()\n\n # pylint: disable=protected-access\n return structure.from_compatible_tensor_list(\n state_structure,\n gen_dataset_ops.reduce_dataset(\n dataset._variant_tensor,\n structure.to_tensor_list(state_structure, initial_state),\n reduce_func.captured_inputs,\n f=reduce_func,\n output_shapes=structure.get_flat_tensor_shapes(state_structure),\n output_types=structure.get_flat_tensor_types(state_structure)))\n\n def unbatch(self):\n \"\"\"Splits elements of a dataset into multiple elements.\n\n For example, if elements of the dataset are shaped `[B, a0, a1, ...]`,\n where `B` may vary for each input element, then for each element in the\n dataset, the unbatched dataset will contain `B` consecutive elements\n of shape `[a0, a1, ...]`.\n\n >>> elements = [ [1, 2, 3], [1, 2], [1, 2, 3, 4] ]\n >>> dataset = tf.data.Dataset.from_generator(lambda: elements, tf.int64)\n >>> dataset = dataset.unbatch()\n >>> list(dataset.as_numpy_iterator())\n [1, 2, 3, 1, 2, 1, 2, 3, 4]\n\n Note: `unbatch` requires a data copy to slice up the batched tensor into\n smaller, unbatched tensors. When optimizing performance, try to avoid\n unnecessary usage of `unbatch`.\n\n Returns:\n A `Dataset`.\n \"\"\"\n normalized_dataset = normalize_to_dense(self)\n return _UnbatchDataset(normalized_dataset)\n\n def with_options(self, options):\n \"\"\"Returns a new `tf.data.Dataset` with the given options set.\n\n The options are \"global\" in the sense they apply to the entire dataset.\n If options are set multiple times, they are merged as long as different\n options do not use different non-default values.\n\n >>> ds = tf.data.Dataset.range(5)\n >>> ds = ds.interleave(lambda x: tf.data.Dataset.range(5),\n ... cycle_length=3,\n ... 
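# --- Illustrative sketch (not part of the original source), assuming the
# public TF 2.x API: `reduce` threads a state shaped like `initial_state`
# through every element; `unbatch` undoes a previous batching step.
import tensorflow as tf
import numpy as np

ds = tf.data.Dataset.range(10)
# Running (count, sum) pair as the reduction state.
count, total = ds.reduce(
    (np.int64(0), np.int64(0)),
    lambda state, x: (state[0] + 1, state[1] + x))
print(count.numpy(), total.numpy())  # 10 45

rebatched = tf.data.Dataset.range(6).batch(2).unbatch()
print(list(rebatched.as_numpy_iterator()))  # [0, 1, 2, 3, 4, 5]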
num_parallel_calls=3)\n >>> options = tf.data.Options()\n >>> # This will make the interleave order non-deterministic.\n >>> options.experimental_deterministic = False\n >>> ds = ds.with_options(options)\n\n Args:\n options: A `tf.data.Options` that identifies the options the use.\n\n Returns:\n Dataset: A `Dataset` with the given options.\n\n Raises:\n ValueError: when an option is set more than once to a non-default value\n \"\"\"\n return _OptionsDataset(self, options)\n\n def cardinality(self):\n \"\"\"Returns the cardinality of the dataset, if known.\n\n `cardinality` may return `tf.data.INFINITE_CARDINALITY` if the dataset\n contains an infinite number of elements or `tf.data.UNKNOWN_CARDINALITY` if\n the analysis fails to determine the number of elements in the dataset\n (e.g. when the dataset source is a file).\n\n >>> dataset = tf.data.Dataset.range(42)\n >>> print(dataset.cardinality().numpy())\n 42\n >>> dataset = dataset.repeat()\n >>> cardinality = dataset.cardinality()\n >>> print((cardinality == tf.data.INFINITE_CARDINALITY).numpy())\n True\n >>> dataset = dataset.filter(lambda x: True)\n >>> cardinality = dataset.cardinality()\n >>> print((cardinality == tf.data.UNKNOWN_CARDINALITY).numpy())\n True\n\n Returns:\n A scalar `tf.int64` `Tensor` representing the cardinality of the dataset.\n If the cardinality is infinite or unknown, `cardinality` returns the\n named constants `tf.data.INFINITE_CARDINALITY` and\n `tf.data.UNKNOWN_CARDINALITY` respectively.\n \"\"\"\n return gen_dataset_ops.dataset_cardinality(self._variant_tensor)\n\n\n@tf_export(v1=[\"data.Dataset\"])\nclass DatasetV1(DatasetV2):\n \"\"\"Represents a potentially large set of elements.\n\n A `Dataset` can be used to represent an input pipeline as a\n collection of elements and a \"logical plan\" of transformations that act on\n those elements.\n \"\"\"\n\n def __init__(self):\n try:\n variant_tensor = self._as_variant_tensor()\n except AttributeError as e:\n if \"_as_variant_tensor\" in str(e):\n raise AttributeError(\"Please use _variant_tensor instead of \"\n \"_as_variant_tensor() to obtain the variant \"\n \"associated with a dataset\")\n raise AttributeError(\"{}: A likely cause of this error is that the super \"\n \"call for this dataset is not the last line of the \"\n \"__init__ method. The base class causes the \"\n \"_as_variant_tensor call in its constructor and \"\n \"if that uses attributes defined in the __init__ \"\n \"method, those attrs need to be defined before the \"\n \"super call.\".format(e))\n super(DatasetV1, self).__init__(variant_tensor)\n\n @abc.abstractmethod\n def _as_variant_tensor(self):\n \"\"\"Creates a scalar `tf.Tensor` of `tf.variant` representing this dataset.\n\n Returns:\n A scalar `tf.Tensor` of `tf.variant` type, which represents this dataset.\n \"\"\"\n raise NotImplementedError(\"Dataset._as_variant_tensor\")\n\n @deprecation.deprecated(\n None, \"This is a deprecated API that should only be used in TF 1 graph \"\n \"mode and legacy TF 2 graph mode available through `tf.compat.v1`. In \"\n \"all other situations -- namely, eager mode and inside `tf.function` -- \"\n \"you can consume dataset elements using `for elem in dataset: ...` or \"\n \"by explicitly creating iterator via `iterator = iter(dataset)` and \"\n \"fetching its elements via `values = next(iterator)`. Furthermore, \"\n \"this API is not available in TF 2. 
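# --- Illustrative sketch (not part of the original source), assuming the
# public TF 2.x API: reading `cardinality` and relaxing determinism via
# `with_options`, as documented above.
import tensorflow as tf

ds = tf.data.Dataset.range(42)
print(ds.cardinality().numpy())  # 42
print((ds.repeat().cardinality() == tf.data.INFINITE_CARDINALITY).numpy())  # True

options = tf.data.Options()
options.experimental_deterministic = False
ds = ds.with_options(options)
print(ds.options().experimental_deterministic)  # False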
During the transition from TF 1 \"\n \"to TF 2 you can use `tf.compat.v1.data.make_one_shot_iterator(dataset)` \"\n \"to create a TF 1 graph mode style iterator for a dataset created \"\n \"through TF 2 APIs. Note that this should be a transient state of your \"\n \"code base as there are in general no guarantees about the \"\n \"interoperability of TF 1 and TF 2 code.\")\n def make_one_shot_iterator(self):\n \"\"\"Creates an iterator for elements of this dataset.\n\n Note: The returned iterator will be initialized automatically.\n A \"one-shot\" iterator does not currently support re-initialization. For\n that see `make_initializable_iterator`.\n\n Example:\n\n ```python\n # Building graph ...\n dataset = ...\n next_value = dataset.make_one_shot_iterator().get_next()\n\n # ... from within a session ...\n try:\n while True:\n value = sess.run(next_value)\n ...\n except tf.errors.OutOfRangeError:\n pass\n ```\n\n Returns:\n An `tf.data.Iterator` for elements of this dataset.\n \"\"\"\n return self._make_one_shot_iterator()\n\n def _make_one_shot_iterator(self): # pylint: disable=missing-docstring\n if context.executing_eagerly():\n with ops.colocate_with(self._variant_tensor):\n return iterator_ops.OwnedIterator(self)\n\n _ensure_same_dataset_graph(self)\n # Some ops (e.g. dataset ops) are marked as stateful but are stil safe to\n # to capture by value. We must allowlist these ops so that the capturing\n # logic captures the ops instead of raising an exception.\n allowlisted_stateful_ops = traverse.obtain_capture_by_value_ops(self)\n graph_level_seed, op_level_seed = core_random_seed.get_seed(None)\n\n # NOTE(mrry): We capture by value here to ensure that `_make_dataset()` is\n # a 0-argument function.\n @function.Defun(\n capture_by_value=True,\n allowlisted_stateful_ops=allowlisted_stateful_ops)\n def _make_dataset():\n \"\"\"Factory function for a dataset.\"\"\"\n # NOTE(mrry): `Defun` does not capture the graph-level seed from the\n # enclosing graph, so if a graph-level seed is present we set the local\n # graph seed based on a combination of the graph- and op-level seeds.\n if graph_level_seed is not None:\n assert op_level_seed is not None\n core_random_seed.set_random_seed(\n (graph_level_seed + 87654321 * op_level_seed) % (2 ** 63 - 1))\n\n dataset = self._apply_options()\n return dataset._variant_tensor # pylint: disable=protected-access\n\n try:\n _make_dataset.add_to_graph(ops.get_default_graph())\n except ValueError as err:\n if \"Cannot capture a stateful node\" in str(err):\n raise ValueError(\n \"Failed to create a one-shot iterator for a dataset. \"\n \"`Dataset.make_one_shot_iterator()` does not support datasets that \"\n \"capture stateful objects, such as a `Variable` or `LookupTable`. \"\n \"In these cases, use `Dataset.make_initializable_iterator()`. \"\n \"(Original error: %s)\" % err)\n else:\n six.reraise(ValueError, err)\n\n with ops.colocate_with(self._variant_tensor):\n # pylint: disable=protected-access\n return iterator_ops.Iterator(\n gen_dataset_ops.one_shot_iterator(\n dataset_factory=_make_dataset, **self._flat_structure), None,\n get_legacy_output_types(self), get_legacy_output_shapes(self),\n get_legacy_output_classes(self))\n\n @deprecation.deprecated(\n None, \"This is a deprecated API that should only be used in TF 1 graph \"\n \"mode and legacy TF 2 graph mode available through `tf.compat.v1`. 
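# --- Illustrative sketch (not part of the original source): the transition
# path suggested by the deprecation notice above, using
# `tf.compat.v1.data.make_one_shot_iterator` under TF1-style graph mode
# (an explicit Graph here; `tf.compat.v1.disable_eager_execution()` also works).
import tensorflow as tf

graph = tf.Graph()
with graph.as_default():
  dataset = tf.compat.v1.data.Dataset.range(3)
  iterator = tf.compat.v1.data.make_one_shot_iterator(dataset)
  next_value = iterator.get_next()

with tf.compat.v1.Session(graph=graph) as sess:
  try:
    while True:
      print(sess.run(next_value))  # 0, 1, 2
  except tf.errors.OutOfRangeError:
    pass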
\"\n \"In all other situations -- namely, eager mode and inside `tf.function` \"\n \"-- you can consume dataset elements using `for elem in dataset: ...` \"\n \"or by explicitly creating iterator via `iterator = iter(dataset)` \"\n \"and fetching its elements via `values = next(iterator)`. \"\n \"Furthermore, this API is not available in TF 2. During the transition \"\n \"from TF 1 to TF 2 you can use \"\n \"`tf.compat.v1.data.make_initializable_iterator(dataset)` to create a TF \"\n \"1 graph mode style iterator for a dataset created through TF 2 APIs. \"\n \"Note that this should be a transient state of your code base as there \"\n \"are in general no guarantees about the interoperability of TF 1 and TF \"\n \"2 code.\")\n def make_initializable_iterator(self, shared_name=None):\n \"\"\"Creates an iterator for elements of this dataset.\n\n Note: The returned iterator will be in an uninitialized state,\n and you must run the `iterator.initializer` operation before using it:\n\n ```python\n # Building graph ...\n dataset = ...\n iterator = dataset.make_initializable_iterator()\n next_value = iterator.get_next() # This is a Tensor.\n\n # ... from within a session ...\n sess.run(iterator.initializer)\n try:\n while True:\n value = sess.run(next_value)\n ...\n except tf.errors.OutOfRangeError:\n pass\n ```\n\n Args:\n shared_name: (Optional.) If non-empty, the returned iterator will be\n shared under the given name across multiple sessions that share the same\n devices (e.g. when using a remote server).\n\n Returns:\n A `tf.data.Iterator` for elements of this dataset.\n\n Raises:\n RuntimeError: If eager execution is enabled.\n \"\"\"\n return self._make_initializable_iterator(shared_name)\n\n def _make_initializable_iterator(self, shared_name=None): # pylint: disable=missing-docstring\n if context.executing_eagerly():\n raise RuntimeError(\n \"dataset.make_initializable_iterator is not supported when eager \"\n \"execution is enabled. 
Use `for element in dataset` instead.\")\n _ensure_same_dataset_graph(self)\n dataset = self._apply_options()\n if shared_name is None:\n shared_name = \"\"\n\n with ops.colocate_with(self._variant_tensor):\n iterator_resource = gen_dataset_ops.iterator_v2(\n container=\"\", shared_name=shared_name, **self._flat_structure)\n\n initializer = gen_dataset_ops.make_iterator(\n dataset._variant_tensor, # pylint: disable=protected-access\n iterator_resource)\n\n # pylint: disable=protected-access\n return iterator_ops.Iterator(iterator_resource, initializer,\n get_legacy_output_types(dataset),\n get_legacy_output_shapes(dataset),\n get_legacy_output_classes(dataset))\n\n @property\n @deprecation.deprecated(\n None, \"Use `tf.compat.v1.data.get_output_classes(dataset)`.\")\n def output_classes(self):\n \"\"\"Returns the class of each component of an element of this dataset.\n\n Returns:\n A (nested) structure of Python `type` objects corresponding to each\n component of an element of this dataset.\n \"\"\"\n return nest.map_structure(\n lambda component_spec: component_spec._to_legacy_output_classes(), # pylint: disable=protected-access\n self.element_spec)\n\n @property\n @deprecation.deprecated(\n None, \"Use `tf.compat.v1.data.get_output_shapes(dataset)`.\")\n def output_shapes(self):\n \"\"\"Returns the shape of each component of an element of this dataset.\n\n Returns:\n A (nested) structure of `tf.TensorShape` objects corresponding to each\n component of an element of this dataset.\n \"\"\"\n return nest.map_structure(\n lambda component_spec: component_spec._to_legacy_output_shapes(), # pylint: disable=protected-access\n self.element_spec)\n\n @property\n @deprecation.deprecated(\n None, \"Use `tf.compat.v1.data.get_output_types(dataset)`.\")\n def output_types(self):\n \"\"\"Returns the type of each component of an element of this dataset.\n\n Returns:\n A (nested) structure of `tf.DType` objects corresponding to each component\n of an element of this dataset.\n \"\"\"\n return nest.map_structure(\n lambda component_spec: component_spec._to_legacy_output_types(), # pylint: disable=protected-access\n self.element_spec)\n\n @property\n def element_spec(self):\n # TODO(b/110122868): Remove this override once all `Dataset` instances\n # implement `element_structure`.\n return structure.convert_legacy_structure(\n self.output_types, self.output_shapes, self.output_classes)\n\n @staticmethod\n @functools.wraps(DatasetV2.from_tensors)\n def from_tensors(tensors):\n return DatasetV1Adapter(DatasetV2.from_tensors(tensors))\n\n @staticmethod\n @functools.wraps(DatasetV2.from_tensor_slices)\n def from_tensor_slices(tensors):\n return DatasetV1Adapter(DatasetV2.from_tensor_slices(tensors))\n\n @staticmethod\n @deprecation.deprecated(None, \"Use `tf.data.Dataset.from_tensor_slices()`.\")\n def from_sparse_tensor_slices(sparse_tensor):\n \"\"\"Splits each rank-N `tf.sparse.SparseTensor` in this dataset row-wise.\n\n Args:\n sparse_tensor: A `tf.sparse.SparseTensor`.\n\n Returns:\n Dataset: A `Dataset` of rank-(N-1) sparse tensors.\n \"\"\"\n return DatasetV1Adapter(SparseTensorSliceDataset(sparse_tensor))\n\n @staticmethod\n @functools.wraps(DatasetV2.from_generator)\n def from_generator(generator,\n output_types=None,\n output_shapes=None,\n args=None,\n output_signature=None):\n return DatasetV1Adapter(\n DatasetV2.from_generator(generator, output_types, output_shapes, args,\n output_signature))\n\n @staticmethod\n @functools.wraps(DatasetV2.range)\n def range(*args, **kwargs):\n return 
DatasetV1Adapter(DatasetV2.range(*args, **kwargs))\n\n @staticmethod\n @functools.wraps(DatasetV2.zip)\n def zip(datasets):\n return DatasetV1Adapter(DatasetV2.zip(datasets))\n\n @functools.wraps(DatasetV2.concatenate)\n def concatenate(self, dataset):\n return DatasetV1Adapter(super(DatasetV1, self).concatenate(dataset))\n\n @functools.wraps(DatasetV2.prefetch)\n def prefetch(self, buffer_size):\n return DatasetV1Adapter(super(DatasetV1, self).prefetch(buffer_size))\n\n @staticmethod\n @functools.wraps(DatasetV2.list_files)\n def list_files(file_pattern, shuffle=None, seed=None):\n return DatasetV1Adapter(DatasetV2.list_files(file_pattern, shuffle, seed))\n\n @functools.wraps(DatasetV2.repeat)\n def repeat(self, count=None):\n return DatasetV1Adapter(super(DatasetV1, self).repeat(count))\n\n @functools.wraps(DatasetV2.shuffle)\n def shuffle(self, buffer_size, seed=None, reshuffle_each_iteration=None):\n return DatasetV1Adapter(super(DatasetV1, self).shuffle(\n buffer_size, seed, reshuffle_each_iteration))\n\n @functools.wraps(DatasetV2.cache)\n def cache(self, filename=\"\"):\n return DatasetV1Adapter(super(DatasetV1, self).cache(filename))\n\n @functools.wraps(DatasetV2.take)\n def take(self, count):\n return DatasetV1Adapter(super(DatasetV1, self).take(count))\n\n @functools.wraps(DatasetV2.skip)\n def skip(self, count):\n return DatasetV1Adapter(super(DatasetV1, self).skip(count))\n\n @functools.wraps(DatasetV2.shard)\n def shard(self, num_shards, index):\n return DatasetV1Adapter(super(DatasetV1, self).shard(num_shards, index))\n\n @functools.wraps(DatasetV2.batch)\n def batch(self,\n batch_size,\n drop_remainder=False,\n num_parallel_calls=None,\n deterministic=None):\n return DatasetV1Adapter(\n super(DatasetV1, self).batch(batch_size, drop_remainder,\n num_parallel_calls, deterministic))\n\n @functools.wraps(DatasetV2.padded_batch)\n def padded_batch(self,\n batch_size,\n padded_shapes=None,\n padding_values=None,\n drop_remainder=False):\n return DatasetV1Adapter(\n super(DatasetV1, self).padded_batch(batch_size, padded_shapes,\n padding_values, drop_remainder))\n\n @functools.wraps(DatasetV2.map)\n def map(self, map_func, num_parallel_calls=None, deterministic=None):\n if num_parallel_calls is None:\n return DatasetV1Adapter(\n MapDataset(self, map_func, preserve_cardinality=False))\n else:\n return DatasetV1Adapter(\n ParallelMapDataset(\n self,\n map_func,\n num_parallel_calls,\n deterministic,\n preserve_cardinality=False))\n\n @deprecation.deprecated(None, \"Use `tf.data.Dataset.map()\")\n def map_with_legacy_function(self,\n map_func,\n num_parallel_calls=None,\n deterministic=None):\n \"\"\"Maps `map_func` across the elements of this dataset.\n\n Note: This is an escape hatch for existing uses of `map` that do not work\n with V2 functions. New uses are strongly discouraged and existing uses\n should migrate to `map` as this method will be removed in V2.\n\n Args:\n map_func: A function mapping a (nested) structure of tensors (having\n shapes and types defined by `self.output_shapes` and\n `self.output_types`) to another (nested) structure of tensors.\n num_parallel_calls: (Optional.) A `tf.int32` scalar `tf.Tensor`,\n representing the number elements to process asynchronously in parallel.\n If not specified, elements will be processed sequentially. If the value\n `tf.data.AUTOTUNE` is used, then the number of parallel\n calls is set dynamically based on available CPU.\n deterministic: (Optional.) 
When `num_parallel_calls` is specified, this\n boolean controls the order in which the transformation produces\n elements. If set to `False`, the transformation is allowed to yield\n elements out of order to trade determinism for performance. If not\n specified, the `tf.data.Options.experimental_deterministic` option\n (`True` by default) controls the behavior.\n\n Returns:\n Dataset: A `Dataset`.\n \"\"\"\n if num_parallel_calls is None:\n if deterministic is not None:\n warnings.warn(\"The `deterministic` argument has no effect unless the \"\n \"`num_parallel_calls` argument is specified.\")\n return DatasetV1Adapter(\n MapDataset(\n self,\n map_func,\n preserve_cardinality=False,\n use_legacy_function=True))\n else:\n return DatasetV1Adapter(\n ParallelMapDataset(\n self,\n map_func,\n num_parallel_calls,\n deterministic,\n preserve_cardinality=False,\n use_legacy_function=True))\n\n @functools.wraps(DatasetV2.flat_map)\n def flat_map(self, map_func):\n return DatasetV1Adapter(super(DatasetV1, self).flat_map(map_func))\n\n @functools.wraps(DatasetV2.interleave)\n def interleave(self,\n map_func,\n cycle_length=None,\n block_length=None,\n num_parallel_calls=None,\n deterministic=None):\n return DatasetV1Adapter(\n super(DatasetV1, self).interleave(map_func, cycle_length, block_length,\n num_parallel_calls, deterministic))\n\n @functools.wraps(DatasetV2.filter)\n def filter(self, predicate):\n return DatasetV1Adapter(super(DatasetV1, self).filter(predicate))\n\n @deprecation.deprecated(None, \"Use `tf.data.Dataset.filter()\")\n def filter_with_legacy_function(self, predicate):\n \"\"\"Filters this dataset according to `predicate`.\n\n Note: This is an escape hatch for existing uses of `filter` that do not work\n with V2 functions. New uses are strongly discouraged and existing uses\n should migrate to `filter` as this method will be removed in V2.\n\n Args:\n predicate: A function mapping a (nested) structure of tensors (having\n shapes and types defined by `self.output_shapes` and\n `self.output_types`) to a scalar `tf.bool` tensor.\n\n Returns:\n Dataset: The `Dataset` containing the elements of this dataset for which\n `predicate` is `True`.\n \"\"\"\n return FilterDataset(self, predicate, use_legacy_function=True)\n\n @functools.wraps(DatasetV2.apply)\n def apply(self, transformation_func):\n return DatasetV1Adapter(super(DatasetV1, self).apply(transformation_func))\n\n @functools.wraps(DatasetV2.window)\n def window(self, size, shift=None, stride=1, drop_remainder=False):\n return DatasetV1Adapter(super(DatasetV1, self).window(\n size, shift, stride, drop_remainder))\n\n @functools.wraps(DatasetV2.unbatch)\n def unbatch(self):\n return DatasetV1Adapter(super(DatasetV1, self).unbatch())\n\n @functools.wraps(DatasetV2.with_options)\n def with_options(self, options):\n return DatasetV1Adapter(super(DatasetV1, self).with_options(options))\n\n\nif tf2.enabled():\n Dataset = DatasetV2\nelse:\n Dataset = DatasetV1\n\n\nclass DatasetV1Adapter(DatasetV1):\n \"\"\"Wraps a V2 `Dataset` object in the `tf.compat.v1.data.Dataset` API.\"\"\"\n\n def __init__(self, dataset):\n self._dataset = dataset\n super(DatasetV1Adapter, self).__init__()\n\n def _as_variant_tensor(self):\n return self._dataset._variant_tensor # pylint: disable=protected-access\n\n def _has_captured_ref(self):\n return self._dataset._has_captured_ref() # pylint: disable=protected-access\n\n def _inputs(self):\n return self._dataset._inputs() # pylint: disable=protected-access\n\n def _functions(self):\n return 
self._dataset._functions() # pylint: disable=protected-access\n\n def options(self):\n return self._dataset.options()\n\n @property\n def element_spec(self):\n return self._dataset.element_spec # pylint: disable=protected-access\n\n def __iter__(self):\n return iter(self._dataset)\n\n\ndef _ensure_same_dataset_graph(dataset):\n \"\"\"Walks the dataset graph to ensure all datasets come from the same graph.\"\"\"\n # pylint: disable=protected-access\n current_graph = ops.get_default_graph()\n bfs_q = Queue.Queue()\n bfs_q.put(dataset)\n visited = []\n while not bfs_q.empty():\n ds = bfs_q.get()\n visited.append(ds)\n ds_graph = ds._graph\n if current_graph != ds_graph:\n raise ValueError(\n \"The graph (\" + str(current_graph) + \") of the iterator is different \"\n \"from the graph (\" + str(ds_graph) + \") the dataset: \" +\n str(ds._variant_tensor) + \" was created in. If you are using the \"\n \"Estimator API, make sure that no part of the dataset returned by \"\n \"the `input_fn` function is defined outside the `input_fn` function. \"\n \"Please ensure that all datasets in the pipeline are created in the \"\n \"same graph as the iterator.\")\n for input_ds in ds._inputs():\n if input_ds not in visited:\n bfs_q.put(input_ds)\n\n\n@tf_export(v1=[\"data.make_one_shot_iterator\"])\ndef make_one_shot_iterator(dataset):\n \"\"\"Creates an iterator for elements of `dataset`.\n\n Note: The returned iterator will be initialized automatically.\n A \"one-shot\" iterator does not support re-initialization.\n\n Args:\n dataset: A `tf.data.Dataset`.\n\n Returns:\n A `tf.data.Iterator` for elements of `dataset`.\n \"\"\"\n try:\n # Call the defined `_make_one_shot_iterator()` if there is one, because some\n # datasets (e.g. for prefetching) override its behavior.\n return dataset._make_one_shot_iterator() # pylint: disable=protected-access\n except AttributeError:\n return DatasetV1Adapter(dataset)._make_one_shot_iterator() # pylint: disable=protected-access\n\n\n@tf_export(v1=[\"data.make_initializable_iterator\"])\ndef make_initializable_iterator(dataset, shared_name=None):\n \"\"\"Creates an iterator for elements of `dataset`.\n\n Note: The returned iterator will be in an uninitialized state,\n and you must run the `iterator.initializer` operation before using it:\n\n ```python\n dataset = ...\n iterator = tf.compat.v1.data.make_initializable_iterator(dataset)\n # ...\n sess.run(iterator.initializer)\n ```\n\n Args:\n dataset: A `tf.data.Dataset`.\n shared_name: (Optional.) If non-empty, the returned iterator will be shared\n under the given name across multiple sessions that share the same devices\n (e.g. when using a remote server).\n\n Returns:\n A `tf.data.Iterator` for elements of `dataset`.\n\n Raises:\n RuntimeError: If eager execution is enabled.\n \"\"\"\n try:\n # Call the defined `_make_initializable_iterator()` if there is one, because\n # some datasets (e.g. 
for prefetching) override its behavior.\n return dataset._make_initializable_iterator(shared_name) # pylint: disable=protected-access\n except AttributeError:\n return DatasetV1Adapter(dataset)._make_initializable_iterator(shared_name) # pylint: disable=protected-access\n\n\n@tf_export(\"data.experimental.get_structure\")\ndef get_structure(dataset_or_iterator):\n \"\"\"Returns the type signature for elements of the input dataset / iterator.\n\n Args:\n dataset_or_iterator: A `tf.data.Dataset` or an `tf.data.Iterator`.\n\n Returns:\n A (nested) structure of `tf.TypeSpec` objects matching the structure of an\n element of `dataset_or_iterator` and specifying the type of individual\n components.\n\n Raises:\n TypeError: If input is not a `tf.data.Dataset` or an `tf.data.Iterator`\n object.\n \"\"\"\n try:\n return dataset_or_iterator.element_spec # pylint: disable=protected-access\n except AttributeError:\n raise TypeError(\"`dataset_or_iterator` must be a `tf.data.Dataset` or \"\n \"tf.data.Iterator object, but got %s.\" %\n type(dataset_or_iterator))\n\n\n@tf_export(v1=[\"data.get_output_classes\"])\ndef get_legacy_output_classes(dataset_or_iterator):\n \"\"\"Returns the output classes for elements of the input dataset / iterator.\n\n Args:\n dataset_or_iterator: A `tf.data.Dataset` or `tf.data.Iterator`.\n\n Returns:\n A (nested) structure of Python `type` objects matching the structure of the\n dataset / iterator elements and specifying the class of the individual\n components.\n \"\"\"\n return nest.map_structure(\n lambda component_spec: component_spec._to_legacy_output_classes(), # pylint: disable=protected-access\n get_structure(dataset_or_iterator))\n\n\n@tf_export(v1=[\"data.get_output_shapes\"])\ndef get_legacy_output_shapes(dataset_or_iterator):\n \"\"\"Returns the output shapes for elements of the input dataset / iterator.\n\n Args:\n dataset_or_iterator: A `tf.data.Dataset` or `tf.data.Iterator`.\n\n Returns:\n A (nested) structure of `tf.TensorShape` objects matching the structure of\n the dataset / iterator elements and specifying the shape of the individual\n components.\n \"\"\"\n return nest.map_structure(\n lambda component_spec: component_spec._to_legacy_output_shapes(), # pylint: disable=protected-access\n get_structure(dataset_or_iterator))\n\n\n@tf_export(v1=[\"data.get_output_types\"])\ndef get_legacy_output_types(dataset_or_iterator):\n \"\"\"Returns the output shapes for elements of the input dataset / iterator.\n\n Args:\n dataset_or_iterator: A `tf.data.Dataset` or `tf.data.Iterator`.\n\n Returns:\n A (nested) structure of `tf.DType` objects matching the structure of\n dataset / iterator elements and specifying the shape of the individual\n components.\n \"\"\"\n return nest.map_structure(\n lambda component_spec: component_spec._to_legacy_output_types(), # pylint: disable=protected-access\n get_structure(dataset_or_iterator))\n\n\n@tf_export(\"data.Options\")\nclass Options(options_lib.OptionsBase):\n \"\"\"Represents options for `tf.data.Dataset`.\n\n A `tf.data.Options` object can be, for instance, used to control which static\n optimizations to apply to the input pipeline graph or whether to use\n performance modeling to dynamically tune the parallelism of operations such as\n `tf.data.Dataset.map` or `tf.data.Dataset.interleave`.\n\n The options are set for the entire dataset and are carried over to datasets\n created through tf.data transformations.\n\n The options can be set by constructing an `Options` object and using the\n 
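# --- Illustrative sketch (not part of the original source), assuming the
# public TF 2.x API: inspecting element structure with the helpers defined
# above, both the TF2 type-spec view and the TF1 legacy views.
import tensorflow as tf

ds = tf.data.Dataset.from_tensor_slices(({"x": [[1., 2.], [3., 4.]]}, [0, 1]))

print(tf.data.experimental.get_structure(ds))
# ({'x': TensorSpec(shape=(2,), dtype=tf.float32, ...)},
#  TensorSpec(shape=(), dtype=tf.int32, ...))

# TF1-style views of the same information.
print(tf.compat.v1.data.get_output_types(ds))
print(tf.compat.v1.data.get_output_shapes(ds))
print(tf.compat.v1.data.get_output_classes(ds))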
`tf.data.Dataset.with_options(options)` transformation, which returns a\n dataset with the options set.\n\n >>> dataset = tf.data.Dataset.range(42)\n >>> options = tf.data.Options()\n >>> options.experimental_deterministic = False\n >>> dataset = dataset.with_options(options)\n >>> print(dataset.options().experimental_deterministic)\n False\n\n Note: A known limitation of the `tf.data.Options` implementation is that the\n options are not preserved across tf.function boundaries. In particular, to\n set options for a dataset that is iterated within a tf.function, the options\n need to be set within the same tf.function.\n \"\"\"\n\n experimental_deterministic = options_lib.create_option(\n name=\"experimental_deterministic\",\n ty=bool,\n docstring=\n \"Whether the outputs need to be produced in deterministic order. If None,\"\n \" defaults to True.\")\n\n experimental_distribute = options_lib.create_option(\n name=\"experimental_distribute\",\n ty=distribute_options.DistributeOptions,\n docstring=\n \"The distribution strategy options associated with the dataset. See \"\n \"`tf.data.experimental.DistributeOptions` for more details.\",\n default_factory=distribute_options.DistributeOptions)\n\n experimental_optimization = options_lib.create_option(\n name=\"experimental_optimization\",\n ty=optimization_options.OptimizationOptions,\n docstring=\n \"The optimization options associated with the dataset. See \"\n \"`tf.data.experimental.OptimizationOptions` for more details.\",\n default_factory=optimization_options.OptimizationOptions)\n\n experimental_slack = options_lib.create_option(\n name=\"experimental_slack\",\n ty=bool,\n docstring=\"Whether to introduce 'slack' in the last `prefetch` of the \"\n \"input pipeline, if it exists. This may reduce CPU contention with \"\n \"accelerator host-side activity at the start of a step. The slack \"\n \"frequency is determined by the number of devices attached to this \"\n \"input pipeline. If None, defaults to False.\")\n\n experimental_stats = options_lib.create_option(\n name=\"experimental_stats\",\n ty=stats_options.StatsOptions,\n docstring=\n \"The statistics options associated with the dataset. See \"\n \"`tf.data.experimental.StatsOptions` for more details.\",\n default_factory=stats_options.StatsOptions)\n\n experimental_threading = options_lib.create_option(\n name=\"experimental_threading\",\n ty=threading_options.ThreadingOptions,\n docstring=\n \"The threading options associated with the dataset. See \"\n \"`tf.data.experimental.ThreadingOptions` for more details.\",\n default_factory=threading_options.ThreadingOptions)\n\n experimental_external_state_policy = options_lib.create_option(\n name=\"experimental_external_state_policy\",\n ty=distribute_options.ExternalStatePolicy,\n docstring=\"This option can be used to override the default policy for \"\n \"how to handle external state when serializing a dataset or \"\n \"checkpointing its iterator. 
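# --- Illustrative sketch (not part of the original source): setting a few of
# the option groups defined above and attaching them to a pipeline. The
# sub-option names (`private_threadpool_size`, `map_parallelization`) come
# from the threading/optimization option classes and are assumptions here.
import tensorflow as tf

options = tf.data.Options()
options.experimental_deterministic = False   # allow out-of-order output
options.experimental_slack = True            # add slack to the final prefetch
options.experimental_threading.private_threadpool_size = 8
options.experimental_optimization.map_parallelization = True

ds = tf.data.Dataset.range(100).map(lambda x: x + 1).prefetch(1)
ds = ds.with_options(options)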
There are three settings available - \"\n \"IGNORE: External state is ignored without a warning; WARN: External \"\n \"state is ignored and a warning is logged; FAIL: External state results \"\n \"in an error.\")\n\n def _to_proto(self):\n pb = dataset_options_pb2.Options()\n if self.experimental_deterministic is not None:\n pb.deterministic = self.experimental_deterministic\n pb.distribute_options.CopyFrom(self.experimental_distribute._to_proto()) # pylint: disable=protected-access\n if self.experimental_external_state_policy is not None:\n pb.external_state_policy = (\n distribute_options.ExternalStatePolicy._to_proto( # pylint: disable=protected-access\n self.experimental_external_state_policy))\n pb.optimization_options.CopyFrom(self.experimental_optimization._to_proto()) # pylint: disable=protected-access\n if self.experimental_slack is not None:\n pb.slack = self.experimental_slack\n pb.threading_options.CopyFrom(self.experimental_threading._to_proto()) # pylint: disable=protected-access\n return pb\n\n def _from_proto(self, pb):\n if pb.WhichOneof(\"optional_deterministic\") is not None:\n self.experimental_deterministic = pb.deterministic\n self.experimental_distribute._from_proto(pb.distribute_options) # pylint: disable=protected-access\n if pb.WhichOneof(\"optional_external_state_policy\") is not None:\n self.experimental_external_state_policy = (\n distribute_options.ExternalStatePolicy._from_proto( # pylint: disable=protected-access\n pb.external_state_policy))\n self.experimental_optimization._from_proto(pb.optimization_options) # pylint: disable=protected-access\n if pb.WhichOneof(\"optional_slack\") is not None:\n self.experimental_slack = pb.slack\n self.experimental_threading._from_proto(pb.threading_options) # pylint: disable=protected-access\n\n def _set_mutable(self, mutable):\n \"\"\"Change the mutability value to `mutable` on this options and children.\"\"\"\n # pylint: disable=protected-access\n object.__setattr__(self, \"_mutable\", mutable)\n self.experimental_distribute._set_mutable(mutable)\n self.experimental_optimization._set_mutable(mutable)\n self.experimental_threading._set_mutable(mutable)\n\n def _graph_rewrites(self):\n \"\"\"Produces lists of enabled, disabled, default static graph rewrites.\n\n Returns:\n result: a namedtuple with three attributes. `result.enabled` is the list\n of user enabled graph rewrites. `result.disabled` is the list of user\n disabled graph rewrites. 
`result.default` is the list of graph\n rewrites that are enabled by default (the user has not explicitly\n enabled or disabled them).\n \"\"\"\n if self.experimental_optimization is not None:\n result = self.experimental_optimization._graph_rewrites() # pylint: disable=protected-access\n else:\n # Apply default options\n result = optimization_options.OptimizationOptions()._graph_rewrites() # pylint: disable=protected-access\n\n if self.experimental_deterministic is False: # pylint: disable=g-bool-id-comparison\n result.enabled.append(\"make_sloppy\")\n elif self.experimental_deterministic is True: # pylint: disable=g-bool-id-comparison\n result.disabled.append(\"make_sloppy\")\n if self.experimental_stats:\n if self.experimental_stats.latency_all_edges is True: # pylint: disable=g-bool-id-comparison\n result.enabled.append(\"latency_all_edges\")\n elif self.experimental_stats.latency_all_edges is False: # pylint: disable=g-bool-id-comparison\n result.disabled.append(\"latency_all_edges\")\n if self.experimental_slack is True: # pylint: disable=g-bool-id-comparison\n result.enabled.append(\"slack\")\n elif self.experimental_slack is False: # pylint: disable=g-bool-id-comparison\n result.disabled.append(\"slack\")\n\n graph_rewrites = options_lib.graph_rewrites()\n return graph_rewrites(enabled=list(set(result.enabled)),\n disabled=list(set(result.disabled)),\n default=list(set(result.default)))\n\n def _graph_rewrite_configs(self, autotune):\n \"\"\"Produces the list of configurations for enabled graph optimizations.\"\"\"\n result = []\n if self.experimental_optimization:\n result.extend(\n self.experimental_optimization._graph_rewrite_configs(autotune)) # pylint: disable=protected-access\n\n if self.experimental_slack:\n num_devices = self.experimental_distribute.num_devices\n if num_devices is None:\n num_devices = 1\n result.append(\"slack:slack_period:%d\" % num_devices)\n return result\n\n def _autotune_settings(self):\n if self.experimental_optimization is not None:\n return self.experimental_optimization._autotune_settings() # pylint: disable=protected-access\n\n # Return default autotune options\n return optimization_options.OptimizationOptions()._autotune_settings() # pylint: disable=protected-access\n\n def merge(self, options):\n \"\"\"Merges itself with the given `tf.data.Options`.\n\n If this object and the `options` to merge set an option differently, a\n warning is generated and this object's value is updated with the `options`\n object's value.\n\n Args:\n options: a `tf.data.Options` to merge with\n\n Returns:\n New `tf.data.Options` object which is the result of merging self with\n the input `tf.data.Options`.\n \"\"\"\n return options_lib.merge_options(self, options)\n\n\nclass DatasetSource(DatasetV2):\n \"\"\"Abstract class representing a dataset with no inputs.\"\"\"\n\n def _inputs(self):\n return []\n\n\nclass UnaryDataset(DatasetV2):\n \"\"\"Abstract class representing a dataset with one input.\"\"\"\n\n def __init__(self, input_dataset, variant_tensor):\n self._input_dataset = input_dataset\n super(UnaryDataset, self).__init__(variant_tensor)\n\n def _inputs(self):\n return [self._input_dataset]\n\n\nclass UnaryUnchangedStructureDataset(UnaryDataset):\n \"\"\"Represents a unary dataset with the same input and output structure.\"\"\"\n\n def __init__(self, input_dataset, variant_tensor):\n self._input_dataset = input_dataset\n super(UnaryUnchangedStructureDataset, self).__init__(\n input_dataset, variant_tensor)\n\n @property\n def element_spec(self):\n return 
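# --- Illustrative sketch (not part of the original source), assuming the
# public TF 2.x API: merging two `tf.data.Options` objects as described in
# `merge` above; conflicting non-default values resolve to the argument's
# value and emit a warning, non-conflicting values are simply combined.
import tensorflow as tf

a = tf.data.Options()
a.experimental_deterministic = True

b = tf.data.Options()
b.experimental_slack = True

merged = a.merge(b)
print(merged.experimental_deterministic, merged.experimental_slack)  # True True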
self._input_dataset.element_spec\n\n\nclass TensorDataset(DatasetSource):\n \"\"\"A `Dataset` with a single element.\"\"\"\n\n def __init__(self, element):\n \"\"\"See `Dataset.from_tensors()` for details.\"\"\"\n element = structure.normalize_element(element)\n self._structure = structure.type_spec_from_value(element)\n self._tensors = structure.to_tensor_list(self._structure, element)\n\n variant_tensor = gen_dataset_ops.tensor_dataset(\n self._tensors,\n output_shapes=structure.get_flat_tensor_shapes(self._structure))\n super(TensorDataset, self).__init__(variant_tensor)\n\n @property\n def element_spec(self):\n return self._structure\n\n\nclass TensorSliceDataset(DatasetSource):\n \"\"\"A `Dataset` of slices from a dataset element.\"\"\"\n\n def __init__(self, element):\n \"\"\"See `Dataset.from_tensor_slices()` for details.\"\"\"\n element = structure.normalize_element(element)\n batched_spec = structure.type_spec_from_value(element)\n self._tensors = structure.to_batched_tensor_list(batched_spec, element)\n self._structure = nest.map_structure(\n lambda component_spec: component_spec._unbatch(), batched_spec) # pylint: disable=protected-access\n\n batch_dim = tensor_shape.Dimension(tensor_shape.dimension_value(\n self._tensors[0].get_shape()[0]))\n for t in self._tensors[1:]:\n batch_dim.assert_is_compatible_with(tensor_shape.Dimension(\n tensor_shape.dimension_value(t.get_shape()[0])))\n\n variant_tensor = gen_dataset_ops.tensor_slice_dataset(\n self._tensors,\n output_shapes=structure.get_flat_tensor_shapes(self._structure))\n super(TensorSliceDataset, self).__init__(variant_tensor)\n\n @property\n def element_spec(self):\n return self._structure\n\n\nclass SparseTensorSliceDataset(DatasetSource):\n \"\"\"A `Dataset` that splits a rank-N `tf.sparse.SparseTensor` into its rows.\"\"\"\n\n def __init__(self, sparse_tensor):\n \"\"\"See `Dataset.from_sparse_tensor_slices()` for details.\"\"\"\n if not isinstance(sparse_tensor, sparse_tensor_lib.SparseTensor):\n raise TypeError(\n \"`sparse_tensor` must be a `tf.sparse.SparseTensor` object.\"\n \"Was {}.\".format(sparse_tensor))\n self._sparse_tensor = sparse_tensor\n\n indices_shape = self._sparse_tensor.indices.get_shape()\n shape_shape = self._sparse_tensor.dense_shape.get_shape()\n rank = (indices_shape.dims[1] - 1).merge_with(shape_shape.dims[0] - 1)\n self._structure = (tensor_spec.TensorSpec([None, rank], dtypes.int64),\n tensor_spec.TensorSpec([None],\n self._sparse_tensor.dtype),\n tensor_spec.TensorSpec([rank], dtypes.int64))\n\n variant_tensor = gen_dataset_ops.sparse_tensor_slice_dataset(\n self._sparse_tensor.indices, self._sparse_tensor.values,\n self._sparse_tensor.dense_shape)\n super(SparseTensorSliceDataset, self).__init__(variant_tensor)\n\n @property\n def element_spec(self):\n return self._structure\n\n\nclass _VariantDataset(DatasetV2):\n \"\"\"A Dataset wrapper around a `tf.variant`-typed function argument.\"\"\"\n\n def __init__(self, dataset_variant, structure):\n self._structure = structure\n super(_VariantDataset, self).__init__(dataset_variant)\n\n def _inputs(self):\n return []\n\n @property\n def element_spec(self):\n return self._structure\n\n\nclass _NestedVariant(composite_tensor.CompositeTensor):\n\n def __init__(self, variant_tensor, element_spec, dataset_shape):\n self._variant_tensor = variant_tensor\n self._element_spec = element_spec\n self._dataset_shape = dataset_shape\n\n @property\n def _type_spec(self):\n return DatasetSpec(self._element_spec, 
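# --- Illustrative sketch (not part of the original source): the user-visible
# difference between the `TensorDataset` and `TensorSliceDataset` sources
# defined above.
import tensorflow as tf

# from_tensors: the whole structure is a single element.
single = tf.data.Dataset.from_tensors([[1, 2], [3, 4]])
print(single.element_spec)   # TensorSpec(shape=(2, 2), dtype=tf.int32, ...)

# from_tensor_slices: slices along the first (batch) dimension.
sliced = tf.data.Dataset.from_tensor_slices([[1, 2], [3, 4]])
print(sliced.element_spec)   # TensorSpec(shape=(2,), dtype=tf.int32, ...)
print(list(sliced.as_numpy_iterator()))  # [array([1, 2]), array([3, 4])]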
self._dataset_shape)\n\n\n@tf_export(\"data.experimental.from_variant\")\ndef from_variant(variant, structure):\n \"\"\"Constructs a dataset from the given variant and (nested) structure.\n\n Args:\n variant: A scalar `tf.variant` tensor representing a dataset.\n structure: A (nested) structure of `tf.TypeSpec` objects representing the\n structure of each element in the dataset.\n\n Returns:\n A `tf.data.Dataset` instance.\n \"\"\"\n return _VariantDataset(variant, structure) # pylint: disable=protected-access\n\n\n@tf_export(\"data.experimental.to_variant\")\ndef to_variant(dataset):\n \"\"\"Returns a variant representing the given dataset.\n\n Args:\n dataset: A `tf.data.Dataset`.\n\n Returns:\n A scalar `tf.variant` tensor representing the given dataset.\n \"\"\"\n return dataset._variant_tensor # pylint: disable=protected-access\n\n\n@tf_export(\n \"data.DatasetSpec\",\n v1=[\"data.DatasetSpec\", \"data.experimental.DatasetStructure\"])\nclass DatasetSpec(type_spec.BatchableTypeSpec):\n \"\"\"Type specification for `tf.data.Dataset`.\n\n See `tf.TypeSpec` for more information about TensorFlow type specifications.\n\n >>> dataset = tf.data.Dataset.range(3)\n >>> tf.data.DatasetSpec.from_value(dataset)\n DatasetSpec(TensorSpec(shape=(), dtype=tf.int64, name=None), TensorShape([]))\n \"\"\"\n\n __slots__ = [\"_element_spec\", \"_dataset_shape\"]\n\n def __init__(self, element_spec, dataset_shape=()):\n self._element_spec = element_spec\n self._dataset_shape = tensor_shape.as_shape(dataset_shape)\n\n @property\n def value_type(self):\n return Dataset\n\n @property\n def element_spec(self):\n \"\"\"The inner element spec.\"\"\"\n return self._element_spec\n\n def _serialize(self):\n return (self._element_spec, self._dataset_shape)\n\n @property\n def _component_specs(self):\n return tensor_spec.TensorSpec(self._dataset_shape, dtypes.variant)\n\n def _to_components(self, value):\n return value._variant_tensor # pylint: disable=protected-access\n\n def _from_components(self, components):\n # pylint: disable=protected-access\n if self._dataset_shape.ndims == 0:\n return _VariantDataset(components, self._element_spec)\n else:\n return _NestedVariant(components, self._element_spec, self._dataset_shape)\n\n def _to_tensor_list(self, value):\n return [\n ops.convert_to_tensor(\n tf_nest.map_structure(lambda x: x._variant_tensor, value)) # pylint: disable=protected-access\n ]\n\n @staticmethod\n def from_value(value):\n \"\"\"Creates a `DatasetSpec` for the given `tf.data.Dataset` value.\"\"\"\n return DatasetSpec(value.element_spec) # pylint: disable=protected-access\n\n def _batch(self, batch_size):\n return DatasetSpec(\n self._element_spec,\n tensor_shape.TensorShape([batch_size]).concatenate(self._dataset_shape))\n\n def _unbatch(self):\n if self._dataset_shape.ndims == 0:\n raise ValueError(\"Unbatching a dataset is only supported for rank >= 1\")\n return DatasetSpec(self._element_spec, self._dataset_shape[1:])\n\n def _to_batched_tensor_list(self, value):\n if self._dataset_shape.ndims == 0:\n raise ValueError(\"Unbatching a dataset is only supported for rank >= 1\")\n return self._to_tensor_list(value)\n\n def _to_legacy_output_types(self):\n return self\n\n def _to_legacy_output_shapes(self):\n return self\n\n def _to_legacy_output_classes(self):\n return self\n\n\nclass StructuredFunctionWrapper(object):\n \"\"\"A function wrapper that supports structured arguments and return values.\"\"\"\n\n def __init__(self,\n func,\n transformation_name,\n dataset=None,\n input_classes=None,\n 
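# --- Illustrative sketch (not part of the original source): round-tripping a
# dataset through its variant tensor with the experimental helpers exported
# above, and building the matching `DatasetSpec`. Assumes eager execution.
import tensorflow as tf

ds = tf.data.Dataset.range(3)
spec = tf.data.DatasetSpec.from_value(ds)
print(spec)  # DatasetSpec(TensorSpec(shape=(), dtype=tf.int64, ...), TensorShape([]))

variant = tf.data.experimental.to_variant(ds)
restored = tf.data.experimental.from_variant(variant, structure=spec.element_spec)
print(list(restored.as_numpy_iterator()))  # [0, 1, 2]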
input_shapes=None,\n input_types=None,\n input_structure=None,\n add_to_graph=True,\n use_legacy_function=False,\n defun_kwargs=None):\n \"\"\"Creates a new `StructuredFunctionWrapper` for the given function.\n\n Args:\n func: A function from a (nested) structure to another (nested) structure.\n transformation_name: Human-readable name of the transformation in which\n this function is being instantiated, for error messages.\n dataset: (Optional.) A `tf.data.Dataset`. If given, the structure of this\n dataset will be assumed as the structure for `func` arguments; otherwise\n `input_classes`, `input_shapes`, and `input_types` must be defined.\n input_classes: (Optional.) A (nested) structure of `type`. If given, this\n argument defines the Python types for `func` arguments.\n input_shapes: (Optional.) A (nested) structure of `tf.TensorShape`. If\n given, this argument defines the shapes and structure for `func`\n arguments.\n input_types: (Optional.) A (nested) structure of `tf.DType`. If given,\n this argument defines the element types and structure for `func`\n arguments.\n input_structure: (Optional.) A `Structure` object. If given, this argument\n defines the element types and structure for `func` arguments.\n add_to_graph: (Optional.) If `True`, the function will be added to the\n default graph, if it exists.\n use_legacy_function: (Optional.) A boolean that determines whether the\n function be created using `tensorflow.python.eager.function.defun`\n (default behavior) or `tensorflow.python.framework.function.Defun`\n (legacy behavior).\n defun_kwargs: (Optional.) A dictionary mapping string argument names to\n values. If supplied, will be passed to `function` as keyword arguments.\n\n Raises:\n ValueError: If an invalid combination of `dataset`, `input_classes`,\n `input_shapes`, and `input_types` is passed.\n \"\"\"\n # pylint: disable=protected-access\n if input_structure is None:\n if dataset is None:\n if input_classes is None or input_shapes is None or input_types is None:\n raise ValueError(\"Either `dataset`, `input_structure` or all of \"\n \"`input_classes`, `input_shapes`, and `input_types` \"\n \"must be specified.\")\n self._input_structure = structure.convert_legacy_structure(\n input_types, input_shapes, input_classes)\n else:\n if not (input_classes is None and input_shapes is None and\n input_types is None):\n raise ValueError(\"Either `dataset`, `input_structure` or all of \"\n \"`input_classes`, `input_shapes`, and `input_types` \"\n \"must be specified.\")\n self._input_structure = dataset.element_spec\n else:\n if not (dataset is None and input_classes is None and input_shapes is None\n and input_types is None):\n raise ValueError(\"Either `dataset`, `input_structure`, or all of \"\n \"`input_classes`, `input_shapes`, and `input_types` \"\n \"must be specified.\")\n self._input_structure = input_structure\n\n self._func = func\n\n # There is no graph to add in eager mode.\n add_to_graph &= not context.executing_eagerly()\n # There are some lifetime issues when a legacy function is not added to a\n # out-living graph. 
It's already deprecated so de-prioritizing the fix.\n add_to_graph |= use_legacy_function\n\n if defun_kwargs is None:\n defun_kwargs = {}\n\n readable_transformation_name = transformation_name.replace(\n \".\", \"_\")[:-2] if len(transformation_name) > 2 else \"\"\n\n func_name = \"_\".join(\n [readable_transformation_name,\n function_utils.get_func_name(func)])\n # Sanitize function name to remove symbols that interfere with graph\n # construction.\n for symbol in [\"<\", \">\", \"\\\\\", \"'\", \" \"]:\n func_name = func_name.replace(symbol, \"\")\n\n ag_ctx = autograph_ctx.control_status_ctx()\n\n def _warn_if_collections(transformation_name):\n \"\"\"Prints a warning if the given graph uses common graph collections.\n\n NOTE(mrry): Currently a warning is only generated for resources. Any\n variables created will be automatically hoisted out to the outermost scope\n using `init_scope()`. Some collections (such as for control-flow contexts)\n are benign and should not generate a warning.\n\n Args:\n transformation_name: A human-readable name for the transformation.\n \"\"\"\n warnings.warn(\"Creating resources inside a function passed to %s \"\n \"is not supported. Create each resource outside the \"\n \"function, and capture it inside the function to use it.\" %\n transformation_name, stacklevel=5)\n\n def _wrapper_helper(*args):\n \"\"\"Wrapper for passing nested structures to and from tf.data functions.\"\"\"\n nested_args = structure.from_compatible_tensor_list(\n self._input_structure, args)\n if not _should_unpack_args(nested_args):\n nested_args = (nested_args,)\n\n ret = autograph.tf_convert(func, ag_ctx)(*nested_args)\n # If `func` returns a list of tensors, `nest.flatten()` and\n # `ops.convert_to_tensor()` would conspire to attempt to stack\n # those tensors into a single tensor, because the customized\n # version of `nest.flatten()` does not recurse into lists. Since\n # it is more likely that the list arose from returning the\n # result of an operation (such as `tf.numpy_function()`) that returns a\n # list of not-necessarily-stackable tensors, we treat the\n # returned value is a `tuple` instead. A user wishing to pack\n # the return value into a single tensor can use an explicit\n # `tf.stack()` before returning.\n if isinstance(ret, list):\n ret = tuple(ret)\n\n try:\n self._output_structure = structure.type_spec_from_value(ret)\n except (ValueError, TypeError):\n six.reraise(\n TypeError,\n TypeError(\"Unsupported return value from function passed to \"\n \"%s: %s.\" % (transformation_name, ret)),\n sys.exc_info()[2])\n return ret\n\n if use_legacy_function:\n func_name = func_name + \"_\" + str(ops.uid())\n\n @function.Defun(\n *structure.get_flat_tensor_types(self._input_structure),\n func_name=func_name,\n **defun_kwargs)\n def wrapper_fn(*args):\n ret = _wrapper_helper(*args)\n # _warn_if_collections(transformation_name, ops.get_default_graph(), 0)\n return structure.to_tensor_list(self._output_structure, ret)\n\n self._function = wrapper_fn\n resource_tracker = tracking.ResourceTracker()\n with tracking.resource_tracker_scope(resource_tracker):\n if add_to_graph:\n self._function.add_to_graph(ops.get_default_graph())\n else:\n # Use the private method that will execute `wrapper_fn` but delay\n # adding it to the graph in case (e.g.) 
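# --- Illustrative sketch (not part of the original source): the list-vs-tuple
# behavior documented in `_wrapper_helper` above. A list returned from a
# tf.data function is treated as a tuple of components, not stacked into one
# tensor; use an explicit `tf.stack` to get a single tensor element.
import tensorflow as tf

ds = tf.data.Dataset.range(3)

as_components = ds.map(lambda x: [x, x + 1])          # element is a 2-tuple
print(as_components.element_spec)                      # (TensorSpec(...), TensorSpec(...))

as_tensor = ds.map(lambda x: tf.stack([x, x + 1]))     # element is a shape-[2] tensor
print(as_tensor.element_spec)                           # TensorSpec(shape=(2,), ...)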
we need to rerun the function.\n self._function._create_definition_if_needed()\n if resource_tracker.resources:\n _warn_if_collections(transformation_name)\n\n else:\n if def_function.functions_run_eagerly():\n warnings.warn(\n \"Even though the tf.config.experimental_run_functions_eagerly \"\n \"option is set, this option does not apply to tf.data functions. \"\n \"tf.data functions are still traced and executed as graphs.\")\n\n defun_kwargs.update({\"func_name\": func_name})\n defun_kwargs.update({\"_tf_data_function\": True})\n\n # Note: _wrapper_helper will apply autograph based on context.\n @eager_function.defun_with_attributes(\n input_signature=structure.get_flat_tensor_specs(\n self._input_structure),\n autograph=False,\n attributes=defun_kwargs)\n def wrapper_fn(*args): # pylint: disable=missing-docstring\n ret = _wrapper_helper(*args)\n ret = structure.to_tensor_list(self._output_structure, ret)\n return [ops.convert_to_tensor(t) for t in ret]\n\n resource_tracker = tracking.ResourceTracker()\n with tracking.resource_tracker_scope(resource_tracker):\n # TODO(b/141462134): Switch to using garbage collection.\n self._function = wrapper_fn.get_concrete_function()\n if add_to_graph:\n self._function.add_to_graph(ops.get_default_graph())\n\n if resource_tracker.resources:\n _warn_if_collections(transformation_name)\n\n outer_graph_seed = ops.get_default_graph().seed\n if outer_graph_seed and self._function.graph.seed == outer_graph_seed:\n if self._function.graph._seed_used:\n warnings.warn(\n \"Seed %s from outer graph might be getting used by function %s, \"\n \"if the random op has not been provided any seed. Explicitly set \"\n \"the seed in the function if this is not the intended behavior.\"\n %(outer_graph_seed, func_name), stacklevel=4)\n\n @property\n def output_structure(self):\n return self._output_structure\n\n @property\n def output_classes(self):\n return nest.map_structure(\n lambda component_spec: component_spec._to_legacy_output_classes(), # pylint: disable=protected-access\n self._output_structure)\n\n @property\n def output_shapes(self):\n return nest.map_structure(\n lambda component_spec: component_spec._to_legacy_output_shapes(), # pylint: disable=protected-access\n self._output_structure)\n\n @property\n def output_types(self):\n return nest.map_structure(\n lambda component_spec: component_spec._to_legacy_output_types(), # pylint: disable=protected-access\n self._output_structure)\n\n @property\n def function(self):\n return self._function\n\n\nclass _GeneratorDataset(DatasetSource):\n \"\"\"A `Dataset` that generates elements by invoking a function.\"\"\"\n\n def __init__(self, init_args, init_func, next_func, finalize_func,\n output_signature):\n \"\"\"Constructs a `_GeneratorDataset`.\n\n Args:\n init_args: A (nested) structure representing the arguments to `init_func`.\n init_func: A TensorFlow function that will be called on `init_args` each\n time a C++ iterator over this dataset is constructed. Returns a (nested)\n structure representing the \"state\" of the dataset.\n next_func: A TensorFlow function that will be called on the result of\n `init_func` to produce each element, and that raises `OutOfRangeError`\n to terminate iteration.\n finalize_func: A TensorFlow function that will be called on the result of\n `init_func` immediately before a C++ iterator over this dataset is\n destroyed. 
The return value is ignored.\n output_signature: A (nested) structure of `tf.TypeSpec` objects describing\n the output of `next_func`.\n \"\"\"\n self._init_args = init_args\n\n self._init_structure = structure.type_spec_from_value(init_args)\n\n self._init_func = StructuredFunctionWrapper(\n init_func,\n self._transformation_name(),\n input_structure=self._init_structure)\n\n self._next_func = StructuredFunctionWrapper(\n next_func,\n self._transformation_name(),\n input_structure=self._init_func.output_structure)\n\n self._finalize_func = StructuredFunctionWrapper(\n finalize_func,\n self._transformation_name(),\n input_structure=self._init_func.output_structure)\n\n self._output_signature = output_signature\n\n variant_tensor = gen_dataset_ops.generator_dataset(\n structure.to_tensor_list(self._init_structure, self._init_args) +\n self._init_func.function.captured_inputs,\n self._next_func.function.captured_inputs,\n self._finalize_func.function.captured_inputs,\n init_func=self._init_func.function,\n next_func=self._next_func.function,\n finalize_func=self._finalize_func.function,\n **self._flat_structure)\n super(_GeneratorDataset, self).__init__(variant_tensor)\n\n @property\n def element_spec(self):\n return self._output_signature\n\n def _transformation_name(self):\n return \"Dataset.from_generator()\"\n\n\nclass ZipDataset(DatasetV2):\n \"\"\"A `Dataset` that zips its inputs together.\"\"\"\n\n def __init__(self, datasets):\n \"\"\"See `Dataset.zip()` for details.\"\"\"\n for ds in nest.flatten(datasets):\n if not isinstance(ds, DatasetV2):\n if isinstance(ds, list):\n message = (\"The argument to `Dataset.zip()` must be a (nested) \"\n \"structure of `Dataset` objects. Python `list` is not \"\n \"supported, please use a `tuple` instead.\")\n else:\n message = (\"The argument to `Dataset.zip()` must be a (nested) \"\n \"structure of `Dataset` objects.\")\n raise TypeError(message)\n self._datasets = datasets\n self._structure = nest.pack_sequence_as(\n self._datasets,\n [ds.element_spec for ds in nest.flatten(self._datasets)])\n variant_tensor = gen_dataset_ops.zip_dataset(\n [ds._variant_tensor for ds in nest.flatten(self._datasets)],\n **self._flat_structure)\n super(ZipDataset, self).__init__(variant_tensor)\n\n def _inputs(self):\n return nest.flatten(self._datasets)\n\n @property\n def element_spec(self):\n return self._structure\n\n\nclass ConcatenateDataset(DatasetV2):\n \"\"\"A `Dataset` that concatenates its input with given dataset.\"\"\"\n\n def __init__(self, input_dataset, dataset_to_concatenate):\n \"\"\"See `Dataset.concatenate()` for details.\"\"\"\n self._input_dataset = input_dataset\n self._dataset_to_concatenate = dataset_to_concatenate\n\n output_types = get_legacy_output_types(input_dataset)\n if output_types != get_legacy_output_types(dataset_to_concatenate):\n raise TypeError(\n \"Two datasets to concatenate have different types %s and %s\" %\n (output_types, get_legacy_output_types(dataset_to_concatenate)))\n\n output_classes = get_legacy_output_classes(input_dataset)\n if output_classes != get_legacy_output_classes(dataset_to_concatenate):\n raise TypeError(\n \"Two datasets to concatenate have different classes %s and %s\" %\n (output_classes, get_legacy_output_classes(dataset_to_concatenate)))\n\n input_shapes = get_legacy_output_shapes(self._input_dataset)\n output_shapes = nest.pack_sequence_as(input_shapes, [\n ts1.most_specific_compatible_shape(ts2)\n for (ts1, ts2) in zip(\n nest.flatten(input_shapes),\n nest.flatten(get_legacy_output_shapes(\n 
self._dataset_to_concatenate)))\n ])\n\n self._structure = structure.convert_legacy_structure(\n output_types, output_shapes, output_classes)\n\n self._input_datasets = [input_dataset, dataset_to_concatenate]\n # pylint: disable=protected-access\n variant_tensor = gen_dataset_ops.concatenate_dataset(\n input_dataset._variant_tensor, dataset_to_concatenate._variant_tensor,\n **self._flat_structure)\n # pylint: enable=protected-access\n super(ConcatenateDataset, self).__init__(variant_tensor)\n\n def _inputs(self):\n return self._input_datasets\n\n @property\n def element_spec(self):\n return self._structure\n\n\nclass RepeatDataset(UnaryUnchangedStructureDataset):\n \"\"\"A `Dataset` that repeats its input several times.\"\"\"\n\n def __init__(self, input_dataset, count):\n \"\"\"See `Dataset.repeat()` for details.\"\"\"\n self._input_dataset = input_dataset\n if count is None:\n self._count = constant_op.constant(-1, dtype=dtypes.int64, name=\"count\")\n else:\n self._count = ops.convert_to_tensor(\n count, dtype=dtypes.int64, name=\"count\")\n variant_tensor = gen_dataset_ops.repeat_dataset(\n input_dataset._variant_tensor, # pylint: disable=protected-access\n count=self._count,\n **self._flat_structure)\n super(RepeatDataset, self).__init__(input_dataset, variant_tensor)\n\n\nclass RangeDataset(DatasetSource):\n \"\"\"A `Dataset` of a step separated range of values.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"See `Dataset.range()` for details.\"\"\"\n self._parse_args(*args, **kwargs)\n self._structure = tensor_spec.TensorSpec([], self._output_type)\n variant_tensor = gen_dataset_ops.range_dataset(\n start=self._start,\n stop=self._stop,\n step=self._step,\n **self._flat_structure)\n super(RangeDataset, self).__init__(variant_tensor)\n\n def _parse_args(self, *args, **kwargs):\n \"\"\"Parse arguments according to the same rules as the `range()` builtin.\"\"\"\n if len(args) == 1:\n self._start = self._build_tensor(0, \"start\")\n self._stop = self._build_tensor(args[0], \"stop\")\n self._step = self._build_tensor(1, \"step\")\n elif len(args) == 2:\n self._start = self._build_tensor(args[0], \"start\")\n self._stop = self._build_tensor(args[1], \"stop\")\n self._step = self._build_tensor(1, \"step\")\n elif len(args) == 3:\n self._start = self._build_tensor(args[0], \"start\")\n self._stop = self._build_tensor(args[1], \"stop\")\n self._step = self._build_tensor(args[2], \"step\")\n else:\n raise ValueError(\"Invalid arguments to RangeDataset: %s\" % str(args))\n if \"output_type\" in kwargs:\n self._output_type = kwargs[\"output_type\"]\n else:\n self._output_type = dtypes.int64\n\n def _build_tensor(self, int64_value, name):\n return ops.convert_to_tensor(int64_value, dtype=dtypes.int64, name=name)\n\n @property\n def element_spec(self):\n return self._structure\n\n\nclass CacheDataset(UnaryUnchangedStructureDataset):\n \"\"\"A `Dataset` that caches elements of its input.\"\"\"\n\n def __init__(self, input_dataset, filename):\n \"\"\"See `Dataset.cache()` for details.\"\"\"\n self._input_dataset = input_dataset\n self._filename = ops.convert_to_tensor(\n filename, dtype=dtypes.string, name=\"filename\")\n if tf2.enabled() and (context.executing_eagerly() or ops.inside_function()):\n variant_tensor = gen_dataset_ops.cache_dataset_v2(\n input_dataset._variant_tensor, # pylint: disable=protected-access\n filename=self._filename,\n cache=gen_dataset_ops.dummy_memory_cache(),\n **self._flat_structure)\n else:\n variant_tensor = gen_dataset_ops.cache_dataset(\n 
input_dataset._variant_tensor, # pylint: disable=protected-access\n filename=self._filename,\n **self._flat_structure)\n super(CacheDataset, self).__init__(input_dataset, variant_tensor)\n\n\nclass ShuffleDataset(UnaryUnchangedStructureDataset):\n \"\"\"A `Dataset` that randomly shuffles the elements of its input.\"\"\"\n\n def __init__(self,\n input_dataset,\n buffer_size,\n seed=None,\n reshuffle_each_iteration=None):\n \"\"\"Randomly shuffles the elements of this dataset.\n\n Args:\n input_dataset: The input dataset.\n buffer_size: A `tf.int64` scalar `tf.Tensor`, representing the number of\n elements from this dataset from which the new dataset will sample.\n seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the random\n seed that will be used to create the distribution. See\n `tf.random.set_seed` for behavior.\n reshuffle_each_iteration: (Optional.) A boolean, which if true indicates\n that the dataset should be pseudorandomly reshuffled each time it is\n iterated over. (Defaults to `True`.)\n\n Returns:\n A `Dataset`.\n\n Raises:\n ValueError: if invalid arguments are provided.\n \"\"\"\n self._input_dataset = input_dataset\n self._buffer_size = ops.convert_to_tensor(\n buffer_size, dtype=dtypes.int64, name=\"buffer_size\")\n self._seed, self._seed2 = random_seed.get_seed(seed)\n if reshuffle_each_iteration is None:\n reshuffle_each_iteration = True\n self._reshuffle_each_iteration = reshuffle_each_iteration\n\n if (tf2.enabled() and\n (context.executing_eagerly() or ops.inside_function())):\n variant_tensor = gen_dataset_ops.shuffle_dataset_v3(\n input_dataset._variant_tensor, # pylint: disable=protected-access\n buffer_size=self._buffer_size,\n seed=self._seed,\n seed2=self._seed2,\n seed_generator=gen_dataset_ops.dummy_seed_generator(),\n reshuffle_each_iteration=self._reshuffle_each_iteration,\n **self._flat_structure)\n else:\n variant_tensor = gen_dataset_ops.shuffle_dataset(\n input_dataset._variant_tensor, # pylint: disable=protected-access\n buffer_size=self._buffer_size,\n seed=self._seed,\n seed2=self._seed2,\n reshuffle_each_iteration=self._reshuffle_each_iteration,\n **self._flat_structure)\n super(ShuffleDataset, self).__init__(input_dataset, variant_tensor)\n\n\nclass TakeDataset(UnaryUnchangedStructureDataset):\n \"\"\"A `Dataset` containing the first `count` elements from its input.\"\"\"\n\n def __init__(self, input_dataset, count):\n \"\"\"See `Dataset.take()` for details.\"\"\"\n self._input_dataset = input_dataset\n self._count = ops.convert_to_tensor(count, dtype=dtypes.int64, name=\"count\")\n variant_tensor = gen_dataset_ops.take_dataset(\n input_dataset._variant_tensor, # pylint: disable=protected-access\n count=self._count,\n **self._flat_structure)\n super(TakeDataset, self).__init__(input_dataset, variant_tensor)\n\n\nclass SkipDataset(UnaryUnchangedStructureDataset):\n \"\"\"A `Dataset` skipping the first `count` elements from its input.\"\"\"\n\n def __init__(self, input_dataset, count):\n \"\"\"See `Dataset.skip()` for details.\"\"\"\n self._input_dataset = input_dataset\n self._count = ops.convert_to_tensor(count, dtype=dtypes.int64, name=\"count\")\n variant_tensor = gen_dataset_ops.skip_dataset(\n input_dataset._variant_tensor, # pylint: disable=protected-access\n count=self._count,\n **self._flat_structure)\n super(SkipDataset, self).__init__(input_dataset, variant_tensor)\n\n\nclass ShardDataset(UnaryUnchangedStructureDataset):\n \"\"\"A `Dataset` for sharding its input.\"\"\"\n\n def __init__(self, input_dataset, num_shards, 
index):\n \"\"\"See `Dataset.shard()` for details.\"\"\"\n self._input_dataset = input_dataset\n self._num_shards = ops.convert_to_tensor(\n num_shards, dtype=dtypes.int64, name=\"num_shards\")\n self._index = ops.convert_to_tensor(index, dtype=dtypes.int64, name=\"index\")\n variant_tensor = gen_dataset_ops.shard_dataset(\n input_dataset._variant_tensor, # pylint: disable=protected-access\n num_shards=self._num_shards,\n index=self._index,\n **self._flat_structure)\n super(ShardDataset, self).__init__(input_dataset, variant_tensor)\n\n\nclass BatchDataset(UnaryDataset):\n \"\"\"A `Dataset` that batches contiguous elements from its input.\"\"\"\n\n def __init__(self, input_dataset, batch_size, drop_remainder):\n \"\"\"See `Dataset.batch()` for details.\"\"\"\n self._input_dataset = input_dataset\n self._batch_size = ops.convert_to_tensor(\n batch_size, dtype=dtypes.int64, name=\"batch_size\")\n self._drop_remainder = ops.convert_to_tensor(\n drop_remainder, dtype=dtypes.bool, name=\"drop_remainder\")\n\n constant_drop_remainder = tensor_util.constant_value(self._drop_remainder)\n # pylint: disable=protected-access\n if constant_drop_remainder:\n # NOTE(mrry): `constant_drop_remainder` may be `None` (unknown statically)\n # or `False` (explicitly retaining the remainder).\n # pylint: disable=g-long-lambda\n constant_batch_size = tensor_util.constant_value(self._batch_size)\n self._structure = nest.map_structure(\n lambda component_spec: component_spec._batch(constant_batch_size),\n input_dataset.element_spec)\n else:\n self._structure = nest.map_structure(\n lambda component_spec: component_spec._batch(None),\n input_dataset.element_spec)\n variant_tensor = gen_dataset_ops.batch_dataset_v2(\n input_dataset._variant_tensor,\n batch_size=self._batch_size,\n drop_remainder=self._drop_remainder,\n **self._flat_structure)\n super(BatchDataset, self).__init__(input_dataset, variant_tensor)\n\n @property\n def element_spec(self):\n return self._structure\n\n\nclass ParallelBatchDataset(UnaryDataset):\n \"\"\"A `Dataset` that batches contiguous elements from its input in parallel.\"\"\"\n\n def __init__(self, input_dataset, batch_size, drop_remainder,\n num_parallel_calls, deterministic):\n \"\"\"See `Dataset.batch()` for details.\"\"\"\n self._input_dataset = input_dataset\n self._batch_size = ops.convert_to_tensor(\n batch_size, dtype=dtypes.int64, name=\"batch_size\")\n self._drop_remainder = ops.convert_to_tensor(\n drop_remainder, dtype=dtypes.bool, name=\"drop_remainder\")\n self._num_parallel_calls = ops.convert_to_tensor(\n num_parallel_calls, dtype=dtypes.int64, name=\"num_parallel_calls\")\n if deterministic is None:\n self._deterministic = \"default\"\n elif deterministic:\n self._deterministic = \"true\"\n else:\n self._deterministic = \"false\"\n\n constant_drop_remainder = tensor_util.constant_value(self._drop_remainder)\n # pylint: disable=protected-access\n if constant_drop_remainder:\n # NOTE(mrry): `constant_drop_remainder` may be `None` (unknown statically)\n # or `False` (explicitly retaining the remainder).\n # pylint: disable=g-long-lambda\n constant_batch_size = tensor_util.constant_value(self._batch_size)\n self._structure = nest.map_structure(\n lambda component_spec: component_spec._batch(constant_batch_size),\n input_dataset.element_spec)\n else:\n self._structure = nest.map_structure(\n lambda component_spec: component_spec._batch(None),\n input_dataset.element_spec)\n\n if tf_compat.forward_compatible(2021, 3,\n 18) or self._deterministic != \"default\":\n 
variant_tensor = gen_dataset_ops.parallel_batch_dataset(\n input_dataset._variant_tensor,\n batch_size=self._batch_size,\n num_parallel_calls=self._num_parallel_calls,\n drop_remainder=self._drop_remainder,\n deterministic=self._deterministic,\n **self._flat_structure)\n else:\n variant_tensor = gen_dataset_ops.parallel_batch_dataset(\n input_dataset._variant_tensor,\n batch_size=self._batch_size,\n num_parallel_calls=self._num_parallel_calls,\n drop_remainder=self._drop_remainder,\n **self._flat_structure)\n super(ParallelBatchDataset, self).__init__(input_dataset, variant_tensor)\n\n @property\n def element_spec(self):\n return self._structure\n\n\nclass _NumpyIterator(object):\n \"\"\"Iterator over a dataset with elements converted to numpy.\"\"\"\n\n __slots__ = [\"_iterator\"]\n\n def __init__(self, dataset):\n self._iterator = iter(dataset)\n\n def __iter__(self):\n return self\n\n def __next__(self):\n\n def to_numpy(x):\n numpy = x._numpy() # pylint: disable=protected-access\n if isinstance(numpy, np.ndarray):\n # `numpy` shares the same underlying buffer as the `x` Tensor.\n # Tensors are expected to be immutable, so we disable writes.\n numpy.setflags(write=False)\n return numpy\n\n return nest.map_structure(to_numpy, next(self._iterator))\n\n def next(self):\n return self.__next__()\n\n\nclass _VariantTracker(tracking.CapturableResource):\n \"\"\"Allows export of functions capturing a Dataset in SavedModels.\n\n When saving a SavedModel, `tf.saved_model.save` traverses the object\n graph. Since Datasets reference _VariantTracker objects, that traversal will\n find a _VariantTracker for each Dataset and so know how to save and restore\n functions which reference the Dataset's variant Tensor.\n \"\"\"\n\n def __init__(self, variant_tensor, resource_creator):\n \"\"\"Record that `variant_tensor` is associated with `resource_creator`.\n\n Args:\n variant_tensor: The variant-dtype Tensor associated with the Dataset. This\n Tensor will be a captured input to functions which use the Dataset, and\n is used by saving code to identify the corresponding _VariantTracker.\n resource_creator: A zero-argument function which creates a new\n variant-dtype Tensor. 
This function will be included in SavedModels and\n run to re-create the Dataset's variant Tensor on restore.\n \"\"\"\n super(_VariantTracker, self).__init__(device=\"CPU\")\n self._resource_handle = variant_tensor\n self._create_resource = resource_creator\n\n\ndef _is_padded_shape_compatible_with(padded_shape, input_component_shape):\n \"\"\"Returns `True` if `input_component_shape` can be padded to `padded_shape`.\n\n Args:\n padded_shape: A `tf.TensorShape`.\n input_component_shape: A `tf.TensorShape`.\n\n Returns:\n `True` if `input_component_shape` can be padded to `padded_shape`, otherwise\n `False`.\n \"\"\"\n\n if padded_shape.dims is None or input_component_shape.dims is None:\n return True\n if len(padded_shape.dims) != len(input_component_shape.dims):\n return False\n for padded_dim, input_dim in zip(\n padded_shape.dims, input_component_shape.dims):\n if (padded_dim.value is not None and input_dim.value is not None\n and padded_dim.value < input_dim.value):\n return False\n return True\n\n\ndef _padded_shape_to_tensor(padded_shape, input_component_shape):\n \"\"\"Converts `padded_shape` to a `tf.Tensor` representing that shape.\n\n Args:\n padded_shape: A shape-like object, which may be a `tf.TensorShape`, a Python\n sequence, or a 1-D `tf.Tensor` of `tf.int64` elements.\n input_component_shape: A `tf.TensorShape`, with which `padded_shape` must\n be compatible.\n\n Returns:\n A 1-D `tf.Tensor` of `tf.int64` elements, representing `padded_shape`.\n\n Raises:\n ValueError: If `padded_shape` is not a shape or not compatible with\n `input_component_shape`.\n TypeError: If `padded_shape` is not convertible to a `tf.int64` tensor.\n \"\"\"\n try:\n # Try to convert the `padded_shape` to a `tf.TensorShape`\n padded_shape_as_shape = tensor_shape.as_shape(padded_shape)\n # We will return the \"canonical\" tensor representation, which uses\n # `-1` in place of `None`.\n ret = ops.convert_to_tensor(\n [dim if dim is not None else -1\n for dim in padded_shape_as_shape.as_list()], dtype=dtypes.int64)\n except (TypeError, ValueError):\n # The argument was not trivially convertible to a\n # `tf.TensorShape`, so fall back on the conversion to tensor\n # machinery.\n ret = ops.convert_to_tensor(padded_shape, preferred_dtype=dtypes.int64)\n if ret.shape.dims is not None and len(ret.shape.dims) != 1:\n six.reraise(ValueError, ValueError(\n \"Padded shape %s must be a 1-D tensor of tf.int64 values, but its \"\n \"shape was %s.\" % (padded_shape, ret.shape)), sys.exc_info()[2])\n if ret.dtype != dtypes.int64:\n six.reraise(\n TypeError,\n TypeError(\n \"Padded shape %s must be a 1-D tensor of tf.int64 values, but \"\n \"its element type was %s.\" % (padded_shape, ret.dtype.name)),\n sys.exc_info()[2])\n padded_shape_as_shape = tensor_util.constant_value_as_shape(ret)\n\n if not _is_padded_shape_compatible_with(padded_shape_as_shape,\n input_component_shape):\n raise ValueError(\"The padded shape %s is not compatible with the \"\n \"corresponding input component shape %s.\"\n % (padded_shape_as_shape, input_component_shape))\n\n return ret\n\n\ndef _padding_value_to_tensor(value, output_type):\n \"\"\"Converts the padding value to a tensor.\n\n Args:\n value: The padding value.\n output_type: Its expected dtype.\n\n Returns:\n A scalar `Tensor`.\n\n Raises:\n ValueError: if the padding value is not a scalar.\n TypeError: if the padding value's type does not match `output_type`.\n \"\"\"\n value = ops.convert_to_tensor(value, name=\"padding_value\")\n if not 
value.shape.is_compatible_with(tensor_shape.TensorShape([])):\n raise ValueError(\"Padding value should be a scalar, but is not: %s\" % value)\n if value.dtype != output_type:\n raise TypeError(\"Padding value tensor (%s) does not match output type: %s\" %\n (value, output_type))\n return value\n\n\ndef _padding_values_or_default(padding_values, input_dataset):\n \"\"\"Returns padding values with None elements replaced with default values.\"\"\"\n\n def make_zero(t):\n if t.base_dtype == dtypes.string:\n return \"\"\n elif t.base_dtype == dtypes.variant:\n error_msg = (\"Unable to create padding for field of type 'variant' \"\n \"because t.base_type == dtypes.variant == \"\n \"{}.\".format(t.base_dtype))\n raise TypeError(error_msg)\n elif t.base_dtype == dtypes.bfloat16:\n # Special case `bfloat16` because it is not supported by NumPy.\n return constant_op.constant(0, dtype=dtypes.bfloat16)\n else:\n return np.zeros_like(t.as_numpy_dtype())\n\n def value_or_default(value, default):\n return default if value is None else value\n\n default_padding = nest.map_structure(\n make_zero,\n get_legacy_output_types(input_dataset))\n return nest.map_structure_up_to(padding_values, value_or_default,\n padding_values, default_padding)\n\n\nclass PaddedBatchDataset(UnaryDataset):\n \"\"\"A `Dataset` that batches and pads contiguous elements from its input.\"\"\"\n\n def __init__(self, input_dataset, batch_size, padded_shapes, padding_values,\n drop_remainder):\n \"\"\"See `Dataset.batch()` for details.\"\"\"\n self._input_dataset = input_dataset\n\n def check_types(component_spec):\n if not isinstance(component_spec, tensor_spec.TensorSpec):\n raise TypeError(\"Padded batching of components of type \",\n type(component_spec), \" is not supported.\")\n\n nest.map_structure(check_types, input_dataset.element_spec)\n self._input_dataset = input_dataset\n self._batch_size = ops.convert_to_tensor(\n batch_size, dtype=dtypes.int64, name=\"batch_size\")\n padding_values = _padding_values_or_default(padding_values, input_dataset)\n\n input_shapes = get_legacy_output_shapes(input_dataset)\n flat_padded_shapes = nest.flatten_up_to(input_shapes, padded_shapes)\n\n flat_padded_shapes_as_tensors = []\n\n for input_component_shape, padded_shape in zip(\n nest.flatten(input_shapes), flat_padded_shapes):\n flat_padded_shapes_as_tensors.append(\n _padded_shape_to_tensor(padded_shape, input_component_shape))\n\n self._padded_shapes = nest.pack_sequence_as(input_shapes,\n flat_padded_shapes_as_tensors)\n\n # If padding_values is a single element and input_shapes is a structure,\n # \"broadcast\" padding_values to the same structure as input_shapes.\n if nest.is_sequence(input_shapes) and not nest.is_sequence(padding_values):\n padding_values = nest.map_structure(lambda _: padding_values,\n input_shapes)\n\n self._padding_values = nest.map_structure_up_to(\n input_shapes, _padding_value_to_tensor, padding_values,\n get_legacy_output_types(input_dataset))\n self._drop_remainder = ops.convert_to_tensor(\n drop_remainder, dtype=dtypes.bool, name=\"drop_remainder\")\n\n def _padded_shape_to_batch_shape(s):\n return tensor_shape.TensorShape([\n tensor_util.constant_value(self._batch_size)\n if smart_cond.smart_constant_value(self._drop_remainder) else None\n ]).concatenate(tensor_util.constant_value_as_shape(s))\n\n output_shapes = nest.map_structure(\n _padded_shape_to_batch_shape, self._padded_shapes)\n self._structure = structure.convert_legacy_structure(\n get_legacy_output_types(self._input_dataset), output_shapes,\n 
get_legacy_output_classes(self._input_dataset))\n\n # pylint: disable=protected-access\n # TODO(jsimsa): Switch to using v2 only any time after 6/30/2018.\n if smart_cond.smart_constant_value(self._drop_remainder) is False:\n variant_tensor = gen_dataset_ops.padded_batch_dataset(\n input_dataset._variant_tensor, # pylint: disable=protected-access\n batch_size=self._batch_size,\n padded_shapes=[\n ops.convert_to_tensor(s, dtype=dtypes.int64)\n for s in nest.flatten(self._padded_shapes)\n ],\n padding_values=nest.flatten(self._padding_values),\n output_shapes=structure.get_flat_tensor_shapes(self._structure))\n else:\n variant_tensor = gen_dataset_ops.padded_batch_dataset_v2(\n input_dataset._variant_tensor, # pylint: disable=protected-access\n batch_size=self._batch_size,\n padded_shapes=[\n ops.convert_to_tensor(s, dtype=dtypes.int64)\n for s in nest.flatten(self._padded_shapes)\n ],\n padding_values=nest.flatten(self._padding_values),\n drop_remainder=self._drop_remainder,\n output_shapes=structure.get_flat_tensor_shapes(self._structure))\n super(PaddedBatchDataset, self).__init__(input_dataset, variant_tensor)\n\n @property\n def element_spec(self):\n return self._structure\n\n\ndef _should_unpack_args(args):\n \"\"\"Returns `True` if `args` should be `*args` when passed to a callable.\"\"\"\n return type(args) is tuple # pylint: disable=unidiomatic-typecheck\n\n\nclass MapDataset(UnaryDataset):\n \"\"\"A `Dataset` that maps a function over elements in its input.\"\"\"\n\n def __init__(self,\n input_dataset,\n map_func,\n use_inter_op_parallelism=True,\n preserve_cardinality=False,\n use_legacy_function=False):\n \"\"\"See `Dataset.map()` for details.\"\"\"\n self._input_dataset = input_dataset\n self._use_inter_op_parallelism = use_inter_op_parallelism\n self._preserve_cardinality = preserve_cardinality\n self._map_func = StructuredFunctionWrapper(\n map_func,\n self._transformation_name(),\n dataset=input_dataset,\n use_legacy_function=use_legacy_function)\n variant_tensor = gen_dataset_ops.map_dataset(\n input_dataset._variant_tensor, # pylint: disable=protected-access\n self._map_func.function.captured_inputs,\n f=self._map_func.function,\n use_inter_op_parallelism=self._use_inter_op_parallelism,\n preserve_cardinality=self._preserve_cardinality,\n **self._flat_structure)\n super(MapDataset, self).__init__(input_dataset, variant_tensor)\n\n def _functions(self):\n return [self._map_func]\n\n @property\n def element_spec(self):\n return self._map_func.output_structure\n\n def _transformation_name(self):\n return \"Dataset.map()\"\n\n\nclass ParallelMapDataset(UnaryDataset):\n \"\"\"A `Dataset` that maps a function over elements in its input in parallel.\"\"\"\n\n def __init__(self,\n input_dataset,\n map_func,\n num_parallel_calls,\n deterministic,\n use_inter_op_parallelism=True,\n preserve_cardinality=False,\n use_legacy_function=False):\n \"\"\"See `Dataset.map()` for details.\"\"\"\n self._input_dataset = input_dataset\n self._use_inter_op_parallelism = use_inter_op_parallelism\n self._map_func = StructuredFunctionWrapper(\n map_func,\n self._transformation_name(),\n dataset=input_dataset,\n use_legacy_function=use_legacy_function)\n if deterministic is None:\n self._deterministic = \"default\"\n elif deterministic:\n self._deterministic = \"true\"\n else:\n self._deterministic = \"false\"\n self._preserve_cardinality = preserve_cardinality\n self._num_parallel_calls = ops.convert_to_tensor(\n num_parallel_calls, dtype=dtypes.int64, name=\"num_parallel_calls\")\n variant_tensor = 
gen_dataset_ops.parallel_map_dataset_v2(\n input_dataset._variant_tensor, # pylint: disable=protected-access\n self._map_func.function.captured_inputs,\n f=self._map_func.function,\n num_parallel_calls=self._num_parallel_calls,\n deterministic=self._deterministic,\n use_inter_op_parallelism=self._use_inter_op_parallelism,\n preserve_cardinality=self._preserve_cardinality,\n **self._flat_structure)\n super(ParallelMapDataset, self).__init__(input_dataset, variant_tensor)\n\n def _functions(self):\n return [self._map_func]\n\n @property\n def element_spec(self):\n return self._map_func.output_structure\n\n def _transformation_name(self):\n return \"Dataset.map()\"\n\n\nclass FlatMapDataset(UnaryDataset):\n \"\"\"A `Dataset` that maps a function over its input and flattens the result.\"\"\"\n\n def __init__(self, input_dataset, map_func):\n \"\"\"See `Dataset.flat_map()` for details.\"\"\"\n self._input_dataset = input_dataset\n self._map_func = StructuredFunctionWrapper(\n map_func, self._transformation_name(), dataset=input_dataset)\n if not isinstance(self._map_func.output_structure, DatasetSpec):\n raise TypeError(\n \"`map_func` must return a `Dataset` object. Got {}\".format(\n type(self._map_func.output_structure)))\n self._structure = self._map_func.output_structure._element_spec # pylint: disable=protected-access\n variant_tensor = gen_dataset_ops.flat_map_dataset(\n input_dataset._variant_tensor, # pylint: disable=protected-access\n self._map_func.function.captured_inputs,\n f=self._map_func.function,\n **self._flat_structure)\n super(FlatMapDataset, self).__init__(input_dataset, variant_tensor)\n\n def _functions(self):\n return [self._map_func]\n\n @property\n def element_spec(self):\n return self._structure\n\n def _transformation_name(self):\n return \"Dataset.flat_map()\"\n\n\nclass InterleaveDataset(UnaryDataset):\n \"\"\"A `Dataset` that interleaves the result of transformed inputs.\"\"\"\n\n def __init__(self, input_dataset, map_func, cycle_length, block_length):\n \"\"\"See `Dataset.interleave()` for details.\"\"\"\n\n self._input_dataset = input_dataset\n self._map_func = StructuredFunctionWrapper(\n map_func, self._transformation_name(), dataset=input_dataset)\n if not isinstance(self._map_func.output_structure, DatasetSpec):\n raise TypeError(\n \"`map_func` must return a `Dataset` object. 
Got {}\".format(\n type(self._map_func.output_structure)))\n self._structure = self._map_func.output_structure._element_spec # pylint: disable=protected-access\n self._cycle_length = ops.convert_to_tensor(\n cycle_length, dtype=dtypes.int64, name=\"cycle_length\")\n self._block_length = ops.convert_to_tensor(\n block_length, dtype=dtypes.int64, name=\"block_length\")\n\n variant_tensor = gen_dataset_ops.interleave_dataset(\n input_dataset._variant_tensor, # pylint: disable=protected-access\n self._map_func.function.captured_inputs, # pylint: disable=protected-access\n self._cycle_length,\n self._block_length,\n f=self._map_func.function,\n **self._flat_structure)\n super(InterleaveDataset, self).__init__(input_dataset, variant_tensor)\n\n def _functions(self):\n return [self._map_func]\n\n @property\n def element_spec(self):\n return self._structure\n\n def _transformation_name(self):\n return \"Dataset.interleave()\"\n\n\nclass ParallelInterleaveDataset(UnaryDataset):\n \"\"\"A `Dataset` that maps a function over its input and interleaves the result.\"\"\"\n\n def __init__(self,\n input_dataset,\n map_func,\n cycle_length,\n block_length,\n num_parallel_calls,\n buffer_output_elements=AUTOTUNE,\n prefetch_input_elements=AUTOTUNE,\n deterministic=None):\n \"\"\"See `Dataset.interleave()` for details.\"\"\"\n self._input_dataset = input_dataset\n self._map_func = StructuredFunctionWrapper(\n map_func, self._transformation_name(), dataset=input_dataset)\n if not isinstance(self._map_func.output_structure, DatasetSpec):\n raise TypeError(\n \"`map_func` must return a `Dataset` object. Got {}\".format(\n type(self._map_func.output_structure)))\n self._structure = self._map_func.output_structure._element_spec # pylint: disable=protected-access\n self._cycle_length = ops.convert_to_tensor(\n cycle_length, dtype=dtypes.int64, name=\"cycle_length\")\n self._block_length = ops.convert_to_tensor(\n block_length, dtype=dtypes.int64, name=\"block_length\")\n self._buffer_output_elements = ops.convert_to_tensor(\n buffer_output_elements,\n dtype=dtypes.int64,\n name=\"buffer_output_elements\")\n self._prefetch_input_elements = ops.convert_to_tensor(\n prefetch_input_elements,\n dtype=dtypes.int64,\n name=\"prefetch_input_elements\")\n\n self._num_parallel_calls = ops.convert_to_tensor(\n num_parallel_calls, dtype=dtypes.int64, name=\"num_parallel_calls\")\n if deterministic is None:\n deterministic_string = \"default\"\n elif deterministic:\n deterministic_string = \"true\"\n else:\n deterministic_string = \"false\"\n\n variant_tensor = gen_dataset_ops.parallel_interleave_dataset_v4(\n input_dataset._variant_tensor, # pylint: disable=protected-access\n self._map_func.function.captured_inputs, # pylint: disable=protected-access\n self._cycle_length,\n self._block_length,\n self._buffer_output_elements,\n self._prefetch_input_elements,\n self._num_parallel_calls,\n f=self._map_func.function,\n deterministic=deterministic_string,\n **self._flat_structure)\n super(ParallelInterleaveDataset, self).__init__(input_dataset,\n variant_tensor)\n\n def _functions(self):\n return [self._map_func]\n\n @property\n def element_spec(self):\n return self._structure\n\n def _transformation_name(self):\n return \"Dataset.interleave()\"\n\n\nclass FilterDataset(UnaryUnchangedStructureDataset):\n \"\"\"A `Dataset` that filters its input according to a predicate function.\"\"\"\n\n def __init__(self, input_dataset, predicate, use_legacy_function=False):\n \"\"\"See `Dataset.filter()` for details.\"\"\"\n self._input_dataset 
= input_dataset\n wrapped_func = StructuredFunctionWrapper(\n predicate,\n self._transformation_name(),\n dataset=input_dataset,\n use_legacy_function=use_legacy_function)\n if not wrapped_func.output_structure.is_compatible_with(\n tensor_spec.TensorSpec([], dtypes.bool)):\n error_msg = (\"`predicate` return type must be convertible to a scalar \"\n \"boolean tensor. Was {}.\").format(\n wrapped_func.output_structure)\n raise ValueError(error_msg)\n self._predicate = wrapped_func\n variant_tensor = gen_dataset_ops.filter_dataset(\n input_dataset._variant_tensor, # pylint: disable=protected-access\n other_arguments=self._predicate.function.captured_inputs,\n predicate=self._predicate.function,\n **self._flat_structure)\n super(FilterDataset, self).__init__(input_dataset, variant_tensor)\n\n def _functions(self):\n return [self._predicate]\n\n def _transformation_name(self):\n return \"Dataset.filter()\"\n\n\nclass PrefetchDataset(UnaryUnchangedStructureDataset):\n \"\"\"A `Dataset` that asynchronously prefetches its input.\"\"\"\n\n def __init__(self, input_dataset, buffer_size, slack_period=None):\n \"\"\"See `Dataset.prefetch()` for details.\n\n Args:\n input_dataset: The input dataset.\n buffer_size: See `Dataset.prefetch()` for details.\n slack_period: (Optional.) An integer. If non-zero, determines the number\n of GetNext calls before injecting slack into the execution. This may\n reduce CPU contention at the start of a step. Note that a tensorflow\n user should not have to set this manually; enable this behavior\n automatically via `tf.data.Options.experimental_slack` instead. Defaults\n to None.\n \"\"\"\n self._input_dataset = input_dataset\n if buffer_size is None:\n buffer_size = AUTOTUNE\n self._buffer_size = ops.convert_to_tensor(\n buffer_size, dtype=dtypes.int64, name=\"buffer_size\")\n # pylint: disable=protected-access\n # We colocate the prefetch dataset with its input as this collocation only\n # happens automatically in graph mode.\n with ops.colocate_with(input_dataset._variant_tensor):\n variant_tensor = gen_dataset_ops.prefetch_dataset(\n input_dataset._variant_tensor,\n buffer_size=self._buffer_size,\n slack_period=slack_period,\n **self._flat_structure)\n super(PrefetchDataset, self).__init__(input_dataset, variant_tensor)\n\n\nclass WindowDataset(UnaryDataset):\n \"\"\"A dataset that creates window datasets from the input elements.\"\"\"\n\n def __init__(self, input_dataset, size, shift, stride, drop_remainder):\n \"\"\"See `window_dataset()` for more details.\"\"\"\n self._input_dataset = input_dataset\n self._size = ops.convert_to_tensor(size, dtype=dtypes.int64, name=\"size\")\n self._shift = ops.convert_to_tensor(shift, dtype=dtypes.int64, name=\"shift\")\n self._stride = ops.convert_to_tensor(\n stride, dtype=dtypes.int64, name=\"stride\")\n self._drop_remainder = ops.convert_to_tensor(\n drop_remainder, dtype=dtypes.bool, name=\"drop_remainder\")\n self._structure = nest.pack_sequence_as(\n get_legacy_output_classes(input_dataset), [\n DatasetSpec( # pylint: disable=g-complex-comprehension\n structure.convert_legacy_structure(\n output_type, output_shape, output_class))\n for output_class, output_shape, output_type in zip(\n nest.flatten(get_legacy_output_classes(input_dataset)),\n nest.flatten(get_legacy_output_shapes(input_dataset)),\n nest.flatten(get_legacy_output_types(input_dataset)))\n ])\n variant_tensor = gen_dataset_ops.window_dataset(\n input_dataset._variant_tensor, # pylint: disable=protected-access\n self._size,\n self._shift,\n self._stride,\n 
self._drop_remainder,\n **self._flat_structure)\n super(WindowDataset, self).__init__(input_dataset, variant_tensor)\n\n @property\n def element_spec(self):\n return self._structure\n\n\nclass _OptionsDataset(UnaryUnchangedStructureDataset):\n \"\"\"An identity `Dataset` that stores options.\"\"\"\n\n def __init__(self, input_dataset, options):\n # pylint: disable=protected-access\n self._input_dataset = input_dataset\n variant_tensor = input_dataset._variant_tensor\n super(_OptionsDataset, self).__init__(input_dataset, variant_tensor)\n\n if self._options_attr:\n self._options_attr._set_mutable(True)\n self._options_attr = self._options_attr.merge(options)\n else:\n self._options_attr = options\n self._options_attr._set_mutable(False)\n\n\nclass _ModelDataset(UnaryUnchangedStructureDataset):\n \"\"\"A `Dataset` that acts as an identity, and models performance.\"\"\"\n\n def __init__(self, input_dataset, algorithm, cpu_budget, ram_budget):\n self._input_dataset = input_dataset\n variant_tensor = gen_dataset_ops.model_dataset(\n input_dataset._variant_tensor, # pylint: disable=protected-access\n algorithm=algorithm.value,\n cpu_budget=cpu_budget,\n ram_budget=ram_budget,\n **self._flat_structure)\n super(_ModelDataset, self).__init__(input_dataset, variant_tensor)\n\n\nclass _OptimizeDataset(UnaryUnchangedStructureDataset):\n \"\"\"A `Dataset` that acts as an identity, and applies optimizations.\"\"\"\n\n def __init__(self,\n input_dataset,\n optimizations_enabled,\n optimizations_disabled,\n optimizations_default,\n optimization_configs=None):\n self._input_dataset = input_dataset\n if optimization_configs is None:\n optimization_configs = []\n\n # We sort the options here before embedding as constant tensors to ensure\n # that serialization to NodeDef is determinstic.\n if optimizations_enabled:\n optimizations_enabled.sort()\n if optimizations_disabled:\n optimizations_disabled.sort()\n if optimizations_default:\n optimizations_default.sort()\n\n self._optimizations_enabled = convert.optional_param_to_tensor(\n argument_name=\"optimizations_enabled\",\n argument_value=optimizations_enabled,\n argument_default=[],\n argument_dtype=dtypes.string)\n self._optimizations_disabled = convert.optional_param_to_tensor(\n argument_name=\"optimizations_disabled\",\n argument_value=optimizations_disabled,\n argument_default=[],\n argument_dtype=dtypes.string)\n self._optimizations_default = convert.optional_param_to_tensor(\n argument_name=\"optimizations_default\",\n argument_value=optimizations_default,\n argument_default=[],\n argument_dtype=dtypes.string)\n\n variant_tensor = gen_dataset_ops.optimize_dataset_v2(\n input_dataset._variant_tensor, # pylint: disable=protected-access\n self._optimizations_enabled,\n self._optimizations_disabled,\n self._optimizations_default,\n optimization_configs=optimization_configs,\n **self._flat_structure)\n\n super(_OptimizeDataset, self).__init__(input_dataset, variant_tensor)\n\n\nclass _SetStatsAggregatorDataset(UnaryUnchangedStructureDataset):\n \"\"\"A `Dataset` that acts as an identity, and sets a stats aggregator.\"\"\"\n\n def __init__(self, input_dataset, aggregator, prefix, counter_prefix):\n self._input_dataset = input_dataset\n self._stats_aggregator = aggregator\n self._prefix = prefix\n self._counter_prefix = counter_prefix\n variant_tensor = ged_ops.set_stats_aggregator_dataset(\n input_dataset._variant_tensor, # pylint: disable=protected-access\n self._stats_aggregator._resource, # pylint: disable=protected-access\n self._prefix,\n 
self._counter_prefix,\n **self._flat_structure)\n super(_SetStatsAggregatorDataset, self).__init__(input_dataset,\n variant_tensor)\n\n\nclass _MaxIntraOpParallelismDataset(UnaryUnchangedStructureDataset):\n \"\"\"A `Dataset` that acts as an identity, overriding intra-op parallelism.\"\"\"\n\n def __init__(self, input_dataset, max_intra_op_parallelism):\n self._input_dataset = input_dataset\n self._max_intra_op_parallelism = ops.convert_to_tensor(\n max_intra_op_parallelism,\n dtype=dtypes.int64,\n name=\"max_intra_op_parallelism\")\n variant_tensor = ged_ops.max_intra_op_parallelism_dataset(\n input_dataset._variant_tensor, # pylint: disable=protected-access\n self._max_intra_op_parallelism,\n **self._flat_structure)\n super(_MaxIntraOpParallelismDataset, self).__init__(input_dataset,\n variant_tensor)\n\n\nclass _PrivateThreadPoolDataset(UnaryUnchangedStructureDataset):\n \"\"\"A `Dataset` that acts as an identity, setting a private threadpool.\"\"\"\n\n def __init__(self, input_dataset, num_threads):\n self._input_dataset = input_dataset\n self._num_threads = ops.convert_to_tensor(\n num_threads, dtype=dtypes.int64, name=\"num_threads\")\n variant_tensor = ged_ops.private_thread_pool_dataset(\n input_dataset._variant_tensor, # pylint: disable=protected-access\n self._num_threads,\n **self._flat_structure)\n super(_PrivateThreadPoolDataset, self).__init__(input_dataset,\n variant_tensor)\n\n\ndef normalize_to_dense(dataset):\n \"\"\"Normalizes non-tensor components in a dataset to dense representations.\n\n This is necessary for dataset transformations that slice along the batch\n dimension and are oblivious to non-tensors, e.g. `unbatch`, `rebatch`.\n\n Args:\n dataset: Dataset to normalize.\n\n Returns:\n A dataset whose sparse and ragged tensors have been normalized to their\n dense representations.\n \"\"\"\n\n # NOTE(mrry): This leads to a somewhat inefficient re-encoding step for all\n # non-tensor components.\n #\n # TODO(mrry): Consider optimizing this if it turns out to be a bottleneck.\n if _should_unpack_args(dataset.element_spec):\n def normalize(*args):\n return structure.to_batched_tensor_list(dataset.element_spec, tuple(args))\n else:\n def normalize(arg):\n return structure.to_batched_tensor_list(dataset.element_spec, arg)\n\n normalized_dataset = dataset.map(normalize)\n\n # NOTE(mrry): Our `map()` has lost information about the structure of\n # non-tensor components, so re-apply the structure of the original dataset.\n return _RestructuredDataset(normalized_dataset, dataset.element_spec)\n\n\nclass _RestructuredDataset(UnaryDataset):\n \"\"\"An internal helper for changing the element spec of a dataset.\"\"\"\n\n def __init__(self, dataset, structure):\n self._input_dataset = dataset\n self._structure = structure\n\n variant_tensor = self._input_dataset._variant_tensor # pylint: disable=protected-access\n super(_RestructuredDataset, self).__init__(dataset, variant_tensor)\n\n @property\n def element_spec(self):\n return self._structure\n\n\nclass _UnbatchDataset(UnaryDataset):\n \"\"\"A dataset that splits the elements of its input into multiple elements.\"\"\"\n\n def __init__(self, input_dataset):\n \"\"\"See `unbatch()` for more details.\"\"\"\n flat_shapes = input_dataset._flat_shapes # pylint: disable=protected-access\n if any(s.ndims == 0 for s in flat_shapes):\n raise ValueError(\"Cannot unbatch an input with scalar components.\")\n known_batch_dim = tensor_shape.Dimension(None)\n for s in flat_shapes:\n try:\n known_batch_dim = known_batch_dim.merge_with(s[0])\n 
except ValueError:\n raise ValueError(\"Cannot unbatch an input whose components have \"\n \"different batch sizes.\")\n self._input_dataset = input_dataset\n self._structure = nest.map_structure(\n lambda component_spec: component_spec._unbatch(), # pylint: disable=protected-access\n get_structure(input_dataset))\n variant_tensor = ged_ops.unbatch_dataset(\n self._input_dataset._variant_tensor, # pylint: disable=protected-access\n **self._flat_structure)\n super(_UnbatchDataset, self).__init__(input_dataset, variant_tensor)\n\n @property\n def element_spec(self):\n return self._structure\n\n\ndef _collect_resource_inputs(op):\n \"\"\"Collects resource inputs for the given ops (and its variant inputs).\"\"\"\n\n def _process(op_queue, seen_ops):\n \"\"\"Processes the next element of the op queue.\n\n Args:\n op_queue: Queue of Dataset operations to process.\n seen_ops: Already processed set of Operations.\n\n Returns:\n A 2-tuple containing sets of resource handles. The first tuple entry\n contains read-only handles and the second entry contains read-write\n handles.\n \"\"\"\n\n reads = []\n writes = []\n op = op_queue.pop()\n if op in seen_ops:\n return reads, writes\n seen_ops.add(op)\n # TODO(b/150139257): All resource inputs are in writes right now since we\n # have not updated the functional ops to set the special attribute that ACD\n # uses to figure out which of the op's inputs are read-only.\n reads, writes = acd_utils.get_read_write_resource_inputs(op)\n # Conservatively assume that any variant inputs are datasets.\n op_queue.extend(t.op for t in op.inputs if t.dtype == dtypes.variant)\n return reads, writes\n\n op_queue = [op]\n seen_ops = set()\n all_reads = []\n all_writes = []\n while op_queue:\n reads, writes = _process(op_queue, seen_ops)\n all_reads.extend(reads)\n all_writes.extend(writes)\n\n return all_reads, all_writes\n\n\n@auto_control_deps.register_acd_resource_resolver\ndef _resource_resolver(op, resource_reads, resource_writes):\n \"\"\"Updates resource inputs for tf.data ops with indirect dependencies.\"\"\"\n\n updated = False\n if op.type in [\n \"DatasetToSingleElement\", \"DatasetToTFRecord\", \"ReduceDataset\"\n ]:\n reads, writes = _collect_resource_inputs(op)\n for inp in reads:\n if inp not in resource_reads:\n updated = True\n resource_reads.add(inp)\n for inp in writes:\n if inp not in resource_writes:\n updated = True\n resource_writes.add(inp)\n\n if op.type in [\n \"IteratorGetNext\", \"IteratorGetNextSync\", \"IteratorGetNextAsOptional\"\n ]:\n iterator_resource = op.inputs[0]\n make_iterator_ops = [\n op for op in iterator_resource.consumers() if op.type == \"MakeIterator\"\n ]\n\n if len(make_iterator_ops) == 1:\n reads, writes = _collect_resource_inputs(make_iterator_ops[0])\n for inp in reads:\n if inp not in resource_reads:\n updated = True\n resource_reads.add(inp)\n for inp in writes:\n if inp not in resource_writes:\n updated = True\n resource_writes.add(inp)\n\n return updated\n" ]
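The code field above (TensorFlow's dataset_ops module) defines the wrapper classes behind the public tf.data transformations: RangeDataset, ZipDataset, MapDataset, FilterDataset, ShuffleDataset, BatchDataset, PaddedBatchDataset, PrefetchDataset, _GeneratorDataset and so on. The sketch below is not part of the record; it is a minimal, assumed usage example (TensorFlow 2.4+ for tf.data.AUTOTUNE and output_signature) that exercises several of those ops through the public API, with illustrative values only.

import tensorflow as tf

# RangeDataset, MapDataset, ZipDataset, FilterDataset, ShuffleDataset,
# BatchDataset and PrefetchDataset correspond to the wrapper classes above.
xs = tf.data.Dataset.range(10)
ys = tf.data.Dataset.range(10).map(lambda i: i * i)
pairs = tf.data.Dataset.zip((xs, ys))
pipeline = (pairs
            .filter(lambda x, y: x % 2 == 0)
            .shuffle(buffer_size=4, seed=0)
            .batch(2, drop_remainder=True)
            .prefetch(tf.data.AUTOTUNE))
for x_batch, y_batch in pipeline:
    print(x_batch.numpy(), y_batch.numpy())

# PaddedBatchDataset: variable-length elements are padded to a common shape;
# omitted padding values are filled in by _padding_values_or_default.
sequences = tf.data.Dataset.range(1, 5).map(tf.range)
padded = sequences.padded_batch(2, padded_shapes=[None])
for batch in padded:
    print(batch.numpy())

# Dataset.from_generator() is backed by _GeneratorDataset; output_signature
# describes the elements produced by the Python generator.
def gen():
    for i in range(3):
        yield i

gds = tf.data.Dataset.from_generator(
    gen, output_signature=tf.TensorSpec(shape=(), dtype=tf.int32))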
[ [ "tensorflow.python.ops.gen_dataset_ops.dummy_seed_generator", "tensorflow.python.ops.gen_dataset_ops.shuffle_dataset", "tensorflow.python.data.util.nest.is_sequence", "tensorflow.python.data.util.nest.flatten_up_to", "tensorflow.python.framework.smart_cond.smart_constant_value", "tensorflow.python.ops.gen_experimental_dataset_ops.max_intra_op_parallelism_dataset", "tensorflow.python.framework.ops.uid", "tensorflow.python.util.tf_export.tf_export", "tensorflow.python.framework.tensor_shape.as_shape", "tensorflow.python.framework.dtypes.as_dtype", "tensorflow.python.compat.compat.forward_compatible", "tensorflow.python.ops.gen_dataset_ops.iterator_v2", "tensorflow.python.framework.ops.device", "tensorflow.core.framework.graph_pb2.GraphDef", "tensorflow.python.ops.gen_dataset_ops.filter_dataset", "tensorflow.python.ops.gen_dataset_ops.range_dataset", "tensorflow.python.ops.array_ops.identity", "tensorflow.python.ops.gen_dataset_ops.one_shot_iterator", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.data.util.nest.pack_sequence_as", "tensorflow.python.ops.gen_dataset_ops.sparse_tensor_slice_dataset", "tensorflow.python.ops.gen_dataset_ops.parallel_map_dataset_v2", "tensorflow.python.ops.gen_dataset_ops.parallel_batch_dataset", "tensorflow.python.ops.gen_dataset_ops.batch_dataset_v2", "tensorflow.python.framework.ops.NotDifferentiable", "numpy.array", "tensorflow.python.eager.context.executing_eagerly", "tensorflow.python.ops.gen_dataset_ops.shard_dataset", "tensorflow.python.framework.tensor_shape.TensorShape", "tensorflow.python.data.util.nest.flatten", "tensorflow.python.framework.constant_op.constant", "tensorflow.python.data.util.traverse.obtain_capture_by_value_ops", "tensorflow.python.ops.gen_dataset_ops.take_dataset", "tensorflow.python.eager.context.eager_mode", "tensorflow.python.training.tracking.tracking.ResourceTracker", "tensorflow.python.data.experimental.ops.distribute_options.ExternalStatePolicy._from_proto", "tensorflow.python.ops.string_ops.reduce_join", "tensorflow.python.ops.control_flow_ops.Assert", "tensorflow.python.ops.gen_dataset_ops.dataset_cardinality", "tensorflow.python.ops.gen_experimental_dataset_ops.unbatch_dataset", "tensorflow.python.data.util.random_seed.get_seed", "tensorflow.python.framework.tensor_shape.Dimension", "tensorflow.python.ops.script_ops.numpy_function", "tensorflow.python.data.util.structure.normalize_element", "tensorflow.python.ops.gen_dataset_ops.dataset_to_graph_v2", "tensorflow.python.data.util.nest.map_structure", "tensorflow.python.data.experimental.ops.distribute_options.ExternalStatePolicy._to_proto", "tensorflow.python.data.util.options.merge_options", "tensorflow.python.data.util.options.create_option", "tensorflow.python.data.util.structure.get_flat_tensor_types", "tensorflow.python.framework.ops.inside_function", "tensorflow.python.util.nest.map_structure", "tensorflow.python.ops.gen_dataset_ops.flat_map_dataset", "tensorflow.python.data.util.structure.get_flat_tensor_specs", "tensorflow.python.util.deprecation.deprecated", "tensorflow.python.data.util.structure.to_batched_tensor_list", "tensorflow.python.ops.gen_dataset_ops.dataset_to_graph", "tensorflow.python.data.util.structure.from_compatible_tensor_list", "tensorflow.core.framework.dataset_options_pb2.Options", "tensorflow.python.ops.gen_dataset_ops.map_dataset", "tensorflow.python.ops.gen_dataset_ops.dummy_memory_cache", 
"tensorflow.python.ops.gen_io_ops.matching_files", "tensorflow.python.ops.gen_experimental_dataset_ops.set_stats_aggregator_dataset", "tensorflow.python.data.util.structure.convert_legacy_structure", "tensorflow.python.ops.gen_dataset_ops.concatenate_dataset", "tensorflow.python.framework.tensor_util.constant_value", "tensorflow.python.framework.tensor_util.constant_value_as_shape", "tensorflow.python.data.util.structure.type_spec_from_value", "tensorflow.python.data.util.structure.get_flat_tensor_shapes", "tensorflow.python.training.tracking.tracking.resource_tracker_scope", "tensorflow.python.data.util.structure.to_tensor_list", "tensorflow.python.ops.gen_experimental_dataset_ops.private_thread_pool_dataset", "tensorflow.python.data.util.convert.optional_param_to_tensor", "tensorflow.python.ops.gen_dataset_ops.repeat_dataset", "tensorflow.python.eager.def_function.functions_run_eagerly", "tensorflow.python.ops.gen_dataset_ops.make_iterator", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.python.util.deprecation.deprecated_args", "tensorflow.python.framework.tensor_spec.TensorSpec", "tensorflow.python.framework.ops.colocate_with", "tensorflow.python.data.util.options.graph_rewrites", "tensorflow.python.ops.gen_dataset_ops.model_dataset", "tensorflow.python.framework.random_seed.set_random_seed", "tensorflow.python.ops.gen_dataset_ops.parallel_interleave_dataset_v4", "tensorflow.python.util.function_utils.get_func_name", "tensorflow.python.framework.auto_control_deps_utils.get_read_write_resource_inputs", "tensorflow.python.ops.gen_dataset_ops.cache_dataset", "tensorflow.python.ops.gen_dataset_ops.prefetch_dataset", "tensorflow.python.data.experimental.ops.optimization_options.OptimizationOptions", "tensorflow.python.ops.gen_dataset_ops.optimize_dataset_v2", "tensorflow.python.data.util.nest.map_structure_up_to", "tensorflow.python.ops.gen_dataset_ops.interleave_dataset", "tensorflow.python.ops.gen_dataset_ops.skip_dataset", "tensorflow.python.ops.gen_dataset_ops.window_dataset", "tensorflow.python.tf2.enabled", "tensorflow.python.ops.script_ops.FuncRegistry._convert", "tensorflow.python.data.ops.iterator_ops.OwnedIterator", "tensorflow.python.framework.random_seed.get_seed", "tensorflow.python.ops.script_ops._eager_py_func", "tensorflow.python.data.util.structure.are_compatible", "tensorflow.python.framework.ops.name_scope", "numpy.iinfo", "tensorflow.python.framework.ops.convert_n_to_tensor", "tensorflow.python.framework.function.Defun" ] ]
artsobolev/IWHVI
[ "3a8b5631fe5b08587c594bd0aac43f84dc261579" ]
[ "hierarchical_vae/utils.py" ]
[ "import numpy as np\nimport scipy as sp\nimport scipy.special\nfrom tqdm import tqdm\n\nimport utils\nfrom .model import HierarchicalVAE\n\n\ndef calculate_evidence(sess, data, iwhvae, iwae_samples, iwhvi_samples, batch_size, n_repeats,\n tau_force_prior=False, tqdm_desc=None):\n losses = utils.batched_run(sess, data, -iwhvae.loss, lambda x_batch, y_batch: {\n iwhvae.input_x: x_batch,\n iwhvae.output_y: y_batch,\n iwhvae.k_iwhvi_samples: iwhvi_samples,\n iwhvae.m_iwae_samples: iwae_samples,\n iwhvae.tau_force_prior: tau_force_prior\n }, batch_size, n_repeats, tqdm_desc)\n\n return np.array(list(losses))\n\n\ndef calculate_reused_sivi_bound(sess, data, iwhvae, iwae_samples, iwhvi_samples, batch_size_x, n_repeats,\n batch_size_m=None, tqdm_desc=None):\n batch_size_x = min(batch_size_x, data.num_examples) # Fix\n x_samples = data.num_examples\n total_batches_x = (x_samples - 1) // batch_size_x + 1\n\n if batch_size_m is None:\n batch_size_m = iwae_samples\n total_batches_m = (iwae_samples - 1) // batch_size_m + 1\n\n res = np.zeros(n_repeats)\n with tqdm(total=n_repeats * total_batches_x * (total_batches_m + 1), unit='run', desc=tqdm_desc) as tqdm_t:\n for rep in range(n_repeats):\n offset_x = 0\n mean_elbo = 0\n for _ in range(total_batches_x):\n batch = data.next_batch(batch_size_x)\n binarized_batch_x, binarized_batch_y = utils.binarize_batch(batch)\n actual_batch_size_x = binarized_batch_x.shape[0]\n\n psi_rest = sess.run(iwhvae._reuse_psi_rest, feed_dict={\n iwhvae.input_x: binarized_batch_x,\n iwhvae.k_iwhvi_samples: iwhvi_samples\n })\n alphas = np.zeros((iwae_samples, actual_batch_size_x))\n offset_m = 0\n for _ in range(total_batches_m):\n m_samples = min(iwae_samples - offset_m, batch_size_m)\n alpha = sess.run(iwhvae._sivi_reused_alpha, {\n iwhvae.input_x: binarized_batch_x,\n iwhvae.output_y: binarized_batch_y,\n iwhvae.m_iwae_samples: m_samples,\n iwhvae.k_iwhvi_samples: iwhvi_samples,\n iwhvae._reuse_psi_rest: psi_rest\n })\n alphas[offset_m:offset_m + m_samples, :] = alpha\n offset_m += m_samples\n tqdm_t.update()\n\n elbo = np.mean(sp.special.logsumexp(alphas, axis=0) - np.log(iwae_samples))\n offset_x += actual_batch_size_x\n mean_elbo += (elbo - mean_elbo) * actual_batch_size_x / offset_x\n tqdm_t.update()\n\n res[rep] = mean_elbo\n return res\n\n\ndef batched_calculate_evidence_q_gap_kls(sess, data, iwhvae, iwae_samples, iwhvi_samples, batch_size_x, n_repeats,\n tau_force_prior=False, batch_size_m=None, tqdm_desc=None):\n\n batch_size_x = min(batch_size_x, data.num_examples) # Fix\n x_samples = data.num_examples\n total_batches_x = (x_samples - 1) // batch_size_x + 1\n\n if batch_size_m is None:\n batch_size_m = iwae_samples\n total_batches_m = (iwae_samples - 1) // batch_size_m + 1\n\n aux = [iwhvae._q_gap_raw, *iwhvae.bounds]\n aux_size = len(aux)\n res = np.zeros((1 + aux_size, n_repeats))\n with tqdm(total=n_repeats * total_batches_x, unit='run', desc=tqdm_desc) as tqdm_t:\n for rep in range(n_repeats):\n rep_res = np.zeros((1 + aux_size, x_samples))\n offset_x = 0\n for _ in range(total_batches_x):\n batch = data.next_batch(batch_size_x)\n binarized_batch_x, binarized_batch_y = utils.binarize_batch(batch)\n actual_batch_size_x = binarized_batch_x.shape[0]\n\n zs = np.zeros((iwae_samples, actual_batch_size_x, iwhvae.z_dim))\n res_aux = np.zeros((aux_size, iwae_samples, actual_batch_size_x))\n offset_m = 0\n for _ in range(total_batches_m):\n m_samples = min(iwae_samples - offset_m, batch_size_m)\n aux_vals = sess.run(aux + [iwhvae._z], {\n iwhvae.input_x: 
binarized_batch_x,\n iwhvae.output_y: binarized_batch_y,\n iwhvae.m_iwae_samples: m_samples,\n iwhvae.k_iwhvi_samples: iwhvi_samples,\n iwhvae.tau_force_prior: tau_force_prior\n })\n zs[offset_m:offset_m + m_samples] = aux_vals[-1]\n res_aux[:, offset_m:offset_m + m_samples, :] = aux_vals[:-1]\n offset_m += m_samples\n\n elbos = sess.run(iwhvae.elbo, {\n iwhvae.input_x: binarized_batch_x,\n iwhvae.output_y: binarized_batch_y,\n iwhvae.m_iwae_samples: iwae_samples,\n iwhvae.k_iwhvi_samples: iwhvi_samples,\n iwhvae.tau_force_prior: tau_force_prior,\n iwhvae._z: zs,\n iwhvae.bounds[1]: res_aux[aux.index(iwhvae.bounds[1])]\n })\n\n rep_res[:, offset_x:offset_x + actual_batch_size_x] = [\n elbos,\n *np.mean(res_aux, axis=1)\n ]\n offset_x += actual_batch_size_x\n tqdm_t.update()\n\n res[:, rep] = np.mean(rep_res, axis=1)\n return res\n\n\ndef batched_calculate_p_gap(sess, data, iwhvae, iwae_samples, iwhvi_samples, batch_size_x,\n n_repeats, tau_force_prior, batch_size_m=None, tqdm_desc=None):\n\n batch_size_x = min(batch_size_x, data.num_examples) # Fix\n x_samples = data.num_examples\n total_batches_x = (x_samples - 1) // batch_size_x + 1\n\n if batch_size_m is None:\n batch_size_m = iwae_samples\n total_batches_m = (iwae_samples - 1) // batch_size_m + 1\n\n p_gaps = np.zeros(n_repeats)\n with tqdm(total=n_repeats * total_batches_x, unit='run', desc=tqdm_desc) as tqdm_t:\n for rep in range(n_repeats):\n mean_p_gap = 0\n offset_x = 0\n for _ in range(total_batches_x):\n batch = data.next_batch(batch_size_x)\n binarized_batch_x, binarized_batch_y = utils.binarize_batch(batch)\n actual_batch_size_x = binarized_batch_x.shape[0]\n\n alphas = np.zeros((iwae_samples, actual_batch_size_x))\n offset_m = 0\n for _ in range(total_batches_m):\n m_samples = min(iwae_samples - offset_m, batch_size_m)\n alpha = sess.run(iwhvae._alpha, {\n iwhvae.input_x: binarized_batch_x,\n iwhvae.output_y: binarized_batch_y,\n iwhvae.m_iwae_samples: m_samples,\n iwhvae.k_iwhvi_samples: iwhvi_samples,\n iwhvae.tau_force_prior: tau_force_prior\n })\n\n alphas[offset_m:offset_m + m_samples] = alpha\n offset_m += m_samples\n\n batch_p_gap = sp.special.logsumexp(alphas, axis=0) - np.log(alphas.shape[0]) - np.mean(alphas, axis=0)\n batch_mean_p_gap = np.mean(batch_p_gap, axis=0)\n offset_x += actual_batch_size_x\n mean_p_gap += (batch_mean_p_gap - mean_p_gap) * actual_batch_size_x / offset_x\n\n tqdm_t.update()\n\n p_gaps[rep] = mean_p_gap\n return p_gaps\n\n\ndef calculate_mi_bounds_on_p(sess, vae, n_samples, m_iwae_samples, iwae_batch_size, n_repeats, tqdm_desc):\n res = []\n with tqdm(total=n_repeats * n_samples * (m_iwae_samples + 1), desc=tqdm_desc) as tqdm_t:\n mi_lower_bound_per_sample = []\n mi_upper_bound_per_sample = []\n\n for _ in range(n_repeats):\n for _ in range(n_samples):\n x, hvm_term, log_p_x_z = sess.run([vae._mi_p_x, vae._mi_p_hvm_term, vae._mi_p_log_p_x_z])\n tqdm_t.update(1)\n\n elbos = []\n m_offset = 0\n while m_offset < m_iwae_samples:\n batch_size = min(iwae_batch_size, m_iwae_samples - m_offset)\n m_offset += batch_size\n\n elbos_subset = sess.run(vae._mi_p_elbos, feed_dict={vae._mi_p_x: x, vae.m_iwae_samples: batch_size})\n elbos.extend(elbos_subset)\n\n tqdm_t.update(batch_size)\n\n assert len(elbos) == m_iwae_samples\n log_p_lower_bound = sp.special.logsumexp(elbos) - np.log(m_iwae_samples)\n log_p_upper_bound = sp.special.logsumexp(elbos + [hvm_term]) - np.log(m_iwae_samples + 1)\n\n mi_lower_bound_per_sample.append(log_p_x_z - log_p_upper_bound)\n mi_upper_bound_per_sample.append(log_p_x_z - 
log_p_lower_bound)\n\n res.append((np.mean(mi_lower_bound_per_sample), np.mean(mi_upper_bound_per_sample)))\n tqdm_t.set_description(tqdm_desc + ', cur. est.: {:.3f} <= MI <= {:.3f}'.format(*np.mean(res, axis=0)))\n\n return np.array(res).T\n\n\ndef calculate_mi_bounds_on_q(sess, vae, data, batch_size_x, m_iwae_samples, iwae_batch_size, n_repeats, tqdm_desc):\n if iwae_batch_size is None:\n iwae_batch_size = m_iwae_samples\n\n total_batches_x = (data.num_examples - 1) // batch_size_x + 1\n res = []\n with tqdm(total=n_repeats * data.num_examples * (m_iwae_samples + 1), desc=tqdm_desc) as tqdm_t:\n mi_lower_bound_per_sample = []\n mi_upper_bound_per_sample = []\n\n for _ in range(n_repeats):\n for _ in range(total_batches_x):\n binarized_x, _ = utils.binarize_batch(data.next_batch(batch_size_x))\n actual_batch_size_x = binarized_x.shape[0]\n\n z, hvm_term, log_q_z_psi = sess.run([vae._mi_q_z, vae._mi_q_hvm_term, vae._mi_q_log_q_z_psi],\n feed_dict={vae.input_x: binarized_x})\n tqdm_t.update(actual_batch_size_x)\n\n elbos = np.zeros((m_iwae_samples, actual_batch_size_x))\n m_offset = 0\n while m_offset < m_iwae_samples:\n batch_size_m = min(iwae_batch_size, m_iwae_samples - m_offset)\n\n elbos_subset = sess.run(vae._mi_q_elbos, feed_dict={\n vae._mi_q_z: z,\n vae.m_iwae_samples: batch_size_m,\n vae.input_x: binarized_x\n })\n assert elbos_subset.shape == (batch_size_m, actual_batch_size_x), elbos_subset.shape\n elbos[m_offset:m_offset+batch_size_m, :] = elbos_subset\n\n tqdm_t.update(batch_size_m * actual_batch_size_x)\n m_offset += batch_size_m\n\n extended_elbos = np.concatenate([hvm_term[None, :], elbos], axis=0)\n\n assert len(elbos) == m_iwae_samples\n log_p_lower_bound = sp.special.logsumexp(elbos, axis=0) - np.log(m_iwae_samples)\n log_p_upper_bound = sp.special.logsumexp(extended_elbos, axis=0) - np.log(m_iwae_samples + 1)\n\n mi_lower_bound_per_sample.append(log_q_z_psi - log_p_upper_bound)\n mi_upper_bound_per_sample.append(log_q_z_psi - log_p_lower_bound)\n\n res.append((np.mean(mi_lower_bound_per_sample), np.mean(mi_upper_bound_per_sample)))\n tqdm_t.set_description(tqdm_desc + ', cur. 
est.: {:.3f} <= MI <= {:.3f}'.format(*np.mean(res, axis=0)))\n\n return np.array(res).T\n\n\ndef calculate_kl_tau_q(sess, data, iwhvae, n_samples, batch_size, n_repeats, tau_force_prior=False, tqdm_desc=None):\n kls = utils.batched_run(\n sess, data, iwhvae.kl_tau_q,\n lambda x_batch, y_batch: {\n iwhvae.input_x: x_batch,\n iwhvae.output_y: y_batch,\n iwhvae.m_iwae_samples: n_samples,\n iwhvae.tau_force_prior: tau_force_prior\n },\n batch_size, n_repeats, tqdm_desc)\n\n return np.array(list(kls))\n\n\ndef calculate_q_gap(sess, data, iwhvae, m_samples, k_iwhvi_samples, batch_size, n_repeats,\n tau_force_prior=False, tqdm_desc=None):\n gaps = utils.batched_run(sess, data, iwhvae.q_gap, lambda x_batch, y_batch: {\n iwhvae.input_x: x_batch,\n iwhvae.output_y: y_batch,\n iwhvae.m_iwae_samples: m_samples,\n iwhvae.k_iwhvi_samples: k_iwhvi_samples,\n iwhvae.tau_force_prior: tau_force_prior\n }, batch_size, n_repeats, tqdm_desc)\n\n return np.array(list(gaps))\n\n\ndef calculate_kl_bounds(sess, data, iwhvae, m_samples, k_iwhvi_samples, batch_size, n_repeats,\n tau_force_prior=False, tqdm_desc=None):\n bounds = utils.batched_run(sess, data, [iwhvae.kl_lower_bound, iwhvae.kl_upper_bound], lambda x_batch, y_batch: {\n iwhvae.input_x: x_batch,\n iwhvae.output_y: y_batch,\n iwhvae.m_iwae_samples: m_samples,\n iwhvae.k_iwhvi_samples: k_iwhvi_samples,\n iwhvae.tau_force_prior: tau_force_prior\n }, batch_size, n_repeats, tqdm_desc)\n\n avg_bounds = np.array(list(bounds))\n return avg_bounds[:, 0], avg_bounds[:, 1]\n\n\ndef add_model_args(argparser):\n argparser.add_argument('--z_dim', type=int, default=10)\n argparser.add_argument('--noise_dim', type=int, default=10)\n\n argparser.add_argument('--decoder_arch', type=int, nargs='*', default=[200, 200])\n argparser.add_argument('--encoder_arch', type=int, nargs='*', default=[200, 200])\n argparser.add_argument('--encoder_noise_arch', type=int, nargs='*', default=[])\n argparser.add_argument('--tau_arch', type=int, nargs='*', default=[200, 200])\n\n argparser.add_argument('--tau_gate_bias', type=float, default=np.nan)\n argparser.add_argument('--encoder_student', action='store_true')\n argparser.add_argument('--tau_student', action='store_true')\n argparser.add_argument('--batch_norm', action='store_true')\n\n\ndef get_model(args):\n return HierarchicalVAE(\n 28 * 28, 28 * 28, args.noise_dim, args.z_dim,\n args.decoder_arch, args.encoder_arch, args.encoder_noise_arch, args.tau_arch,\n args.tau_gate_bias, args.encoder_student, args.tau_student, args.batch_norm)\n" ]
[ [ "numpy.zeros", "numpy.log", "scipy.special.logsumexp", "numpy.array", "numpy.concatenate", "numpy.mean" ] ]
ProjetEtudeMLFI/TensorFI
[ "961a0205ec90935a238c58112e8119c34a70ba7c" ]
[ "Tests/nearest_neighbor.py" ]
[ "#!/usr/bin/python\n'''\nA nearest neighbor learning algorithm example using TensorFlow library.\nThis example is using the MNIST database of handwritten digits\n(http://yann.lecun.com/exdb/mnist/)\n\nAuthor: Aymeric Damien\nProject: https://github.com/aymericdamien/TensorFlow-Examples/\n'''\n\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\nimport TensorFI as ti\n\n# Import MNIST data\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)\n\n# In this example, we limit mnist data\nXtr, Ytr = mnist.train.next_batch(5000) #5000 for training (nn candidates)\nXte, Yte = mnist.test.next_batch(200) #200 for testing\n\n# tf Graph Input\nxtr = tf.placeholder(\"float\", [None, 784])\nxte = tf.placeholder(\"float\", [784])\n\n# Nearest Neighbor calculation using L1 Distance\n# Calculate L1 Distance\ndistance = tf.reduce_sum(tf.abs(tf.add(xtr, tf.negative(xte))),\n reduction_indices=1)\n# Prediction: Get min distance index (Nearest neighbor)\npred = tf.arg_min(distance, 0)\n\naccuracy = 0.\n\n# Initialize the variables (i.e. assign their default value)\ninit = tf.global_variables_initializer()\n\n# Start training\nwith tf.compat.v1.Session() as sess:\n\n # Run the initializer\n sess.run(init)\n\n # Add the fault injection code here to instrument the graph\n # We start injecting the fault right away here unlike earlier\n fi = ti.TensorFI(sess, name=\"NearestNeighbor\", logLevel=50)\n\n # loop over test data\n for i in range(len(Xte)):\n # Get nearest neighbor\n nn_index = sess.run(pred, feed_dict={xtr: Xtr, xte: Xte[i, :]})\n # Get nearest neighbor class label and compare it to its true label\n print(\"Test\", i, \"Prediction:\", np.argmax(Ytr[nn_index]), \\\n \"True Class:\", np.argmax(Yte[i]))\n # Calculate accuracy\n if np.argmax(Ytr[nn_index]) == np.argmax(Yte[i]):\n accuracy += 1. / len(Xte)\n print(\"Accuracy:\", accuracy)\n\n # Make the log files in TensorBoard\n logs_path = \"./logs\"\n logWriter = tf.summary.FileWriter(logs_path, sess.graph)\n" ]
[ [ "tensorflow.placeholder", "tensorflow.compat.v1.Session", "tensorflow.negative", "tensorflow.global_variables_initializer", "numpy.argmax", "tensorflow.examples.tutorials.mnist.input_data.read_data_sets", "tensorflow.arg_min", "tensorflow.summary.FileWriter" ] ]
hpzhen/ChZhShCh
[ "a5af067e342cf734fafb827eab17462c4bd6f74d" ]
[ "chzhshch/inner_package/show.py" ]
[ "# -*- coding: UTF-8 -*-\nimport matplotlib.ticker as ticker\nimport matplotlib.pyplot as plt\nimport mpl_finance as mpf\n# from mpl_finance import quotes_historical_yahoo\nimport numpy as np\nfrom pandas import Series, DataFrame\n# http://blog.csdn.net/xiaodongxiexie/article/details/53123371\n\nclass PlotShow(object):\n\n def __init__(self, date_tickers, title):\n self.date_tickers = date_tickers\n self.xaxis_cycle = 30\n self.width = 0.3\n self.colordown ='#53c156'\n self.colorup = '#ff1717'\n self.xlabel = 'datetime'\n self.ylabel = 'value'\n self.title = title\n\n\n # 时间轴转换\n def __format_date(self, x, pos=None):\n if x < 0 or x > len(self.date_tickers) - 1:\n return ''\n return self.date_tickers[int(x)]\n\n # K画图\n def candle_show(self, stock_data, top_bottom_data):\n\n # 创建子图\n fig, ax = plt.subplots(figsize=(192.0 / 72, 108.0 / 72))\n ax.xaxis.set_major_locator(ticker.MultipleLocator(self.xaxis_cycle))\n ax.xaxis.set_major_formatter(ticker.FuncFormatter(self.__format_date))\n mpf.candlestick_ohlc(ax, stock_data, width=self.width, colordown=self.colordown, colorup=self.colorup, alpha=1)\n\n # title 各种设置\n plt.rcParams['font.sans-serif'] = ['SimHei']\n plt.rcParams['axes.unicode_minus'] = False\n\n plt.title(self.title)\n plt.xlabel(self.xlabel)\n plt.ylabel(self.ylabel)\n\n # 顶底、图例等\n if len(top_bottom_data) > 0:\n x = []\n y = []\n for i in top_bottom_data:\n x.append(i[0])\n y.append(i[1])\n plt.plot(x, y, '--y*', label='分笔')\n plt.legend() # 展示图例\n\n ax.grid(True)\n # plt.savefig('E:\\PythonChZhShCh\\\\' + code + k_type + start_date + end_date + '.png')\n plt.show()\n\n # MA 画图\n def ma_kiss_show(self, ma):\n fig, ax = plt.subplots(1, 1, figsize=(1920 / 72, 1080 / 72), sharex=True)\n\n plt.rcParams['font.sans-serif'] = ['SimHei']\n plt.rcParams['axes.unicode_minus'] = False\n\n ax.plot(ma.x_index, ma.short, color='red', linewidth=1.0, label=\"short\")\n ax.plot(ma.x_index, ma.long, color='black', linewidth=1.0, label=\"long\")\n \n # 交点打印\n ax.plot(ma.intersection_x, ma.intersection_y, 'o')\n\n # 吻打印\n ax.plot(ma.lip_kiss_x, ma.lip_kiss_y, 'o')\n\n\n ax.set_title(self.title)\n ax.set_xlabel(\"日期\")\n ax.set_ylabel(\"price\")\n\n plt.xticks(ma.int_tickers)\n plt.xticks(ma.int_tickers, ma.date_tickers)\n ax.legend()\n plt.show()\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.xticks", "matplotlib.ticker.FuncFormatter", "matplotlib.pyplot.subplots", "matplotlib.pyplot.title", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.plot", "matplotlib.ticker.MultipleLocator", "matplotlib.pyplot.xlabel" ] ]
vandanavk/sagemaker-debugger
[ "5246cda198295aa1dd1656ad32b30c4bb1e2aec4" ]
[ "tests/tensorflow/hooks/test_mirrored_strategy.py" ]
[ "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Convolutional Neural Network Estimator for MNIST, built with tf.layers.\"\"\"\n\n# Future\nfrom __future__ import absolute_import, division, print_function\n\n# Third Party\nimport numpy as np\nimport pytest\nimport tensorflow as tf\nfrom tensorflow.python.client import device_lib\nfrom tests.tensorflow.utils import create_trial_fast_refresh\n\n# First Party\nimport smdebug.tensorflow as smd\nfrom smdebug.core.collection import CollectionKeys\nfrom smdebug.core.modes import ModeKeys\nfrom smdebug.exceptions import TensorUnavailableForStep\nfrom smdebug.tensorflow import get_hook\n\n\ndef cnn_model_fn(features, labels, mode):\n \"\"\"Model function for CNN.\"\"\"\n # Input Layer\n # Reshape X to 4-D tensor: [batch_size, width, height, channels]\n # MNIST images are 28x28 pixels, and have one color channel\n input_layer = tf.reshape(features[\"x\"], [-1, 28, 28, 1])\n\n # Convolutional Layer #1\n # Computes 32 features using a 5x5 filter with ReLU activation.\n # Padding is added to preserve width and height.\n # Input Tensor Shape: [batch_size, 28, 28, 1]\n # Output Tensor Shape: [batch_size, 28, 28, 32]\n conv1 = tf.layers.conv2d(\n inputs=input_layer, filters=32, kernel_size=[5, 5], padding=\"same\", activation=tf.nn.relu\n )\n\n # Pooling Layer #1\n # First max pooling layer with a 2x2 filter and stride of 2\n # Input Tensor Shape: [batch_size, 28, 28, 32]\n # Output Tensor Shape: [batch_size, 14, 14, 32]\n pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)\n\n # Convolutional Layer #2\n # Computes 64 features using a 5x5 filter.\n # Padding is added to preserve width and height.\n # Input Tensor Shape: [batch_size, 14, 14, 32]\n # Output Tensor Shape: [batch_size, 14, 14, 64]\n conv2 = tf.layers.conv2d(\n inputs=pool1, filters=64, kernel_size=[5, 5], padding=\"same\", activation=tf.nn.relu\n )\n\n # Pooling Layer #2\n # Second max pooling layer with a 2x2 filter and stride of 2\n # Input Tensor Shape: [batch_size, 14, 14, 64]\n # Output Tensor Shape: [batch_size, 7, 7, 64]\n pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)\n\n # Flatten tensor into a batch of vectors\n # Input Tensor Shape: [batch_size, 7, 7, 64]\n # Output Tensor Shape: [batch_size, 7 * 7 * 64]\n pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])\n\n # Dense Layer\n # Densely connected layer with 1024 neurons\n # Input Tensor Shape: [batch_size, 7 * 7 * 64]\n # Output Tensor Shape: [batch_size, 1024]\n dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)\n\n # Add dropout operation; 0.6 probability that element will be kept\n dropout = tf.layers.dropout(\n inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN\n )\n\n # Logits layer\n # Input Tensor Shape: [batch_size, 1024]\n # Output Tensor Shape: [batch_size, 10]\n logits = tf.layers.dense(inputs=dropout, units=10)\n\n predictions = {\n # Generate predictions 
(for PREDICT and EVAL mode)\n \"classes\": tf.argmax(input=logits, axis=1),\n # Add `softmax_tensor` to the graph. It is used for PREDICT and by the\n # `logging_hook`.\n \"probabilities\": tf.nn.softmax(logits, name=\"softmax_tensor\"),\n }\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)\n\n # Calculate Loss (for both TRAIN and EVAL modes)\n loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)\n\n # Configure the Training Op (for TRAIN mode)\n if mode == tf.estimator.ModeKeys.TRAIN:\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)\n optimizer = smd.get_hook().wrap_optimizer(optimizer)\n train_op = optimizer.minimize(loss=loss, global_step=tf.train.get_global_step())\n return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)\n\n # Add evaluation metrics (for EVAL mode)\n eval_metric_ops = {\n \"accuracy\": tf.metrics.accuracy(labels=labels, predictions=predictions[\"classes\"])\n }\n return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)\n\n\ndef per_device_batch_size(batch_size, num_gpus):\n \"\"\"For multi-gpu, batch-size must be a multiple of the number of GPUs.\n Note that this should eventually be handled by DistributionStrategies\n directly. Multi-GPU support is currently experimental, however,\n so doing the work here until that feature is in place.\n Args:\n batch_size: Global batch size to be divided among devices. This should be\n equal to num_gpus times the single-GPU batch_size for multi-gpu training.\n num_gpus: How many GPUs are used with DistributionStrategies.\n Returns:\n Batch size per device.\n Raises:\n ValueError: if batch_size is not divisible by number of devices\n \"\"\"\n if num_gpus <= 1:\n return batch_size\n\n remainder = batch_size % num_gpus\n if remainder:\n err = (\n \"When running with multiple GPUs, batch size \"\n \"must be a multiple of the number of available GPUs. 
Found {} \"\n \"GPUs with a batch size of {}; try --batch_size={} instead.\"\n ).format(num_gpus, batch_size, batch_size - remainder)\n raise ValueError(err)\n return int(batch_size / num_gpus)\n\n\nclass InputFnProvider:\n def __init__(self, train_batch_size):\n self.train_batch_size = train_batch_size\n self.__load_data()\n\n def __load_data(self):\n # Load training and eval data\n mnist = tf.contrib.learn.datasets.load_dataset(\"mnist\")\n self.train_data = mnist.train.images # Returns np.array\n self.train_labels = np.asarray(mnist.train.labels, dtype=np.int32)\n self.eval_data = mnist.test.images # Returns np.array\n self.eval_labels = np.asarray(mnist.test.labels, dtype=np.int32)\n\n def train_input_fn(self):\n \"\"\"An input function for training\"\"\"\n # Shuffle, repeat, and batch the examples.\n dataset = tf.data.Dataset.from_tensor_slices(({\"x\": self.train_data}, self.train_labels))\n dataset = dataset.shuffle(1000).repeat().batch(self.train_batch_size)\n return dataset\n\n def eval_input_fn(self):\n \"\"\"An input function for evaluation or prediction\"\"\"\n dataset = tf.data.Dataset.from_tensor_slices(({\"x\": self.eval_data}, self.eval_labels))\n dataset = dataset.batch(1).repeat()\n return dataset\n\n\ndef get_available_gpus():\n local_device_protos = device_lib.list_local_devices()\n return len([x.name for x in local_device_protos if x.device_type == \"GPU\"])\n\n\ndef helper_mirrored(\n trial_dir,\n save_all=False,\n num_steps=3,\n save_config=None,\n reduction_config=None,\n include_collections=None,\n steps=None,\n zcc=False,\n eval_distributed=False,\n include_workers=\"all\",\n):\n num_gpus = get_available_gpus()\n num_devices = num_gpus if num_gpus > 0 else 1\n batch_size = 10 * num_devices\n\n # input_fn which serves Dataset\n input_fn_provider = InputFnProvider(per_device_batch_size(batch_size, num_devices))\n\n # Use multiple GPUs by MirroredStragtegy.\n # All avaiable GPUs will be used if `num_gpus` is omitted.\n # if num_devices > 1:\n distribution = tf.contrib.distribute.MirroredStrategy()\n # print(\"### Doing Multi GPU Training\")\n # else:\n # distribution = None\n # Pass to RunConfig\n config = tf.estimator.RunConfig(\n train_distribute=distribution,\n eval_distribute=distribution if eval_distributed else None,\n model_dir=\"/tmp/mnist_convnet_model\",\n )\n\n if save_config is None:\n save_config = smd.SaveConfig(save_interval=2)\n\n if include_collections is None:\n include_collections = [\n CollectionKeys.WEIGHTS,\n CollectionKeys.BIASES,\n CollectionKeys.GRADIENTS,\n CollectionKeys.LOSSES,\n ]\n\n if not zcc:\n ts_hook = smd.SessionHook(\n out_dir=trial_dir,\n save_all=save_all,\n include_collections=include_collections,\n save_config=save_config,\n reduction_config=reduction_config,\n include_workers=include_workers,\n )\n else:\n print(\"zcc is passed. 
ignoring include_collections and save_config\")\n\n mnist_classifier = tf.estimator.Estimator(model_fn=cnn_model_fn, config=config)\n if steps is None:\n steps = [\"train\"]\n\n for s in steps:\n if s == \"train\":\n print(\"Starting train\")\n if not zcc:\n ts_hook.set_mode(smd.modes.TRAIN)\n # Train the model\n mnist_classifier.train(\n input_fn=input_fn_provider.train_input_fn, steps=num_steps, hooks=[ts_hook]\n )\n else:\n mnist_classifier.train(input_fn=input_fn_provider.train_input_fn, steps=num_steps)\n elif s == \"eval\":\n print(\"Starting eval\")\n\n if not zcc:\n ts_hook.set_mode(smd.modes.EVAL)\n # Evaluate the model and print results\n mnist_classifier.evaluate(\n input_fn=input_fn_provider.eval_input_fn, steps=num_steps, hooks=[ts_hook]\n )\n else:\n mnist_classifier.evaluate(input_fn=input_fn_provider.eval_input_fn, steps=num_steps)\n elif s == \"predict\":\n print(\"Starting predict\")\n if not zcc:\n ts_hook.set_mode(smd.modes.PREDICT)\n # Evaluate the model and print results\n p = mnist_classifier.predict(\n input_fn=input_fn_provider.eval_input_fn, hooks=[ts_hook]\n )\n else:\n p = mnist_classifier.predict(input_fn=input_fn_provider.eval_input_fn)\n for i in range(num_steps):\n next(p)\n get_hook()._cleanup()\n return distribution\n\n\ndef skip_trial_check():\n # Skip trial check as in this case SMDebug is disabled for mirrored strategy\n # trial will not be loaded\n import tensorflow as tf\n from packaging import version\n\n if version.parse(tf.__version__) < version.parse(\"1.14.0\"):\n return True\n else:\n return False\n\n\[email protected]\ndef test_basic(out_dir, zcc=False):\n strategy = helper_mirrored(\n out_dir,\n steps=[\"train\", \"eval\", \"predict\", \"train\"],\n include_collections=[\n CollectionKeys.WEIGHTS,\n CollectionKeys.BIASES,\n CollectionKeys.GRADIENTS,\n CollectionKeys.LOSSES,\n ],\n eval_distributed=False,\n zcc=zcc,\n )\n if skip_trial_check():\n return\n\n tr = create_trial_fast_refresh(out_dir)\n # wts, grads, losses\n assert (\n len(tr.tensor_names()) == 8 + 8 + (1 * strategy.num_replicas_in_sync) + 1\n ) # 1 main loss, and 1 from each worker\n assert len(tr.steps()) == 7\n assert len(tr.steps(ModeKeys.TRAIN)) == 3\n assert len(tr.steps(ModeKeys.EVAL)) == 2\n assert len(tr.steps(ModeKeys.PREDICT)) == 2\n\n assert \"dense_1/kernel:0\" in tr.tensor_names(collection=\"weights\")\n for tname in tr.tensor_names(collection=\"weights\"):\n for s in tr.tensor(tname).steps(ModeKeys.TRAIN):\n assert len(tr.tensor(tname).workers(s, ModeKeys.TRAIN)) == strategy.num_replicas_in_sync\n for worker in tr.tensor(tname).workers(s, ModeKeys.TRAIN):\n assert tr.tensor(tname).value(s, worker=worker, mode=ModeKeys.TRAIN) is not None\n for s in tr.tensor(tname).steps(ModeKeys.EVAL):\n assert len(tr.tensor(tname).workers(s, ModeKeys.EVAL)) == 1 # as eval_dist = False\n assert tr.tensor(tname).value(s, mode=ModeKeys.EVAL) is not None\n\n tensornames = tr.tensor_names(regex=\"Identity_\\d+:0\")\n for s in tr.tensor(tensornames[0]).steps(ModeKeys.TRAIN):\n for w in tr.tensor(tensornames[0]).workers(s, ModeKeys.TRAIN):\n assert tr.tensor(tensornames[0]).value(s, worker=w, mode=ModeKeys.TRAIN) is not None\n assert (\n len(tr.tensor(tensornames[0]).workers(s, ModeKeys.TRAIN))\n == strategy.num_replicas_in_sync\n )\n\n for tname in tr.tensor_names(collection=\"losses\"):\n if tname != tensornames[0]:\n for s in tr.tensor(tname).steps(ModeKeys.TRAIN):\n assert len(tr.tensor(tname).workers(s, ModeKeys.TRAIN)) == 1, tname\n assert tr.tensor(tname).value(s, 
mode=ModeKeys.TRAIN) is not None\n\n tname = \"sparse_softmax_cross_entropy_loss/value:0\"\n for s in tr.tensor(tname).steps(ModeKeys.EVAL):\n assert len(tr.tensor(tname).workers(s, ModeKeys.EVAL)) == 1 # eval_dist=False\n assert tr.tensor(tname).value(s, mode=ModeKeys.EVAL) is not None\n\n\[email protected]\ndef test_eval_distributed(out_dir):\n strategy = helper_mirrored(\n out_dir,\n steps=[\"train\", \"eval\"],\n include_collections=[CollectionKeys.WEIGHTS, CollectionKeys.BIASES, CollectionKeys.LOSSES],\n eval_distributed=True,\n )\n if skip_trial_check():\n return\n tr = create_trial_fast_refresh(out_dir)\n assert len(tr.tensor_names()) == 8 + 1 * strategy.num_replicas_in_sync + 1\n assert len(tr.steps()) == 4\n assert len(tr.steps(ModeKeys.TRAIN)) == 2\n assert len(tr.steps(ModeKeys.EVAL)) == 2\n\n for tname in tr.tensor_names(collection=\"weights\"):\n for s in tr.tensor(tname).steps(ModeKeys.TRAIN):\n assert len(tr.tensor(tname).workers(s, ModeKeys.TRAIN)) == strategy.num_replicas_in_sync\n for worker in tr.tensor(tname).workers(s, ModeKeys.TRAIN):\n assert tr.tensor(tname).value(s, worker=worker, mode=ModeKeys.TRAIN) is not None\n for s in tr.tensor(tname).steps(ModeKeys.EVAL):\n assert len(tr.tensor(tname).workers(s, ModeKeys.EVAL)) == strategy.num_replicas_in_sync\n assert tr.tensor(tname).value(s, mode=ModeKeys.EVAL) is not None\n\n tensornames = tr.tensor_names(regex=\"Identity_\\d+:0\")\n for s in tr.tensor(tensornames[0]).steps(ModeKeys.TRAIN):\n for w in tr.tensor(tensornames[0]).workers(s, ModeKeys.TRAIN):\n assert tr.tensor(tensornames[0]).value(s, worker=w, mode=ModeKeys.TRAIN) is not None\n assert (\n len(tr.tensor(tensornames[0]).workers(s, ModeKeys.TRAIN))\n == strategy.num_replicas_in_sync\n )\n\n for tname in tr.tensor_names(collection=\"losses\"):\n for s in tr.tensor(tname).steps(ModeKeys.EVAL):\n assert len(tr.tensor(tname).workers(s, ModeKeys.EVAL)) == 1\n assert tr.tensor(tname).value(s, mode=ModeKeys.EVAL) is not None\n if tname != tensornames[0]:\n for s in tr.tensor(tname).steps(ModeKeys.TRAIN):\n assert len(tr.tensor(tname).workers(s, ModeKeys.EVAL)) == 1\n assert tr.tensor(tname).value(s, mode=ModeKeys.EVAL) is not None\n\n\[email protected]\ndef test_reductions(out_dir):\n strategy = helper_mirrored(\n out_dir,\n steps=[\"train\", \"eval\"],\n reduction_config=smd.ReductionConfig(\n reductions=[\"sum\", \"max\"], abs_reductions=[\"sum\", \"max\"], norms=[\"l1\"]\n ),\n include_collections=[CollectionKeys.WEIGHTS, CollectionKeys.BIASES, CollectionKeys.LOSSES],\n eval_distributed=True,\n )\n if skip_trial_check():\n return\n\n tr = create_trial_fast_refresh(out_dir)\n assert len(tr.tensor_names()) == 8 + 1 * strategy.num_replicas_in_sync + 1\n assert len(tr.steps()) == 4\n assert len(tr.steps(ModeKeys.TRAIN)) == 2\n assert len(tr.steps(ModeKeys.EVAL)) == 2\n\n for tname in tr.tensor_names(collection=\"weights\"):\n for s in tr.tensor(tname).steps(ModeKeys.TRAIN):\n try:\n tr.tensor(tname).value(s, mode=ModeKeys.TRAIN)\n assert False\n except TensorUnavailableForStep:\n # for some tensors l1 reduction can't be saved due to improper dimensions for the reduction\n assert len(tr.tensor(tname).reduction_values(s, mode=ModeKeys.TRAIN)) >= 4\n\n for s in tr.tensor(tname).steps(ModeKeys.EVAL):\n try:\n tr.tensor(tname).value(s, mode=ModeKeys.EVAL)\n assert False\n except TensorUnavailableForStep:\n # for some tensors l1 reduction can't be saved due to improper dimensions for the reduction\n assert len(tr.tensor(tname).reduction_values(s, mode=ModeKeys.EVAL)) >= 
4\n\n for tname in tr.tensor_names(collection=\"losses\"):\n for s in tr.tensor(tname).steps(ModeKeys.EVAL):\n assert len(tr.tensor(tname).reduction_values(s, mode=ModeKeys.EVAL)) == 0\n assert tr.tensor(tname).value(s, mode=ModeKeys.EVAL) is not None\n\n for tname in tr.tensor_names(collection=\"losses\"):\n for s in tr.tensor(tname).steps(ModeKeys.TRAIN):\n assert len(tr.tensor(tname).reduction_values(s, mode=ModeKeys.TRAIN)) == 0\n assert tr.tensor(tname).value(s, mode=ModeKeys.TRAIN) is not None\n\n\[email protected]\ndef test_save_all(out_dir):\n strategy = helper_mirrored(\n out_dir, steps=[\"train\"], num_steps=1, save_all=True, eval_distributed=True\n )\n if skip_trial_check():\n return\n tr = create_trial_fast_refresh(out_dir)\n assert len(tr.tensor_names()) > 100\n assert len(tr.steps())\n assert len(tr.tensor_names(collection=\"weights\"))\n assert len(tr.tensor_names(collection=\"biases\"))\n assert len(tr.tensor_names(collection=\"gradients\"))\n\n\[email protected]\ndef test_save_all_worker(out_dir):\n # skip test if no gpus available\n if get_available_gpus() == 0:\n return\n strategy = helper_mirrored(\n out_dir,\n steps=[\"train\"],\n num_steps=1,\n save_all=True,\n eval_distributed=True,\n include_workers=\"all\",\n )\n tr = create_trial_fast_refresh(out_dir)\n assert len(tr.steps())\n assert len(tr.workers()) == get_available_gpus()\n assert len(tr.tensor_names(collection=\"weights\"))\n assert \"conv2d/kernel:0\" in tr.tensor_names(collection=\"weights\")\n assert len(tr.tensor(\"conv2d/kernel:0\").workers(0)) == strategy.num_replicas_in_sync\n assert len(tr.tensor_names(collection=\"biases\"))\n assert \"conv2d/bias:0\" in tr.tensor_names(collection=\"biases\")\n assert len(tr.tensor(\"conv2d/bias:0\").workers(0)) == strategy.num_replicas_in_sync\n assert len(tr.tensor_names(collection=\"gradients\"))\n\n\[email protected]\ndef test_save_one_worker(out_dir):\n strategy = helper_mirrored(\n out_dir,\n steps=[\"train\"],\n num_steps=1,\n save_all=True,\n eval_distributed=True,\n include_workers=\"one\",\n )\n tr = create_trial_fast_refresh(out_dir)\n assert len(tr.workers()) == 1\n assert len(tr.steps())\n assert len(tr.tensor_names(collection=\"weights\"))\n assert len(tr.tensor_names(collection=\"biases\"))\n assert len(tr.tensor_names(collection=\"gradients\"))\n" ]
[ [ "tensorflow.layers.conv2d", "tensorflow.reshape", "numpy.asarray", "tensorflow.train.get_global_step", "tensorflow.nn.softmax", "tensorflow.contrib.learn.datasets.load_dataset", "tensorflow.losses.sparse_softmax_cross_entropy", "tensorflow.estimator.RunConfig", "tensorflow.contrib.distribute.MirroredStrategy", "tensorflow.python.client.device_lib.list_local_devices", "tensorflow.estimator.EstimatorSpec", "tensorflow.layers.max_pooling2d", "tensorflow.layers.dense", "tensorflow.layers.dropout", "tensorflow.estimator.Estimator", "tensorflow.metrics.accuracy", "tensorflow.train.GradientDescentOptimizer", "tensorflow.argmax", "tensorflow.data.Dataset.from_tensor_slices" ] ]
SSLAB-SSU/deep-learning-from-scratch
[ "3609360751d67085e0963ee6d7af6d49380cd965" ]
[ "ch07/simple_convnet.py" ]
[ "# coding: utf-8\nimport sys, os\nsys.path.append('/Users/hxxnhxx/Documents/development/deep-learning-from-scratch') # 親ディレクトリのファイルをインポートするための設定\nimport pickle\nimport numpy as np\nfrom collections import OrderedDict\nfrom common.layers import *\nfrom common.gradient import numerical_gradient\n\n\nclass SimpleConvNet:\n \"\"\"単純なConvNet\n\n conv - relu - pool - affine - relu - affine - softmax\n \n Parameters\n ----------\n input_size : 入力サイズ(MNISTの場合は784)\n hidden_size_list : 隠れ層のニューロンの数のリスト(e.g. [100, 100, 100])\n output_size : 出力サイズ(MNISTの場合は10)\n activation : 'relu' or 'sigmoid'\n weight_init_std : 重みの標準偏差を指定(e.g. 0.01)\n 'relu'または'he'を指定した場合は「Heの初期値」を設定\n 'sigmoid'または'xavier'を指定した場合は「Xavierの初期値」を設定\n \"\"\"\n def __init__(self, input_dim=(1, 28, 28), \n conv_param={'filter_num':30, 'filter_size':5, 'pad':0, 'stride':1},\n hidden_size=100, output_size=10, weight_init_std=0.01):\n filter_num = conv_param['filter_num']\n filter_size = conv_param['filter_size']\n filter_pad = conv_param['pad']\n filter_stride = conv_param['stride']\n input_size = input_dim[1]\n conv_output_size = (input_size - filter_size + 2*filter_pad) / filter_stride + 1\n pool_output_size = int(filter_num * (conv_output_size/2) * (conv_output_size/2))\n\n # 重みの初期化\n self.params = {}\n self.params['W1'] = weight_init_std * \\\n np.random.randn(filter_num, input_dim[0], filter_size, filter_size)\n self.params['b1'] = np.zeros(filter_num)\n self.params['W2'] = weight_init_std * \\\n np.random.randn(pool_output_size, hidden_size)\n self.params['b2'] = np.zeros(hidden_size)\n self.params['W3'] = weight_init_std * \\\n np.random.randn(hidden_size, output_size)\n self.params['b3'] = np.zeros(output_size)\n\n # レイヤの生成\n self.layers = OrderedDict()\n self.layers['Conv1'] = Convolution(self.params['W1'], self.params['b1'],\n conv_param['stride'], conv_param['pad'])\n self.layers['Relu1'] = Relu()\n self.layers['Pool1'] = Pooling(pool_h=2, pool_w=2, stride=2)\n self.layers['Affine1'] = Affine(self.params['W2'], self.params['b2'])\n self.layers['Relu2'] = Relu()\n self.layers['Affine2'] = Affine(self.params['W3'], self.params['b3'])\n\n self.last_layer = SoftmaxWithLoss()\n\n def predict(self, x):\n for layer in self.layers.values():\n x = layer.forward(x)\n\n return x\n\n def loss(self, x, t):\n \"\"\"損失関数を求める\n 引数のxは入力データ、tは教師ラベル\n \"\"\"\n y = self.predict(x)\n return self.last_layer.forward(y, t)\n\n def accuracy(self, x, t, batch_size=100):\n if t.ndim != 1 : t = np.argmax(t, axis=1)\n \n acc = 0.0\n \n for i in range(int(x.shape[0] / batch_size)):\n tx = x[i*batch_size:(i+1)*batch_size]\n tt = t[i*batch_size:(i+1)*batch_size]\n y = self.predict(tx)\n y = np.argmax(y, axis=1)\n acc += np.sum(y == tt) \n \n return acc / x.shape[0]\n\n def numerical_gradient(self, x, t):\n \"\"\"勾配を求める(数値微分)\n\n Parameters\n ----------\n x : 入力データ\n t : 教師ラベル\n\n Returns\n -------\n 各層の勾配を持ったディクショナリ変数\n grads['W1']、grads['W2']、...は各層の重み\n grads['b1']、grads['b2']、...は各層のバイアス\n \"\"\"\n loss_w = lambda w: self.loss(x, t)\n\n grads = {}\n for idx in (1, 2, 3):\n grads['W' + str(idx)] = numerical_gradient(loss_w, self.params['W' + str(idx)])\n grads['b' + str(idx)] = numerical_gradient(loss_w, self.params['b' + str(idx)])\n\n return grads\n\n def gradient(self, x, t):\n \"\"\"勾配を求める(誤差逆伝搬法)\n\n Parameters\n ----------\n x : 入力データ\n t : 教師ラベル\n\n Returns\n -------\n 各層の勾配を持ったディクショナリ変数\n grads['W1']、grads['W2']、...は各層の重み\n grads['b1']、grads['b2']、...は各層のバイアス\n \"\"\"\n # forward\n self.loss(x, t)\n\n # backward\n dout = 1\n dout = 
self.last_layer.backward(dout)\n\n layers = list(self.layers.values())\n layers.reverse()\n for layer in layers:\n dout = layer.backward(dout)\n\n # 設定\n grads = {}\n grads['W1'], grads['b1'] = self.layers['Conv1'].dW, self.layers['Conv1'].db\n grads['W2'], grads['b2'] = self.layers['Affine1'].dW, self.layers['Affine1'].db\n grads['W3'], grads['b3'] = self.layers['Affine2'].dW, self.layers['Affine2'].db\n\n return grads\n \n def save_params(self, file_name=\"params.pkl\"):\n params = {}\n for key, val in self.params.items():\n params[key] = val\n with open(file_name, 'wb') as f:\n pickle.dump(params, f)\n\n def load_params(self, file_name=\"params.pkl\"):\n with open(file_name, 'rb') as f:\n params = pickle.load(f)\n for key, val in params.items():\n self.params[key] = val\n\n for i, key in enumerate(['Conv1', 'Affine1', 'Affine2']):\n self.layers[key].W = self.params['W' + str(i+1)]\n self.layers[key].b = self.params['b' + str(i+1)]" ]
[ [ "numpy.sum", "numpy.random.randn", "numpy.argmax", "numpy.zeros" ] ]
acpadua/pycelle
[ "8ec8c474c04e115635a76d360f5f4c9613b01705" ]
[ "analyse_images.py" ]
[ "import numpy as np\r\n\r\nfrom math import sqrt\r\nfrom skimage import data\r\nfrom skimage.feature import blob_dog, blob_log, blob_doh\r\nfrom skimage.color import rgb2gray\r\nfrom pandas import DataFrame\r\nimport pandas as pd\r\n\r\nimport matplotlib.pyplot as plt\r\nimport os\r\nfrom os import path\r\nimport glob\r\nimport cv2\r\nimport time\r\nimport math\r\nimport json\r\n\r\ndef convert_to_grayscale(img, file_name, output_gray):\r\n \"\"\"\r\n This function converts the original image in a grayscale image. Then shows and saves the gray scale image.\r\n :param img: path to image of interest\r\n :param output_path: path for the output image\r\n \"\"\"\r\n # reads the image img in grayscale\r\n img_gray = cv2.imread(img,0)\r\n save_name = file_name + '_gray.jpg'\r\n\r\n cv2.imwrite(os.path.join(output_gray, save_name), img_gray)\r\n #cv2.namedWindow(file_name, cv2.WINDOW_NORMAL)\r\n #cv2.imshow(file_name, img_gray)\r\n #cv2.waitKey(2)\r\n #cv2.destroyAllWindows()\r\n print(\"converted to gray\")\r\n return img_gray\r\n\r\ndef cumsum_histogram_percentage(img, file_name, output_histograms):\r\n \"\"\"\r\n Reads the image 'img', gets the images' histogram and then the cumulative distribution.\r\n Next, normalises the cumulative distribution to be between 0 and 1. Finally, shows and saves it in a plot. \r\n :param img: path to the image of interest \r\n :param output_histograms: path where the cumulative distribution histogram will be saved. \r\n :return: cumulative distribution histogram, grayscale values, image name \r\n \"\"\"\r\n hist,bins = np.histogram(img.flatten(),256,[0,256])\r\n cdf = hist.cumsum()\r\n cdf_percentage = cdf * 1 / cdf.max()\r\n plt.plot(cdf_percentage, color = 'b')\r\n plt.xlim([0,256])\r\n plt.legend(('cdf'), loc = 'upper left')\r\n fig1=plt.gcf()\r\n #plt.show()\r\n print(img)\r\n name_fig1 = file_name + '_hist'\r\n print(name_fig1)\r\n fig1.savefig(os.path.join(output_histograms, name_fig1))\r\n print(\"histogram_done\")\r\n\r\n print(cdf_percentage)\r\n return cdf_percentage, bins\r\n\r\ndef remove_background(img, file_name, cdf_percentage, bins, output_thresh):\r\n \"\"\"\r\n Finds the value (n) of grayscale where the cumulative distribution of the image's histogram is 75 %.\r\n Then, applied a threshold (n) to the image, that converts all the values bellow n to 0 (black).\r\n :param image: path to image of interest\r\n :param file_name: name of the image of interest\r\n :param cdf_percentage: array with the values of the cumsum histogram of the image\r\n :param bins: grayscale values [0, 255]\r\n :param output_thresh_blur: path where the output image will be saved\r\n :return: image with threshold of 75 % applied\r\n \"\"\"\r\n #get array with all values > 75 % from cumsum\r\n third_quartil_cumsum = np.where(cdf_percentage > 0.75)[0]\r\n\r\n # get first value > 75 % from third quartil cumsum, which will be the position where to cut\r\n position_where_to_cut = third_quartil_cumsum[0]\r\n print(position_where_to_cut)\r\n\r\n # get gray value where to cut, which is the treshold\r\n threshold = bins[position_where_to_cut]\r\n print(threshold)\r\n\r\n ret, img_thresh_75 = cv2.threshold(img, threshold, 255, cv2.THRESH_TOZERO)\r\n save_name = file_name + '_thresh75.jpg'\r\n\r\n cv2.imwrite(os.path.join(output_thresh, save_name), img_thresh_75)\r\n #cv2.namedWindow(file_name, cv2.WINDOW_NORMAL)\r\n #cv2.imshow(save_name, img_thresh_75)\r\n #cv2.waitKey(2)\r\n #cv2.destroyAllWindows()\r\n print(\"converted to thresh75\")\r\n return img_thresh_75\r\n\r\ndef 
remove_white(img, file_name, output_background):\r\n \"\"\"\r\n This function converts the white pixels of the image in black pixels.\r\n \"\"\"\r\n white_px = 220\r\n black_px = 0\r\n\r\n (row, col) = img.shape\r\n img_array = np.array(img)\r\n\r\n for r in range(row):\r\n for c in range(col):\r\n px = img[r][c]\r\n if (px > white_px):\r\n img_array[r][c] = black_px\r\n\r\n print(\"end for cycle\")\r\n\r\n save_name = file_name + '_no_white.jpg'\r\n\r\n cv2.imwrite(os.path.join(output_background, save_name), img_array)\r\n cv2.namedWindow(save_name, cv2.WINDOW_NORMAL)\r\n cv2.imshow(save_name, img_array)\r\n cv2.waitKey(1)\r\n cv2.destroyAllWindows()\r\n\r\n\r\ndef get_droplets(img, file_name, parameters, output_circles):\r\n\r\n blobs_log1 = blob_log(img, min_sigma= parameters['min_sigma_value'], max_sigma= parameters['max_sigma_value'], num_sigma= parameters['num_sigma_value'], threshold= parameters['threshold_value'])\r\n\r\n blobs_log1[:, 2] = blobs_log1[:, 2] * sqrt(2)\r\n print(\"end of blob\")\r\n\r\n color = 'lime'\r\n\r\n file_name_parts = file_name.split(\"_\")\r\n\r\n image_description = []\r\n\r\n for part in file_name_parts:\r\n image_description.append(part)\r\n\r\n x_set = []\r\n y_set = []\r\n r_set = []\r\n\r\n droplets = []\r\n\r\n fig1, ax = plt.subplots(1, 1, figsize=(9, 3))\r\n\r\n for blob in blobs_log1:\r\n y, x, r = blob\r\n c = plt.Circle((x, y), r, color=color, linewidth=1, fill=False)\r\n ax.add_patch(c)\r\n\r\n x_set.append(x)\r\n y_set.append(y)\r\n r_set.append(r)\r\n ax.set_axis_off()\r\n\r\n ax.set_title(file_name)\r\n ax.imshow(img)\r\n\r\n plt.tight_layout()\r\n fig1 = plt.gcf()\r\n #plt.show(block=False)\r\n #time.sleep(0.5)\r\n #plt.close()\r\n\r\n\r\n save_name = file_name + '_min' + str(parameters['min_sigma_value']) + '_max' + str(parameters['max_sigma_value']) + '_num' + str(parameters['num_sigma_value']) + '_thresh' + str(parameters['threshold_value']) + \".svg\"\r\n fig1.savefig(os.path.join(output_circles, save_name), format='svg', dpi=1200)\r\n return x_set, y_set, r_set\r\n\r\ndef file_droplets(file_name, output_path, parameters, x_set, y_set, r_set):\r\n\r\n Droplets = {'min_sigma': parameters['min_sigma_value'], 'max_sigma': parameters['max_sigma_value'], 'num_sigma': parameters['num_sigma_value'], 'threshold': parameters['threshold_value'], 'x': x_set, 'y': y_set, 'r': r_set}\r\n\r\n df = DataFrame(Droplets, columns=['min_sigma', 'max_sigma', 'num_sigma', 'threshold', 'x', 'y', 'r'])\r\n\r\n export_csv = df.to_csv(output_path + '/files/' + file_name + '.csv', index=None, header=True) # Don't forget to add '.csv' at the end of the path\r\n\r\n print (df)\r\n\r\ndef analyse_droplets(files_path, output_files_analysis):\r\n\r\n voc =[]\r\n time = []\r\n label = []\r\n\r\n min_sigma = []\r\n max_sigma = []\r\n num_sigma = []\r\n threshold = []\r\n\r\n droplets_nr = []\r\n droplets_mean_radius = []\r\n optical_area = []\r\n\r\n droplets_nr_exp = []\r\n droplets_mean_radius_exp = []\r\n optical_area_exp = []\r\n\r\n for file in glob.glob(\"{}/*csv\".format(files_path)):\r\n df = pd.read_csv(file)\r\n name_file = os.path.basename(file)\r\n name_info = name_file.split('_')\r\n\r\n voc.append(name_info[0])\r\n time.append(name_info[1])\r\n label.append(name_info[2])\r\n\r\n r_set = ((df['r'] * 5000) / 1850)\r\n\r\n min_sigma.append(df['min_sigma'][0])\r\n max_sigma.append(df['max_sigma'][0])\r\n num_sigma.append(df['num_sigma'][0])\r\n threshold.append(df['threshold'][0])\r\n\r\n count_lines = df.shape[0]\r\n count_droplets = count_lines +1\r\n 
print(str(count_droplets))\r\n droplets_nr.append(count_droplets)\r\n droplets_mean_radius.append(r_set.mean())\r\n optical_area_sensor = 0\r\n\r\n for i in range(count_lines -1):\r\n optical_area_sensor += (np.pi * (df['r'][i] ** 2))\r\n optical_area_sensor_relative = (optical_area_sensor / (np.pi * (2500 ** 2)))\r\n optical_area.append(optical_area_sensor_relative)\r\n print(str(optical_area_sensor))\r\n\r\n\r\n\r\n Analysis = {'voc': voc, 'time': time, 'label': label, 'min_sigma': min_sigma, 'max_sigma': max_sigma, 'num_sigma': num_sigma,\r\n 'threshold': threshold, 'droplets_nr': droplets_nr, 'droplets_mean_radius': droplets_mean_radius, 'optical_area': optical_area}\r\n\r\n df = DataFrame(Analysis, columns=['voc', 'time', 'label', 'min_sigma', 'max_sigma', 'num_sigma', 'threshold', 'droplets_nr', 'droplets_mean_radius', 'optical_area'])\r\n\r\n save_name = 'droplets_analysis.csv'\r\n analysis_file = df.to_csv(os.path.join(output_files_analysis, save_name), index=None, header=True)\r\n return (analysis_file)\r\n\r\ndef sort_file(file, output_path):\r\n\r\n df = pd.read_csv(file)\r\n\r\n result = df.sort_values(['voc', 'time', 'label'], ascending=[1, 1, 1])\r\n result.to_csv(os.path.join(output_path, 'droplets_analysis_sorted.csv'))\r\n\r\n print(result)\r\n\r\ndef add_columns_validation(file_data, file_validation, output_path):\r\n df_a = pd.read_csv(file_data)\r\n df_e = pd.read_csv(file_validation)\r\n\r\n df_a['droplets_nr_exp'] = df_e['droplets_nr_exp']\r\n df_a['mean_diameter_exp'] = df_e['mean_diameter_exp']\r\n\r\n save_name = 'droplets_analysis_complete.csv'\r\n complete_file = df_a.to_csv(os.path.join(output_path, save_name))\r\n return complete_file\r\n\r\ndef compare_results(complete_file, output_path):\r\n\r\n df_c = pd.read_csv(complete_file)\r\n compare_nr = []\r\n compare_radius = []\r\n error = []\r\n\r\n for i, row in df_c.iterrows():\r\n sub_nr_i = (row['droplets_nr'] - row['droplets_nr_exp']) / row['droplets_nr_exp']\r\n compare_nr.append(sub_nr_i)\r\n print(sub_nr_i)\r\n\r\n mean_radius_i_a = (row['droplets_mean_radius'] * 5000) / 1400\r\n mean_radius_i_e = (row['mean_diameter_exp'] / 2)\r\n sub_radius_i = ((mean_radius_i_a - mean_radius_i_e) / mean_radius_i_e)\r\n print(sub_radius_i)\r\n compare_radius.append(sub_radius_i)\r\n\r\n error_i = math.sqrt((sub_radius_i ** 2) + (sub_radius_i ** 2))\r\n print(error_i)\r\n error.append(error_i)\r\n\r\n df_c['comparison_nr'] = compare_nr\r\n df_c['compare_raius'] = compare_radius\r\n df_c['error'] = error\r\n\r\n save_name = 'droplets_analysis_comparison.csv'\r\n complete_file = df_c.to_csv(os.path.join(output_path, save_name))\r\n return complete_file\r\n\r\ndef read_parameters(file_name):\r\n if(file_name):\r\n with open(file_name, 'r') as f:\r\n return json.load(f)\r\n\r\ndef main():\r\n #from pudb.remote import set_trace; set_trace(term_size=(160, 40), host='0.0.0.0', port=6900)\r\n input_dir = os.path.abspath('./data')\r\n output_dir = os.path.abspath('./data/output')\r\n if(path.exists(output_dir)):\r\n print(\"Output folder already exists. 
Please, remove output folders.\")\r\n return None\r\n #last_char = output_dir[:-1]\r\n #if(last_char.isdigit()):\r\n # nr = int(last_char) + 1\r\n # os.mkdir(os.path.abspath('/data/output' + '_' + str(nr)))\r\n # output_path = os.path.abspath('/data/output' +'_' + str(nr))\r\n #else:\r\n # nr = 2\r\n # os.mkdir(os.path.abspath('/data/output' + '_' + str(nr)))\r\n # output_path = os.path.abspath('/data/output' +'_' + str(nr))\r\n else:\r\n os.mkdir(output_dir)\r\n output_path = output_dir \r\n\r\n\r\n output_gray = os.path.join(output_path, 'gray/')\r\n os.mkdir(output_gray)\r\n\r\n output_thresh = os.path.join(output_path, 'thresh/')\r\n os.mkdir(output_thresh)\r\n output_files = os.path.join(output_path, 'files/')\r\n os.mkdir(output_files)\r\n output_files_analysis = os.path.join(output_path, 'files_analysis/')\r\n os.mkdir(output_files_analysis)\r\n\r\n output_histograms = os.path.join(output_path, 'histogram/')\r\n os.mkdir(output_histograms)\r\n\r\n output_circles = os.path.join(output_path, 'circles/')\r\n os.mkdir(output_circles)\r\n\r\n for img in glob.glob(\"{}/*jpg\".format(input_dir)):\r\n file_name = os.path.basename(img)[:-4]\r\n parameters = read_parameters(img[:-4] + '.json')\r\n\r\n print(file_name)\r\n img_gray = convert_to_grayscale(img, file_name, output_gray)\r\n cdf_percentage, bins = cumsum_histogram_percentage(img_gray, file_name, output_histograms) #confirm if histogram is well done\r\n img_thresh75 = remove_background(img_gray, file_name, cdf_percentage, bins, output_thresh)\r\n\r\n x_set, y_set, r_set = get_droplets(img_thresh75, file_name, parameters, output_circles)\r\n file_droplets(file_name, output_path, parameters, x_set, y_set, r_set)\r\n analyse_droplets(output_files, output_files_analysis)\r\n #analysis_file = 'droplets_analysis.csv'\r\n #sort_file(os.path.join(output_files_analysis, analysis_file), output_files_analysis)\r\n #analysis_file_sorted = 'droplets_analysis_sorted.csv'\r\n #validation_file = 'vocs_expected_sorted.csv'\r\n\r\n #complete_file = add_columns_validation(os.path.join(output_files_analysis, analysis_file_sorted), os.path.join(output_files_analysis, validation_file), output_files_analysis)\r\n #complete_file = os.path.join(output_files_analysis, 'droplets_analysis_complete.csv')\r\n #validation_file = os.path.join(output_files_analysis, validation_file)\r\n #compare_results(complete_file, output_files_analysis)\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n\r\n\r\n\r\n\r\n\r\n#for i in range(nr_images):\r\n" ]
[ [ "matplotlib.pyplot.legend", "pandas.read_csv", "matplotlib.pyplot.Circle", "matplotlib.pyplot.gcf", "matplotlib.pyplot.tight_layout", "pandas.DataFrame", "matplotlib.pyplot.subplots", "matplotlib.pyplot.xlim", "numpy.array", "matplotlib.pyplot.plot", "numpy.where" ] ]
FedeClaudi/fedes_utils
[ "2ef6f037303fc426d5c5b2851d2c99f17efa4002" ]
[ "fcutils/maths/coordinates.py" ]
[ "import numpy as np\n\n\ndef R(theta):\n \"\"\"\n Returns the rotation matrix for rotating an object\n centered around the origin with a given angle\n\n Arguments:\n theta: angle in degrees\n\n Returns:\n R: 2x2 np.ndarray with rotation matrix\n \"\"\"\n theta = np.radians(theta)\n return np.array(\n [[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]\n )\n\n\ndef M(axis=\"x\"):\n \"\"\"\n Returns a matrix to mirror an object against a given axis\n\n Arguments:\n axis: str. 'x', 'y', 'origin' or 'xy'\n\n Returns:\n M: mirror matrix\n \"\"\"\n if axis == \"x\":\n return np.array([[1, 0], [0, -1]])\n elif axis == \"y\":\n return np.array([[-1, 0], [0, 1]])\n elif axis == \"origin\":\n return np.array([[-1, 0], [0, -1]])\n elif axis == \"xy\":\n return np.array([[0, 1], [1, 0]])\n else:\n raise NotImplementedError(\n f\"Could not recognize axis of mirroring: {axis}\"\n )\n\n\ndef cart2pol(x, y):\n \"\"\"\n Cartesian to polar coordinates\n\n angles in degrees\n \"\"\"\n rho = np.hypot(x, y)\n phi = np.degrees(np.arctan2(y, x))\n return rho, phi\n\n\ndef pol2cart(rho, phi):\n \"\"\"\n Polar to cartesian coordinates\n\n angles in degrees\n \"\"\"\n x = rho * np.cos(np.radians(phi))\n y = rho * np.sin(np.radians(phi))\n return x, y\n" ]
[ [ "numpy.arctan2", "numpy.hypot", "numpy.cos", "numpy.array", "numpy.sin", "numpy.radians" ] ]
algonommy/Analyzing-Apple-Stock-Data-to-Predict-Gain-Loss-leveraging-Machine-Learning-Models
[ "04d5b42d58f64248a45b62c7f7a0835c481c12fb" ]
[ "Dash-app/app.py" ]
[ "import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport pandas as pd\nimport plotly.graph_objs as go\nfrom dash.dependencies import Input, Output\nimport datetime as dt\nimport pandas_datareader as web\n\n\n\napp = dash.Dash()\nserver = app.server\n\nstart = dt.datetime(2000,1,1)\nend = dt.datetime.now()\ndf = web.DataReader('AAPL','yahoo', start, end)\ndf=df.reset_index()\n\ndf[\"Date\"]=pd.to_datetime(df.Date,format=\"%Y-%m-%d\")\ndf.index=df['Date']\n\ndata=df.sort_index(ascending=True,axis=0)\nnew_data=pd.DataFrame(index=range(0,len(df)),columns=['Date','Close'])\n\nfor i in range(0,len(data)):\n new_data[\"Date\"][i]=data['Date'][i]\n new_data[\"Close\"][i]=data[\"Close\"][i]\n \nnew_data=new_data.set_index('Date')\ndataset=new_data.values\n\n\ntickers = ['TSLA','AAPL','FB','MSFT','SBUX']\ndf1 = web.DataReader(tickers, data_source='yahoo', start='2017-01-01', end=dt.datetime.now())\ndf=df1.stack().reset_index().rename(index=str, columns={\"level_1\": \"Symbols\"}).sort_values(['Symbols','Date'])\ndf[\"Date\"]=pd.to_datetime(df.Date,format=\"%Y-%m-%d\")\ndf.index=df['Date']\n\n\nD_validationData= pd.read_csv(\"LSTM_validation.csv\")\nD_train_data= pd.read_csv(\"LSTM_train.csv\")\n\n\nfig2 = go.Figure()\nfig2.add_trace(go.Scatter(x=D_validationData[\"Date\"], y=D_validationData[\"Close\"],\n mode='lines',name='Validation',line=dict(color=\"blue\",width=4)))\nfig2.add_trace(go.Scatter(x=D_validationData[\"Date\"], y=D_validationData[\"Predictions\"],\n mode='lines',name='Stock Price Predicted ',line=dict(color=\"red\",width=4)))\nfig2.add_trace(go.Scatter(x=D_train_data[\"Date\"], y=D_train_data[\"Close\"],\n mode='lines', name='Train',line=dict(color=\"darkblue\",width=4)))\n\n\nfig2.update_layout(hovermode='x unified',\n showlegend=True,\n plot_bgcolor=\"white\",\n paper_bgcolor = \"rgba(0,0,0,0)\",\n xaxis_title=\"Date\",\n yaxis_title=\"Closing Rate\",\n legend_title=\"Data:\",\n margin=dict(t=50,l=200,b=50,r=200),\n \n)\n\nfig2.update_xaxes(showline=True, linewidth=2, linecolor='white', gridcolor='lightgray')\nfig2.update_yaxes(showline=True, linewidth=2, linecolor='white', gridcolor='lightgray')\n\n\nmoving_avg= pd.read_csv(\"test_mov_avg.csv\")\n\n\nfig3 = go.Figure()\nfig3.add_trace(go.Scatter(x=moving_avg[\"date\"], y=moving_avg[\"close\"],\n mode='lines',\n name='Test',\n line=dict(color=\"darkblue\",width=3)))\nfig3.add_trace(go.Scatter(x=moving_avg[\"date\"], y=moving_avg[\"est_N2\"],\n mode='lines',\n name='Stock Price Predicted',\n line=dict(color=\"red\",width=3)))\n\nfig3.update_layout(hovermode='x unified',\n showlegend=True,\n plot_bgcolor=\"white\",\n paper_bgcolor = \"rgba(0,0,0,0)\",\n xaxis_title=\"Date\",\n yaxis_title=\"Closing Rate\",\n legend_title=\"Data:\",\n margin=dict(t=50,l=200,b=50,r=200),\n \n)\nfig3.update_xaxes(showline=True, linewidth=1, linecolor='white', gridcolor='lightgray')\nfig3.update_yaxes(showline=True, linewidth=1, linecolor='white', gridcolor='lightgray')\n\n# Import CSV tree training data\nTree_training= pd.read_csv(\"Tree_training_data.csv\")\nTree_prediction= pd.read_csv(\"Tree_model_prediction.csv\")\n\n\nfigt = go.Figure()\nfigt.add_trace(go.Scatter(x=Tree_training[\"Date\"], y=Tree_training[\"Close\"],\n mode='lines',\n name='Training ',\n line=dict(color=\"darkblue\",width=3)))\nfigt.add_trace(go.Scatter(x=Tree_prediction[\"Date\"], y=Tree_prediction[\"Close\"],\n mode='lines',\n name='Validation',\n line=dict(color=\"blue\",width=4)))\nfigt.add_trace(go.Scatter(x=Tree_prediction[\"Date\"], 
y=Tree_prediction[\"Predictions\"],\n mode='lines', name='Stock Price Predicted',\n line=dict(color=\"red\",width=2)))\n\n\nfigt.update_layout(hovermode='x unified',\n showlegend=True,\n plot_bgcolor=\"white\",\n paper_bgcolor = \"rgba(0,0,0,0)\",\n xaxis_title=\"Date\",\n yaxis_title=\"Closing Rate\",\n legend_title=\"Data:\",\n margin=dict(t=50,l=200,b=50,r=200),\n \n)\nfigt.update_xaxes(showline=True, linewidth=2, linecolor='white', gridcolor='lightgray')\nfigt.update_yaxes(showline=True, linewidth=2, linecolor='white', gridcolor='lightgray')\n\n\n# Linear Regression Model data\nLR_train= pd.read_csv(\"LR_train.csv\")\nLR_prediction= pd.read_csv(\"LR_prediction.csv\")\n\n# Create figure lines for Linear Regression Model\nfigLR = go.Figure()\nfigLR.add_trace(go.Scatter(x=LR_train[\"Date\"], y=LR_train[\"Close\"],\n mode='lines',\n name='Training ',\n line=dict(color=\"darkblue\",width=3)))\nfigLR.add_trace(go.Scatter(x=LR_prediction[\"Date\"], y=LR_prediction[\"Close\"],\n mode='lines',\n name='Validation',\n line=dict(color=\"blue\",width=3)))\nfigLR.add_trace(go.Scatter(x=LR_prediction[\"Date\"], y=LR_prediction[\"Predictionslr\"],\n mode='lines', name='Stock Price Predicted',\n line=dict(color=\"red\",width=3)))\n\n\n\nfigLR.update_layout(hovermode='x unified',\n showlegend=True,\n plot_bgcolor=\"white\",\n paper_bgcolor = \"rgba(0,0,0,0)\",\n xaxis_title=\"Date\",\n yaxis_title=\"Closing Rate\",\n legend_title=\"Data:\",\n margin=dict(t=50,l=200,b=50,r=200),\n \n)\n\nfigLR.update_xaxes(showline=True, linewidth=2, linecolor='white', gridcolor='lightgray')\nfigLR.update_yaxes(showline=True, linewidth=2, linecolor='white', gridcolor='lightgray')\n\n\n# Getting Info for all Models comparison\nl30=dt.datetime.now()- dt.timedelta(30)\nm_actual = web.DataReader('AAPL','yahoo', l30, dt.datetime.now())\nm_actual=m_actual.reset_index()\n\n# COMPLETE code to get models table\nl30=dt.datetime.now()- dt.timedelta(30)\n\n# Actual\nm_actual_df = web.DataReader('AAPL','yahoo', l30, dt.datetime.now())\nm_actual_df=m_actual_df.reset_index()\nm_actual=m_actual_df[['Date','Close']]\nm_actual[\"Model\"]=\"Actual Close Price\"\nm_actual.rename(columns={'Close':'Predictions'}, inplace=True)\n\n# LR\nm_LR=LR_prediction[['Date','Predictionslr']]\nm_LR[\"Model\"]=\"Linear Regression Model\"\nm_LR.rename(columns={'Predictionslr':'Predictions'}, inplace=True)\n\n\n# Tree Prediction\nm_tree=Tree_prediction[['Date','Predictions']]\nm_tree[\"Model\"]=\"Tree Model\"\n\n# Moving Average\nm_MA=moving_avg[['date','est_N2']]\nm_MA[\"Model\"]=\"Moving Average Model\"\nm_MA.rename(columns={'est_N2':'Predictions','date':\"Date\"}, inplace=True)\nm_MA[\"Date\"]=pd.to_datetime(m_MA.Date,format=\"%Y-%m-%d\")\nm_MA1 = m_MA[(m_MA['Date']>(dt.datetime.now()- dt.timedelta(30))) & (m_MA['Date']<dt.datetime.now())] \n\n# Long short-term memory\nD_validationData[\"Date\"]=pd.to_datetime(D_validationData.Date,format=\"%Y-%m-%d\")\nm_LSTM=D_validationData[['Date','Predictions']]\nm_LSTM[\"Model\"]=\"Long Short-Term Memory\"\nm_LSTM1 = m_LSTM[(m_LSTM['Date']>(dt.datetime.now()- dt.timedelta(30))) & (m_LSTM['Date']<dt.datetime.now())] \n\n\n# Model table\nframes=[m_tree,m_actual,m_LR,m_MA1,m_LSTM1]\nmodels=pd.concat(frames)\nmodels\n\n# HTML code to render results- Layout formating\n\napp.layout = html.Div([\n \n html.H1(\"Apple Stock Price Prediction- Machine Learning & Python\", style={'textAlign': 'center','color':'#07098d'}),\n html.H2(\"\", style={'textAlign': 'center','color':'#07098d'}),\n dcc.Tabs(id=\"tabs\", children=[\n \n 
dcc.Tab(label='LSTM',children=[\n\t\t\thtml.Div([\t\t\t\t\n\t\t\t html.H2(\"Long Short-Term Memory (LSTM)\", \n style={'textAlign': 'center'}),\n html.H3(\"On this LSTM Model 75% of the data was trained and 25% was tested to predict Apple stock price using the past 60 days closing price.\", \n style={'textAlign': 'left'}),\n html.H3(\"The data was taken from Yahoo from 2010-01-04 to 2021-03-16. The LSTM-rsme is 24.37\", \n style={'textAlign': 'left'}),\n html.H3(\"The predicted price for the March 17th = USS 123.5\", \n style={'textAlign': 'left'}),\n html.H3(\"The predicted price for the March 18th = USS 128.5 \", \n style={'textAlign': 'left'}),\n\n\n dcc.Graph(id = 'GrapLTSM',\n figure = fig2),\n ]\n ), \t\t\n ]),\n\n\n dcc.Tab(label='Moving Average',children=[\n\t\t\thtml.Div([\t\t\t\t\n\t\t\t html.H2(\"Moving Average to predict Apple Stock Price\", \n style={'textAlign': 'center'}),\n html.H3(\"The moving average is a simple technical analysis tool that smooths out price data by creating a constantly updating average.\", \n style={'textAlign': 'left'}),\n\n dcc.Graph(id = 'GrapMovingAvg',\n figure = fig3),\n ]\n ),\n ]),\n\n dcc.Tab(label='Tree Model and Linear Regression',children=[\n\t\t\thtml.Div([\t\t\t\t\n\t\t\t html.H2(\"Apple Sock Price Prediction For the Last 30 Days - Tree Prediction Model\", \n style={'textAlign': 'center'}),\n # html.H3(\"Tree Model and Linear Regression Stock Prediction Tree Model and Linear Regression Stock Prediction Tree Model and Linear Regression Stock Prediction\", \n # style={'textAlign': 'left'}),\n\n dcc.Graph(id = 'GrapTreeLR',\n figure = figt),\n \n\n html.H2(\"Apple Stock Price For The Last 30 Days - Linear Regression Model\", \n style={'textAlign': 'center'}),\n # html.H3(\"Tree Model and Linear Regression Stock Prediction Tree Model and Linear Regression Stock Prediction Tree Model and Linear Regression Stock Prediction\", \n # style={'textAlign': 'left'}),\n\n dcc.Graph(id = 'GrapLR',\n figure = figLR),\n ],className=\"container\"),\n \n ]),\n\n dcc.Tab(label='Model Comparison', children=[\n html.Div([\n html.H2(\"Select Model to compare\", \n style={'textAlign': 'center','color':'#07098d'}),\n \n dcc.Dropdown(id='my-dropdownM',\n options=[{'label': 'Long Short-Term Memory', 'value': 'Long Short-Term Memory'},\n {'label': 'Moving Average','value': 'Moving Average Model'},\n {'label': 'Tree Model','value': 'Tree Model'}, \n {'label': 'Actual Close Price','value' :'Actual Close Price'},\n {'label': 'Linear Regression', 'value': 'Linear Regression Model'}], \n multi=True,value=['Actual Close Price'],\n style={\"display\": \"block\", \"margin-left\": \"auto\", \n \"margin-right\": \"auto\", \"width\": \"60%\"}),\n dcc.Graph(id='models'),\n \n ], className=\"container\"),\n ]),\n\n\n\n dcc.Tab(label='Stock Data other Companies', children=[\n html.Div([\n html.H2(\"Stocks Price comparison High and Lows\", \n style={'textAlign': 'center','color':'#07098d'}),\n \n dcc.Dropdown(id='my-dropdown',\n options=[{'label': 'Tesla', 'value': 'TSLA'},\n {'label': 'Starbucks','value': 'SBUX'},\n {'label': 'Apple','value': 'AAPL'}, \n {'label': 'Facebook', 'value': 'FB'}, \n {'label': 'Microsoft','value': 'MSFT'}], \n multi=True,value=['SBUX'],\n style={\"display\": \"block\", \"margin-left\": \"auto\", \n \"margin-right\": \"auto\", \"width\": \"60%\"}),\n dcc.Graph(id='highlow'),\n\n html.H2(\"Stocks Market Volume\", style={'textAlign': 'center'}),\n \n dcc.Dropdown(id='my-dropdown2',\n options=[{'label': 'Tesla', 'value': 'SBUX'},\n {'label': 'Starbucks', 
'value': 'SBUX'},\n {'label': 'Apple','value': 'AAPL'}, \n {'label': 'Facebook', 'value': 'FB'},\n {'label': 'Microsoft','value': 'MSFT'}], \n multi=True,value=['SBUX'],\n style={\"display\": \"block\", \"margin-left\": \"auto\", \n \"margin-right\": \"auto\", \"width\": \"60%\"}),\n dcc.Graph(id='volume')\n ], className=\"container\"),\n ])\n ])\n])\n\n\n\n@app.callback(Output('highlow', 'figure'),\n [Input('my-dropdown', 'value')])\ndef update_graph(selected_dropdown):\n dropdown = {\"TSLA\": \"Tesla\",\"SBUX\":\"Starbucks\",\"AAPL\": \"Apple\",\"FB\": \"Facebook\",\"MSFT\": \"Microsoft\",}\n trace1 = []\n trace2 = []\n for stock in selected_dropdown:\n trace1.append(\n go.Scatter(x=df[df[\"Symbols\"] == stock][\"Date\"],\n y=df[df[\"Symbols\"] == stock][\"High\"],\n mode='lines', opacity=0.7, \n name=f'High {dropdown[stock]}',textposition='bottom center'))\n trace2.append(\n go.Scatter(x=df[df[\"Symbols\"] == stock][\"Date\"],\n y=df[df[\"Symbols\"] == stock][\"Low\"],\n mode='lines', opacity=0.6,\n name=f'Low {dropdown[stock]}',textposition='bottom center'))\n traces = [trace1, trace2]\n data = [val for sublist in traces for val in sublist]\n figure = {'data': data,\n 'layout': go.Layout(colorway=[\"#5E0DAC\", '#FF4F00', '#375CB1', \n '#FF7400', '#FFF400', '#FF0056'],\n height=600,\n title=f\"High and Low Prices for {', '.join(str(dropdown[i]) for i in selected_dropdown)} Over Time\",\n xaxis={\"title\":\"Date\",\n 'rangeselector': {'buttons': list([{'count': 1, 'label': '1M', \n 'step': 'month', \n 'stepmode': 'backward'},\n {'count': 6, 'label': '6M', \n 'step': 'month', \n 'stepmode': 'backward'},\n {'step': 'all'}])},\n 'rangeslider': {'visible': True}, 'type': 'date'},\n yaxis={\"title\":\"Price (USD)\"})}\n return figure\n\n\n@app.callback(Output('volume', 'figure'),\n [Input('my-dropdown2', 'value')])\ndef update_graph(selected_dropdown_value):\n dropdown = {\"TSLA\": \"Tesla\",\"AAPL\": \"Apple\",\"FB\": \"Facebook\",\"SBUX\":\"Starbucks\",\"MSFT\": \"Microsoft\",}\n trace1 = []\n for stock in selected_dropdown_value:\n trace1.append(\n go.Scatter(x=df[df[\"Symbols\"] == stock][\"Date\"],\n y=df[df[\"Symbols\"] == stock][\"Volume\"],\n mode='lines', opacity=0.7,\n name=f'Volume {dropdown[stock]}', textposition='bottom center'))\n traces = [trace1]\n data = [val for sublist in traces for val in sublist]\n figure = {'data': data, \n 'layout': go.Layout(colorway=[\"#5E0DAC\", '#FF4F00', '#375CB1', \n '#FF7400', '#FFF400', '#FF0056'],\n height=600,\n title=f\"Market Volume for {', '.join(str(dropdown[i]) for i in selected_dropdown_value)} Over Time\",\n xaxis={\"title\":\"Date\",\n 'rangeselector': {'buttons': list([{'count': 1, 'label': '1M', \n 'step': 'month', \n 'stepmode': 'backward'},\n {'count': 6, 'label': '6M',\n 'step': 'month', \n 'stepmode': 'backward'},\n {'step': 'all'}])},\n 'rangeslider': {'visible': True}, 'type': 'date'},\n yaxis={\"title\":\"Transactions Volume\"})}\n return figure\n\n@app.callback(Output('models', 'figure'),\n [Input('my-dropdownM', 'value')])\n\ndef update_graph(mydropdownM):\n dropdown = {\"Actual Close Price\":\"Actual Close Price\",\"Long Short-Term Memory\": \"Long Short-Term Memory\",\"Moving Average Model\":\"Moving Average\",\"Tree Model\": \"Tree Model\",\"Linear Regression Model\": \"Linear Regression\",}\n trace1=[] \n for model in mydropdownM: \n trace1.append(\n go.Scatter(x=models[models[\"Model\"] == model][\"Date\"],\n y=models[models[\"Model\"] == model][\"Predictions\"],\n mode='lines', opacity=0.7, \n name=f' 
{dropdown[model]}',textposition='bottom center'))\n traces = [trace1] \n data = [val for sublist in traces for val in sublist]\n figure = {'data': data,\n 'layout': go.Layout(colorway=[\"#5E0DAC\", '#FF4F00', '#375CB1', \n '#FF7400', '#FFF400', '#FF0056'],\n height=400,\n title=f\"ML Models: {', '.join(str(dropdown[i]) for i in mydropdownM)} \",\n xaxis={\"title\":\"Date\",\n 'rangeselector': {'buttons': list([{'count': 1, 'label': '1M', \n 'step': 'month', \n 'stepmode': 'backward'},\n {'count': 6, 'label': '6M', \n 'step': 'month', \n 'stepmode': 'backward'},\n {'step': 'all'}])},\n 'rangeslider': {'visible': False}, 'type': 'date'},\n yaxis={\"title\":\"Price (USD)\"})}\n return figure\n\n\nif __name__=='__main__':\n\tapp.run_server(debug=True)" ]
[ [ "pandas.read_csv", "pandas.to_datetime", "pandas.concat" ] ]
mrmotallebi/pytorch_geometric
[ "5d768659d2a54544219c057ad05172ca55b43119" ]
[ "torch_geometric/nn/pool/asap.py" ]
[ "from typing import Union, Optional, Callable\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.nn import Linear\nfrom torch_scatter import scatter\nfrom torch_sparse import SparseTensor\n\nfrom torch_geometric.nn import LEConv\nfrom torch_geometric.utils import softmax\nfrom torch_geometric.nn.pool.topk_pool import topk\nfrom torch_geometric.utils import add_remaining_self_loops\n\n\nclass ASAPooling(torch.nn.Module):\n r\"\"\"The Adaptive Structure Aware Pooling operator from the\n `\"ASAP: Adaptive Structure Aware Pooling for Learning Hierarchical\n Graph Representations\" <https://arxiv.org/abs/1911.07979>`_ paper.\n\n Args:\n in_channels (int): Size of each input sample.\n ratio (float or int): Graph pooling ratio, which is used to compute\n :math:`k = \\lceil \\mathrm{ratio} \\cdot N \\rceil`, or the value\n of :math:`k` itself, depending on whether the type of :obj:`ratio`\n is :obj:`float` or :obj:`int`. (default: :obj:`0.5`)\n GNN (torch.nn.Module, optional): A graph neural network layer for\n using intra-cluster properties.\n Especially helpful for graphs with higher degree of neighborhood\n (one of :class:`torch_geometric.nn.conv.GraphConv`,\n :class:`torch_geometric.nn.conv.GCNConv` or\n any GNN which supports the :obj:`edge_weight` parameter).\n (default: :obj:`None`)\n dropout (float, optional): Dropout probability of the normalized\n attention coefficients which exposes each node to a stochastically\n sampled neighborhood during training. (default: :obj:`0`)\n negative_slope (float, optional): LeakyReLU angle of the negative\n slope. (default: :obj:`0.2`)\n add_self_loops (bool, optional): If set to :obj:`True`, will add self\n loops to the new graph connectivity. (default: :obj:`False`)\n **kwargs (optional): Additional parameters for initializing the\n graph neural network layer.\n \"\"\"\n def __init__(self, in_channels: int, ratio: Union[float, int] = 0.5,\n GNN: Optional[Callable] = None, dropout: float = 0.0,\n negative_slope: float = 0.2, add_self_loops: bool = False,\n **kwargs):\n super(ASAPooling, self).__init__()\n\n self.in_channels = in_channels\n self.ratio = ratio\n self.negative_slope = negative_slope\n self.dropout = dropout\n self.GNN = GNN\n self.add_self_loops = add_self_loops\n\n self.lin = Linear(in_channels, in_channels)\n self.att = Linear(2 * in_channels, 1)\n self.gnn_score = LEConv(self.in_channels, 1)\n if self.GNN is not None:\n self.gnn_intra_cluster = GNN(self.in_channels, self.in_channels,\n **kwargs)\n self.reset_parameters()\n\n def reset_parameters(self):\n self.lin.reset_parameters()\n self.att.reset_parameters()\n self.gnn_score.reset_parameters()\n if self.GNN is not None:\n self.gnn_intra_cluster.reset_parameters()\n\n def forward(self, x, edge_index, edge_weight=None, batch=None):\n N = x.size(0)\n\n edge_index, edge_weight = add_remaining_self_loops(\n edge_index, edge_weight, fill_value=1, num_nodes=N)\n\n if batch is None:\n batch = edge_index.new_zeros(x.size(0))\n\n x = x.unsqueeze(-1) if x.dim() == 1 else x\n\n x_pool = x\n if self.GNN is not None:\n x_pool = self.gnn_intra_cluster(x=x, edge_index=edge_index,\n edge_weight=edge_weight)\n\n x_pool_j = x_pool[edge_index[0]]\n x_q = scatter(x_pool_j, edge_index[1], dim=0, reduce='max')\n x_q = self.lin(x_q)[edge_index[1]]\n\n score = self.att(torch.cat([x_q, x_pool_j], dim=-1)).view(-1)\n score = F.leaky_relu(score, self.negative_slope)\n score = softmax(score, edge_index[1], num_nodes=N)\n\n # Sample attention coefficients stochastically.\n score = F.dropout(score, 
p=self.dropout, training=self.training)\n\n v_j = x[edge_index[0]] * score.view(-1, 1)\n x = scatter(v_j, edge_index[1], dim=0, reduce='add')\n\n # Cluster selection.\n fitness = self.gnn_score(x, edge_index).sigmoid().view(-1)\n perm = topk(fitness, self.ratio, batch)\n x = x[perm] * fitness[perm].view(-1, 1)\n batch = batch[perm]\n\n # Graph coarsening.\n row, col = edge_index\n A = SparseTensor(row=row, col=col, value=edge_weight,\n sparse_sizes=(N, N))\n S = SparseTensor(row=row, col=col, value=score, sparse_sizes=(N, N))\n S = S[:, perm]\n\n A = S.t() @ A @ S\n\n if self.add_self_loops:\n A = A.fill_diag(1.)\n else:\n A = A.remove_diag()\n\n row, col, edge_weight = A.coo()\n edge_index = torch.stack([row, col], dim=0)\n\n return x, edge_index, edge_weight, batch, perm\n\n def __repr__(self):\n return '{}({}, ratio={})'.format(self.__class__.__name__,\n self.in_channels, self.ratio)\n" ]
[ [ "torch.stack", "torch.nn.Linear", "torch.nn.functional.dropout", "torch.nn.functional.leaky_relu", "torch.cat" ] ]
jameshgrn/sliderule-python
[ "47fad1465358956a876c9680dd55e535ab5bdcb7" ]
[ "sliderule/ipysliderule.py" ]
[ "# Copyright (c) 2021, University of Washington\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the University of Washington nor the names of its\n# contributors may be used to endorse or promote products derived from this\n# software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF WASHINGTON AND CONTRIBUTORS\n# “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED\n# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF WASHINGTON OR\n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR\n# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF\n# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport os\nimport sys\nimport copy\nimport datetime\nimport numpy as np\nfrom traitlets.utils.bunch import Bunch\nimport sliderule.io\n\n# imports with warnings if not present\ntry:\n import ipywidgets\nexcept ModuleNotFoundError as e:\n sys.stderr.write(\"Warning: missing packages, some functions will throw an exception if called. (%s)\\n\" % (str(e)))\ntry:\n import tkinter.filedialog\nexcept ModuleNotFoundError as e:\n sys.stderr.write(\"Warning: missing packages, some functions will throw an exception if called. (%s)\\n\" % (str(e)))\ntry:\n import IPython.display\nexcept ModuleNotFoundError as e:\n sys.stderr.write(\"Warning: missing packages, some functions will throw an exception if called. (%s)\\n\" % (str(e)))\n\n# imports that raise error if not present\ntry:\n import ipyleaflet\nexcept ModuleNotFoundError as e:\n sys.stderr.write(\"Error: missing required packages. (%s)\\n\" % (str(e)))\n raise\n\ntry:\n import xyzservices\nexcept ModuleNotFoundError as e:\n sys.stderr.write(\"Error: missing required packages. 
(%s)\\n\" % (str(e)))\n raise\n\nclass widgets:\n def __init__(self):\n # dropdown menu for setting asset\n self.asset = ipywidgets.Dropdown(\n options=['atlas-local', 'atlas-s3', 'nsidc-s3'],\n value='nsidc-s3',\n description='Asset:',\n disabled=False,\n )\n\n # dropdown menu for setting data release\n self.release = ipywidgets.Dropdown(\n options=['003', '004'],\n value='004',\n description='Release:',\n disabled=False,\n )\n\n # dropdown menu for setting surface type\n # 0-land, 1-ocean, 2-sea ice, 3-land ice, 4-inland water\n surface_type_options = [\n 'Land',\n 'Ocean',\n 'Sea ice',\n 'Land ice',\n 'Inland water'\n ]\n self.surface_type = ipywidgets.Dropdown(\n options=surface_type_options,\n value='Land',\n description='Surface Type:',\n disabled=False,\n )\n\n # slider for setting length of ATL06-SR segment in meters\n self.length = ipywidgets.IntSlider(\n value=40,\n min=5,\n max=200,\n step=5,\n description='Length:',\n disabled=False,\n continuous_update=False,\n orientation='horizontal',\n readout=True,\n readout_format='d'\n )\n\n # slider for setting step distance for successive segments in meters\n self.step = ipywidgets.IntSlider(\n value=20,\n min=5,\n max=200,\n step=5,\n description='Step:',\n disabled=False,\n continuous_update=False,\n orientation='horizontal',\n readout=True,\n readout_format='d'\n )\n\n # slider for setting confidence level for PE selection\n # eventually would be good to switch this to a IntRangeSlider with value=[0,4]\n self.confidence = ipywidgets.IntSlider(\n value=4,\n min=0,\n max=4,\n step=1,\n description='Confidence:',\n disabled=False,\n continuous_update=False,\n orientation='horizontal',\n readout=True,\n readout_format='d'\n )\n\n # selection for land surface classifications\n land_options = [\n 'atl08_noise',\n 'atl08_ground',\n 'atl08_canopy',\n 'atl08_top_of_canopy',\n 'atl08_unclassified'\n ]\n self.land_class = ipywidgets.SelectMultiple(\n options=land_options,\n description='Land Class:',\n disabled=False\n )\n\n # slider for setting maximum number of iterations\n # (not including initial least-squares-fit selection)\n self.iteration = ipywidgets.IntSlider(\n value=1,\n min=0,\n max=20,\n step=1,\n description='Iterations:',\n disabled=False,\n continuous_update=False,\n orientation='horizontal',\n readout=True,\n readout_format='d'\n )\n\n # slider for setting minimum along track spread\n self.spread = ipywidgets.FloatSlider(\n value=20,\n min=1,\n max=100,\n step=0.1,\n description='Spread:',\n disabled=False,\n continuous_update=False,\n orientation='horizontal',\n readout=True,\n readout_format='0.1f'\n )\n # slider for setting minimum photon event (PE) count\n self.count = ipywidgets.IntSlider(\n value=10,\n min=1,\n max=50,\n step=1,\n description='PE Count:',\n disabled=False,\n continuous_update=False,\n orientation='horizontal',\n readout=True,\n readout_format='d'\n )\n\n # slider for setting minimum height of PE window in meters\n self.window = ipywidgets.FloatSlider(\n value=3,\n min=0.5,\n max=10,\n step=0.1,\n description='Window:',\n disabled=False,\n continuous_update=False,\n orientation='horizontal',\n readout=True,\n readout_format='0.1f'\n )\n\n # slider for setting maximum robust dispersion in meters\n self.sigma = ipywidgets.FloatSlider(\n value=5,\n min=1,\n max=10,\n step=0.1,\n description='Sigma:',\n disabled=False,\n continuous_update=False,\n orientation='horizontal',\n readout=True,\n readout_format='0.1f'\n )\n\n # dropdown menu for setting map projection for polygons\n # Global: Web Mercator 
(EPSG:3857)\n # North: Alaska Polar Stereographic (EPSG:5936)\n # South: Polar Stereographic South (EPSG:3031)\n projection_list = ['Global','North','South']\n self.projection = ipywidgets.Dropdown(\n options=projection_list,\n value='Global',\n description='Projection:',\n disabled=False,\n )\n\n # button and label for output file selection\n self.file = copy.copy(self.filename)\n self.savebutton = ipywidgets.Button(\n description=\"Save As\"\n )\n self.savelabel = ipywidgets.Text(\n value=self.file,\n disabled=False\n )\n # connect fileselect button with action\n self.savebutton.on_click(self.saveas_file)\n self.savelabel.observe(self.set_savefile)\n # create hbox of file selection\n if os.environ.get(\"DISPLAY\"):\n self.filesaver = ipywidgets.HBox([\n self.savebutton,\n self.savelabel\n ])\n else:\n self.filesaver = copy.copy(self.savelabel)\n\n # button and label for input file selection\n self.loadbutton = ipywidgets.Button(\n description=\"File select\"\n )\n self.loadlabel = ipywidgets.Text(\n value='',\n disabled=False\n )\n # connect fileselect button with action\n self.loadbutton.on_click(self.select_file)\n self.loadlabel.observe(self.set_loadfile)\n # create hbox of file selection\n if os.environ.get(\"DISPLAY\"):\n self.fileloader = ipywidgets.HBox([\n self.loadbutton,\n self.loadlabel\n ])\n else:\n self.fileloader = copy.copy(self.loadlabel)\n\n def saveas_file(self, b):\n \"\"\"function for file save\n \"\"\"\n IPython.display.clear_output()\n root = tkinter.Tk()\n root.withdraw()\n root.call('wm', 'attributes', '.', '-topmost', True)\n filetypes = ((\"HDF5 file\", \"*.h5\"),\n (\"netCDF file\", \"*.nc\"),\n (\"All Files\", \"*.*\"))\n b.files = tkinter.filedialog.asksaveasfilename(\n initialfile=self.file,\n defaultextension='h5',\n filetypes=filetypes)\n self.savelabel.value = b.files\n self.file = b.files\n return self\n\n def set_savefile(self, sender):\n self.file = self.savelabel.value\n\n def select_file(self, b):\n \"\"\"function for file selection\n \"\"\"\n IPython.display.clear_output()\n root = tkinter.Tk()\n root.withdraw()\n root.call('wm', 'attributes', '.', '-topmost', True)\n filetypes = ((\"HDF5 file\", \"*.h5\"),\n (\"netCDF file\", \"*.nc\"),\n (\"All Files\", \"*.*\"))\n b.files = tkinter.filedialog.askopenfilename(\n defaultextension='h5',\n filetypes=filetypes,\n multiple=False)\n self.loadlabel.value = b.files\n self.file = b.files\n return self\n\n def set_loadfile(self, sender):\n self.file = self.loadlabel.value\n\n @property\n def filename(self):\n \"\"\"default input and output file string\n \"\"\"\n # get sliderule submission time\n now = datetime.datetime.now().strftime('%Y%m%d%H%M%S')\n args = (now, self.release.value)\n return \"ATL06-SR_{0}_{1}.h5\".format(*args)\n\n @property\n def format(self):\n \"\"\"return the file format from file string\n \"\"\"\n hdf = ('h5','hdf5','hdf')\n netcdf = ('nc','netcdf','nc3')\n if self.file.endswith(hdf):\n return 'hdf'\n elif self.file.endswith(netcdf):\n return 'netcdf'\n else:\n return ''\n\n# define projections for ipyleaflet tiles\nprojections = Bunch(\n # Alaska Polar Stereographic (WGS84)\n EPSG5936=dict(\n name='EPSG5936',\n custom=True,\n proj4def=\"\"\"+proj=stere +lat_0=90 +lat_ts=90 +lon_0=-150 +k=0.994\n +x_0=2000000 +y_0=2000000 +datum=WGS84 +units=m +no_defs\"\"\",\n origin=[-2.8567784109255e+07, 3.2567784109255e+07],\n resolutions=[\n 238810.813354,\n 119405.406677,\n 59702.7033384999,\n 29851.3516692501,\n 14925.675834625,\n 7462.83791731252,\n 3731.41895865639,\n 1865.70947932806,\n 
932.854739664032,\n 466.427369832148,\n 233.213684916074,\n 116.60684245803701,\n 58.30342122888621,\n 29.151710614575396,\n 14.5758553072877,\n 7.28792765351156,\n 3.64396382688807,\n 1.82198191331174,\n 0.910990956788164,\n 0.45549547826179,\n 0.227747739130895,\n 0.113873869697739,\n 0.05693693484887,\n 0.028468467424435\n ],\n bounds=[\n [-2623285.8808999992907047,-2623285.8808999992907047],\n [6623285.8803000003099442,6623285.8803000003099442]\n ]\n )\n ,\n # Polar Stereographic South (WGS84)\n EPSG3031=dict(\n name='EPSG3031',\n custom=True,\n proj4def=\"\"\"+proj=stere +lat_0=-90 +lat_ts=-71 +lon_0=0 +k=1\n +x_0=0 +y_0=0 +datum=WGS84 +units=m +no_defs\"\"\",\n origin=[-3.06361E7, 3.0636099999999993E7],\n resolutions=[\n 67733.46880027094,\n 33866.73440013547,\n 16933.367200067736,\n 8466.683600033868,\n 4233.341800016934,\n 2116.670900008467,\n 1058.3354500042335,\n 529.1677250021168,\n 264.5838625010584,\n ],\n bounds=[\n [-4524583.19363305,-4524449.487765655],\n [4524449.4877656475,4524583.193633042]\n ]\n )\n)\n\n# attributions for the different basemaps\nglims_attribution = \"\"\"\nImagery reproduced from GLIMS and NSIDC (2005, updated 2018):\nGlobal Land Ice Measurements from Space glacier database. (doi:10.7265/N5V98602)\n\"\"\"\nesri_attribution = \"\"\"\nTiles &copy; Esri &mdash; Esri, DeLorme, NAVTEQ, TomTom, Intermap, iPC,\nUSGS, FAO, NPS, NRCAN, GeoBase, Kadaster NL, Ordnance Survey, Esri Japan,\nMETI, Esri China (Hong Kong), and the GIS User Community\n\"\"\"\nnoaa_attribution = \"\"\"\nImagery provided by NOAA National Centers for Environmental Information (NCEI);\nInternational Bathymetric Chart of the Southern Ocean (IBCSO);\nGeneral Bathymetric Chart of the Oceans (GEBCO).\n\"\"\"\n\n# define background ipyleaflet tiles\nbasemaps = {\n \"Esri\": {\n \"ArcticOceanBase\": {\n \"name\": 'Esri.ArcticOceanBase',\n \"crs\": projections.EPSG5936,\n \"attribution\": esri_attribution,\n \"url\": 'http://server.arcgisonline.com/ArcGIS/rest/services/Polar/Arctic_Ocean_Base/MapServer/tile/{z}/{y}/{x}'\n },\n \"ArcticOceanReference\": {\n \"name\": 'Esri.ArcticOceanReference',\n \"crs\": projections.EPSG5936,\n \"attribution\": esri_attribution,\n \"url\": 'http://server.arcgisonline.com/ArcGIS/rest/services/Polar/Arctic_Ocean_Reference/MapServer/tile/{z}/{y}/{x}'\n },\n \"AntarcticBasemap\": {\n \"name\": 'Esri.AntarcticBasemap',\n \"crs\": projections.EPSG3031,\n \"attribution\":noaa_attribution,\n \"url\": 'https://tiles.arcgis.com/tiles/C8EMgrsFcRFL6LrL/arcgis/rest/services/Antarctic_Basemap/MapServer/tile/{z}/{y}/{x}'\n }\n }\n}\n\n# define background ipyleaflet WMS layers\nlayers = Bunch(\n GLIMS = Bunch(\n glaciers = ipyleaflet.WMSLayer(\n attribution=glims_attribution,\n layers='GLIMS_GLACIERS',\n format='image/png',\n url='https://www.glims.org/mapservice'\n )\n )\n)\n\n# load basemap providers from dict\n# https://github.com/geopandas/xyzservices/blob/main/xyzservices/lib.py\ndef _load_dict(data):\n providers = Bunch()\n for provider_name in data.keys():\n provider = data[provider_name]\n if \"url\" in provider.keys():\n providers[provider_name] = xyzservices.lib.TileProvider(provider)\n else:\n providers[provider_name] = Bunch(\n {i: xyzservices.lib.TileProvider(provider[i]) for i in provider.keys()}\n )\n return providers\n\n# draw ipyleaflet map\nclass leaflet:\n def __init__(self, projection, **kwargs):\n # set default keyword arguments\n kwargs.setdefault('zoom',False)\n kwargs.setdefault('scale',True)\n kwargs.setdefault('cursor',True)\n 
kwargs.setdefault('center',(39,-108))\n kwargs.setdefault('color','green')\n providers = _load_dict(basemaps)\n # create basemap in projection\n if (projection == 'Global'):\n self.map = ipyleaflet.Map(center=kwargs['center'],\n zoom=9, max_zoom=15,\n basemap=ipyleaflet.basemaps.Esri.WorldTopoMap)\n self.map.add_layer(layers.GLIMS.glaciers)\n elif (projection == 'North'):\n self.map = ipyleaflet.Map(center=(90,0),\n zoom=5, max_zoom=24,\n basemap=providers.Esri.ArcticOceanBase,\n crs=projections.EPSG5936)\n self.map.add_layer(providers.Esri.ArcticOceanReference)\n elif (projection == 'South'):\n self.map = ipyleaflet.Map(center=(-90,0),\n zoom=2, max_zoom=9,\n basemap=providers.Esri.AntarcticBasemap,\n crs=projections.EPSG3031)\n # add control for zoom\n if kwargs['zoom']:\n zoom_slider = ipywidgets.IntSlider(description='Zoom level:',\n min=self.map.min_zoom, max=self.map.max_zoom, value=self.map.zoom)\n ipywidgets.jslink((zoom_slider, 'value'), (self.map, 'zoom'))\n zoom_control = ipyleaflet.WidgetControl(widget=zoom_slider,\n position='topright')\n self.map.add_control(zoom_control)\n # add scale bar\n if kwargs['scale']:\n scale_control = ipyleaflet.ScaleControl(position='topright')\n self.map.add_control(scale_control)\n # add label for cursor position\n if kwargs['cursor']:\n self.cursor = ipywidgets.Label()\n label_control = ipyleaflet.WidgetControl(widget=self.cursor,\n position='bottomright')\n self.map.add_control(label_control)\n # keep track of cursor position\n self.map.on_interaction(self.handle_interaction)\n # add control for drawing polygons or bounding boxes\n draw_control = ipyleaflet.DrawControl(polyline={},circlemarker={},\n edit=False)\n shapeOptions = {'color':kwargs['color'],'fill_color':kwargs['color']}\n draw_control.rectangle = dict(shapeOptions=shapeOptions,\n metric=['km','m'])\n draw_control.polygon = dict(shapeOptions=shapeOptions,\n allowIntersection=False,showArea=True,metric=['km','m'])\n # create regions\n self.regions = []\n draw_control.on_draw(self.handle_draw)\n self.map.add_control(draw_control)\n\n # handle cursor movements for label\n def handle_interaction(self, **kwargs):\n if (kwargs.get('type') == 'mousemove'):\n lat,lon = kwargs.get('coordinates')\n lon = sliderule.io.wrap_longitudes(lon)\n self.cursor.value = u\"\"\"Latitude: {d[0]:8.4f}\\u00B0,\n Longitude: {d[1]:8.4f}\\u00B0\"\"\".format(d=[lat,lon])\n\n # keep track of rectangles and polygons drawn on map\n def handle_draw(self, obj, action, geo_json):\n lon,lat = np.transpose(geo_json['geometry']['coordinates'])\n lon = sliderule.io.wrap_longitudes(lon)\n cx,cy = sliderule.io.centroid(lon,lat)\n wind = sliderule.io.winding(lon,lat)\n # set winding to counter-clockwise\n if (wind > 0):\n lon = lon[::-1]\n lat = lat[::-1]\n # create sliderule region from list\n region = sliderule.io.to_region(lon,lat)\n # append coordinates to list\n if (action == 'created'):\n self.regions.append(region)\n elif (action == 'deleted'):\n self.regions.remove(region)\n return self\n\n" ]
[ [ "numpy.transpose" ] ]
brosscle/CT-TIQUA
[ "d56104cd60ffa962afae9506b6bc9d4afc0d0de9" ]
[ "CT_TIQUA/blast_ct/blast_ct/models/deepmedic.py" ]
[ "import torch.nn as nn\nimport torch\nfrom ..models.base import BiomedicalBlock, DownSample, UpSample, PreActBlock, crop_center\n\nSCALE_FACTORS = ((5, 5, 5), (3, 3, 3), (1, 1, 1))\nFEATURE_MAPS = (30, 30, 40, 40, 40, 40, 50, 50)\nFULLY_CONNECTED = (250, 250)\nDROPOUT = (.0, .5, .5)\n\n\nclass Path(BiomedicalBlock):\n def __init__(self, scale_factor, input_channels, feature_maps):\n super().__init__()\n self.layers = list()\n self.scale_factor = tuple(scale_factor)\n\n self.layers.append(DownSample(self.scale_factor))\n for i, feature_map in enumerate(feature_maps):\n in_channels = feature_maps[i - 1] if i > 0 else input_channels\n self.layers.append(PreActBlock(in_channels, feature_map))\n self.layers.append(UpSample(self.scale_factor))\n\n self.path = nn.Sequential(*self.layers)\n\n def forward(self, x, output_size):\n input_size = self.calculate_input_size(output_size)\n out = crop_center(x, input_size)\n out = self.path(out)\n out = crop_center(out, output_size)\n return out\n\n\nclass DeepMedic(BiomedicalBlock):\n def __init__(self,\n input_channels,\n num_classes,\n scale_factors=SCALE_FACTORS,\n feature_maps=FEATURE_MAPS,\n fully_connected=FULLY_CONNECTED,\n dropout=DROPOUT):\n\n super().__init__()\n # assert all scale factors are equal or less than the next one\n assert all([all(l[i] >= l[i + 1] for i in range(len(l) - 1)) for l in [i for i in list(zip(*scale_factors))]])\n self.scale_factors = tuple(scale_factors)\n self.feature_maps = tuple(feature_maps)\n self.output_size = None\n\n self.paths = []\n for i, scale_factor in enumerate(scale_factors):\n path = Path(scale_factor, input_channels, feature_maps)\n self.paths.append(path)\n self.add_module('path' + str(scale_factor) + str(i), path)\n\n assert len(fully_connected) + 1 == len(dropout)\n fms = []\n channels = (feature_maps[-1] * len(self.paths),) + tuple(fully_connected) + (num_classes, )\n for i in range(len(channels[:-1])):\n fms.append(PreActBlock(channels[i], channels[i + 1], kernel_size=(1, 1, 1), dropout_prob=dropout[i]))\n\n self.fully_connected = nn.Sequential(*fms)\n\n # to calculate sizes\n self.layers = self.paths[0].layers\n\n def forward(self, image, **kwargs):\n input_size = tuple(image.shape[2:])\n output_size = self.get_output_size(input_size)\n\n activations = []\n for i, path in enumerate(self.paths):\n out = path(image, output_size)\n activations.append(out)\n\n out = torch.cat(tuple(activations), dim=1)\n out = self.fully_connected(out)\n return out, {}\n" ]
[ [ "torch.nn.Sequential" ] ]
SemyonSinchenko/QOpt
[ "0b273a887f765a16cac706510681c1a3f5901e72" ]
[ "notebooks/ResultViewer.py" ]
[ "#%%\nimport os\nimport pylab\nimport numpy as np\n\n#%%\n\nNUM_EDGES = 2474\nEXACT_SOLUTION = 1430\nsamples = np.loadtxt(os.path.join(\"results\", \"FFNN\", \"100vertexGraph\",\"stateAdvanced_1000steps.txt\"))\n\nloaded_matrix = np.loadtxt(\"data/g05_100.0\", skiprows=0, dtype=np.int32)\nedgelist = [[loaded_matrix[i, 0] - 1, loaded_matrix[i, 1] - 1]\n for i in range(loaded_matrix.shape[0])]\n\n#%%\n\ndef score(state, edges):\n r = -NUM_EDGES\n for e in edges:\n r += state[e[0]] * state[e[1]]\n\n return -r / 2\n\nresults = []\nfor i in range(samples.shape[0]):\n results.append(score(samples[i, :], edgelist))\n\nresults = np.array(results)\n\n#%%\n\npylab.figure(figsize=(8, 4))\npylab.plot(np.arange(results.shape[0]), results, \".-\", label=\"Results\")\npylab.plot(np.arange(results.shape[0]),\n np.ones(results.shape[0]) * EXACT_SOLUTION, \"--\", label=\"Exact\")\npylab.xlabel(\"Sample number\")\npylab.ylabel(\"CutSize\")\npylab.legend()\npylab.grid()\npylab.savefig(os.path.join(\"results\", \"FFNN\", \"100vertexGraph\", \"SamplesResults.png\"))\n\n#%%" ]
[ [ "numpy.array", "numpy.ones", "numpy.arange", "numpy.loadtxt" ] ]
ericleehy/PeekingDuck
[ "8cf1be842235fa60bac13bc466cac09747a780ea" ]
[ "peekingduck/pipeline/nodes/dabble/keypoints_to_3d_loc.py" ]
[ "# Copyright 2022 AI Singapore\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nEstimates the 3D coordinates of a person given 2D pose coordinates.\n\"\"\"\n\nfrom typing import Any, Dict\n\nimport numpy as np\n\nfrom peekingduck.pipeline.nodes.abstract_node import AbstractNode\n\nNOSE = 0\nLEFT_SHOULDER = 5\nRIGHT_SHOULDER = 6\nLEFT_PELVIS = 11\nRIGHT_PELVIS = 12\nTORSO_KEYPOINTS = [NOSE, LEFT_SHOULDER, RIGHT_SHOULDER, LEFT_PELVIS, RIGHT_PELVIS]\n\n\nclass Node(AbstractNode):\n \"\"\"Uses pose keypoint information of the torso to estimate 3D location.\n\n Inputs:\n |keypoints_data|\n\n Outputs:\n |obj_3D_locs_data|\n\n Configs:\n focal_length (:obj:`float`): **default = 1.14**. |br|\n Approximate focal length of webcam used, in metres. Example on\n measuring focal length can be found `here <https://learnopencv.com\n /approximate-focal-length-for-webcams-and-cell-phone-cameras/>`_.\n torso_factor (:obj:`float`): **default = 0.9**. |br|\n A factor used to estimate real-world distance from pixels, based on\n average human torso length in metres. The value varies across\n different camera set-ups, and calibration may be required.\n \"\"\"\n\n def __init__(self, config: Dict[str, Any] = None, **kwargs: Any) -> None:\n super().__init__(config, node_path=__name__, **kwargs)\n\n def run(self, inputs: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"Converts pose keypoints into 3D locations.\"\"\"\n locations = []\n\n for keypoints in inputs[\"keypoints\"]:\n torso_keypoints = self._get_torso_keypoints(keypoints)\n if self._enough_torso_keypoints(torso_keypoints):\n bbox = self._get_bbox(torso_keypoints)\n else:\n bbox = self._get_bbox(keypoints)\n\n point = self._get_3d_point_from_bbox(\n bbox, self.focal_length, self.torso_factor\n )\n locations.append(point)\n\n outputs = {\"obj_3D_locs\": locations}\n\n return outputs\n\n @staticmethod\n def _get_torso_keypoints(keypoints: np.ndarray) -> np.ndarray:\n \"\"\"Filter keypoints to get only selected keypoints for torso\"\"\"\n torso_keypoints = keypoints[TORSO_KEYPOINTS, :] # type: ignore\n # ignore keypoints that are '-1.' 
as below confidence score and are masked\n torso_keypoints = np.reshape(torso_keypoints[torso_keypoints != -1.0], (-1, 2))\n\n return torso_keypoints\n\n @staticmethod\n def _enough_torso_keypoints(torso_keypoints: np.ndarray) -> bool:\n \"\"\"Returns False if not enough keypoints to represent torso\"\"\"\n if torso_keypoints.shape[0] >= 2:\n return True\n return False\n\n @staticmethod\n def _get_bbox(keypoints: np.ndarray) -> np.ndarray:\n \"\"\"Get coordinates of a bbox around keypoints\"\"\"\n top_left_x, top_left_y = keypoints.min(axis=0)\n btm_right_x, btm_right_y = keypoints.max(axis=0)\n\n return np.array([top_left_x, top_left_y, btm_right_x, btm_right_y])\n\n @staticmethod\n def _get_3d_point_from_bbox(\n bbox: np.ndarray, focal_length: float, torso_factor: float\n ) -> np.ndarray:\n \"\"\"Get the 3d coordinates of the centre of a bounding box\"\"\"\n # Subtraction is to make the camera the origin of the coordinate system\n center_2d = ((bbox[0:2] + bbox[2:4]) * 0.5) - np.array([0.5, 0.5])\n torso_height = bbox[3] - bbox[1]\n\n z_coord = (focal_length * torso_factor) / torso_height\n x_coord = (center_2d[0] * torso_factor) / torso_height\n y_coord = (center_2d[1] * torso_factor) / torso_height\n\n return np.array([x_coord, y_coord, z_coord])\n" ]
[ [ "numpy.array", "numpy.reshape" ] ]
hanjoo0211/deep-learning-from-scratch
[ "dae38d476cc5156d6f111179b60c30124b47e59c" ]
[ "predictNumber.py" ]
[ "import numpy as np\nfrom PIL import Image\nfrom ch08.deep_convnet import DeepConvNet\nfrom common.functions import softmax\n\n\ndef predictNumber(img):\n img = img.convert(\"L\") # 흑백처리\n img = np.array(img) / 255 # normalize 해줘야함..\n img = img * -1 + 1 # 흑백반전도 해줘야함.. 검은배경에 흰 글자로 나오도록!\n imgArray = img.reshape(1,28,28,1).transpose(0,3,1,2)\n\n network = DeepConvNet()\n network.load_params(\"./ch08/deep_convnet_params.pkl\")\n\n y = network.predict(imgArray)\n y = softmax(y)\n n = np.argmax(y, axis=1)\n \n return y, n" ]
[ [ "numpy.array", "numpy.argmax" ] ]
ErikEkstedt/Project
[ "c56b852440041775caaa242b7e86779666c0f1c3" ]
[ "gesture/environments/social.py" ]
[ "'''\nsocial movement environment (Roboschool for poses)\n'''\nfrom roboschool.scene_abstract import Scene, SingleRobotEmptyScene\nimport os\nimport numpy as np\nimport gym\nfrom OpenGL import GLE # fix for opengl issues on desktop / nvidia\nimport cv2\n\n\nPATH_TO_CUSTOM_XML = os.path.join(os.path.dirname(__file__), \"xml_files\")\n\n\nclass MyGymEnv(gym.Env):\n ''' OpenAI zGym wrapper\n\n functions:\n\n self._reset : resets the environment (robot)\n self._step : steps, returns s, st, o, ot, reward, done, info\n self._seed : sets seed. self.np_random\n self._render : r\n '''\n\n metadata = {\n 'render.modes': ['human', 'machine', 'target', 'all', 'all_rgb_array'],\n 'video.frames_per_second': 60\n }\n def __init__(self, action_dim=2, state_dim=7, obs_dim=(600, 400, 3)):\n self.scene = None\n self.VIDEO_W = obs_dim[0]\n self.VIDEO_H = obs_dim[1]\n\n self.Human_VIDEO_W = 600 # for human render\n self.Human_VIDEO_H = 400\n\n high = np.ones([action_dim])\n self.action_space = gym.spaces.Box(-high, high)\n\n high = np.inf*np.ones([state_dim])\n self.state_space = gym.spaces.Box(-high, high)\n\n self.observation_space = gym.spaces.Box(low=0, high=255, shape=obs_dim)\n\n self.state_target = None\n self.obs_target = None\n if self.scene is None:\n ''' First reset '''\n self.scene = self.initialize_scene()\n # If load_xml_get_robot() is moved outside this condition after\n # env.reset all states become nan\n self.load_xml_get_robot()\n\n def _seed(self, seed=None):\n self.np_random, seed = gym.utils.seeding.np_random(seed)\n return [seed]\n\n def _reset(self):\n self.get_joint_dicts()\n self.robot_specific_reset()\n for r in self.mjcf:\n r.query_position()\n\n # Important Resets\n self.done = False\n self.frame = 0\n self.reward = 0\n self.camera = self.scene.cpp_world.new_camera_free_float(self.VIDEO_W,\n self.VIDEO_H,\n \"video_camera\")\n self.human_camera = self.scene.cpp_world.new_camera_free_float(self.Human_VIDEO_W,\n self.Human_VIDEO_H,\n \"human_video_camera\")\n\n if self.state_target is None:\n print('Random Targets. 
Use \"env.set_target(state, obs)\"')\n self.state_target = np.random.randint(4)\n self.obs_target = np.random.randint(0, 255, (100,100,3)).astype('uint8')\n\n state_robot = self.calc_state() # pos and speed\n self.potential = self.calc_potential() # potential to target\n obs = self.get_rgb() #observation\n\n return (state_robot, self.state_target, obs, self.obs_target)\n\n def _step(self, a):\n self.apply_action(a) # Singleplayer (originally in a condition)\n self.scene.global_step()\n self.frame += 1\n\n state = self.calc_state() # also calculates self.joints_at_limit\n reward = self.calc_reward(a)\n done = self.stop_condition() # max frame reached?\n self.done = done\n self.reward = reward\n\n obs = self.get_rgb()\n return (state, self.state_target, obs, self.obs_target, reward, bool(done), {})\n\n def _render(self, mode, close):\n def cv2_render(rgb, title='frame'):\n cv2.imshow(title, cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR))\n if cv2.waitKey(1) & 0xFF == ord('q'):\n print('Stop')\n return\n if close:\n return\n if mode=='human':\n self.human_camera_adjust()\n rgb, _, _, _, _ = self.human_camera.render(False, False, False) # render_depth, render_labeling, print_timing)\n rendered_rgb = np.fromstring(rgb, dtype=np.uint8).reshape( (self.Human_VIDEO_H, self.Human_VIDEO_W,3) )\n cv2_render(rendered_rgb, 'human')\n return [True, False, False]\n elif mode==\"machine\":\n self.camera_adjust()\n rgb, _, _, _, _ = self.camera.render(False, False, False) # render_depth, render_labeling, print_timing)\n rendered_rgb = np.fromstring(rgb, dtype=np.uint8).reshape( (self.VIDEO_H,self.VIDEO_W,3) )\n cv2_render(rendered_rgb, 'machine')\n return [False, True, False]\n elif mode==\"target\":\n cv2_render(self.obs_target, 'target')\n return [False, False, True]\n elif mode=='all':\n self._render('human', False)\n self._render('machine', False)\n self._render('target', False)\n return [True, True, True]\n elif mode==\"all_rgb_array\":\n self.camera_adjust()\n rgb, _, _, _, _ = self.camera.render(False, False, False) # render_depth, render_labeling, print_timing)\n machine = np.fromstring(rgb, dtype=np.uint8).reshape( (self.VIDEO_H,self.VIDEO_W,3) )\n\n self.human_camera_adjust()\n rgb, _, _, _, _ = self.human_camera.render(False, False, False) # render_depth, render_labeling, print_timing)\n human = np.fromstring(rgb, dtype=np.uint8).reshape( (self.Human_VIDEO_H, self.Human_VIDEO_W,3) )\n return human, machine, self.obs_target\n else:\n assert(0)\n\n\nclass Base(MyGymEnv):\n def __init__(self, XML_PATH=PATH_TO_CUSTOM_XML,\n robot_name='robot',\n model_xml='NOT/A/FILE.xml',\n ac=2, st=6,\n args=None):\n self.XML_PATH = XML_PATH\n self.model_xml = model_xml\n self.robot_name = robot_name\n if args is None:\n ''' Defaults '''\n self.MAX_TIME = 300\n self.potential_constant = 100\n self.electricity_cost = -2.0 # cost for using motors -- this parameter should be carefully tuned against reward for making progress, other values less improtant\n self.stall_torque_cost = -0.1 # cost for running electric current through a motor even at zero rotational speed, small\n self.joints_at_limit_cost = -0.2 # discourage stuck joints\n\n self.reward_constant1 = 1\n self.reward_constant2 = 1\n\n # Scene\n self.gravity = 9.81\n self.timestep = 0.0165/4\n self.frame_skip = 1\n\n # Robot\n self.power = 0.5\n MyGymEnv.__init__(self, action_dim=ac, state_dim=st)\n else:\n self.MAX_TIME=args.MAX_TIME\n\n # Reward penalties/values\n self.potential_constant = args.potential_constant\n self.electricity_cost = args.electricity_cost\n 
self.stall_torque_cost = args.stall_torque_cost\n self.joints_at_limit_cost = args.joints_at_limit_cost\n self.MAX_TIME = args.MAX_TIME\n self.reward_constant1 = args.r1\n self.reward_constant2 = args.r2\n\n # Scene\n self.gravity = args.gravity\n self.timestep = 0.0165/4\n self.frame_skip = 1\n\n # Robot\n self.power = args.power # 0.5\n MyGymEnv.__init__(self,\n action_dim=ac,\n state_dim=st,\n obs_dim=(args.video_w, args.video_h, args.video_c))\n\n def initialize_scene(self):\n return Scene(self.gravity, self.timestep, self.frame_skip)\n\n def apply_action(self, a):\n assert( np.isfinite(a).all() )\n for i, m, power in zip(range(len(self.motors)), self.motors, self.motor_power):\n m.set_motor_torque( 0.05*float(power*self.power*np.clip(a[i], -1, +1)) )\n\n def stop_condition(self):\n max_time = False\n if self.frame>=self.MAX_TIME:\n max_time = True\n return max_time\n\n def load_xml_get_robot(self, verbose=False):\n self.mjcf = self.scene.cpp_world.load_mjcf(\n os.path.join(os.path.dirname(__file__),\n \"xml_files/\",\n self.model_xml))\n self.ordered_joints = []\n self.jdict = {}\n self.parts = {}\n self.frame = 0\n self.done = 0\n self.reward = 0\n for r in self.mjcf:\n if verbose:\n print('Load XML Model')\n print('Path:', os.path.join(self.XML_PATH, self.model_xml))\n print(\"ROBOT '%s'\" % r.root_part.name)\n # store important parts\n if r.root_part.name==self.robot_name:\n self.cpp_robot = r\n self.robot_body = r.root_part\n\n for part in r.parts:\n if verbose: print(\"\\tPART '%s'\" % part.name)\n self.parts[part.name] = part\n if part.name==self.robot_name:\n self.cpp_robot = r\n self.robot_body = part\n\n for j in r.joints:\n if verbose:\n print(\"\\tALL JOINTS '%s' limits = %+0.2f..%+0.2f \\\n effort=%0.3f speed=%0.3f\" % ((j.name,) + j.limits()))\n j.power_coef = 100.0\n self.ordered_joints.append(j)\n self.jdict[j.name] = j\n\n def get_joint_dicts(self, verbose=False):\n ''' This function separates all parts/joints by containing `robot` or `target`.'''\n self.robot_joints, self.robot_parts = self.get_joints_parts_by_name('robot')\n self.target_joints, self.target_parts = self.get_joints_parts_by_name('target') # used only in SocialReacher_targets\n if verbose:\n print('{}\\n'.format(self.robot_joints))\n print('{}\\n'.format(self.robot_parts))\n print('{}\\n'.format(self.target_joints))\n assert(self.cpp_robot)\n\n def get_joints_parts_by_name(self, name):\n joints, parts = {}, {}\n for jname, joint in self.jdict.items():\n if name in jname:\n joints[jname] = joint\n for jname, part in self.parts.items():\n if name in jname:\n parts[jname] = part\n return joints, parts\n\n\nclass SocialReacher(Base):\n def __init__(self, args=None):\n Base.__init__(self, XML_PATH=PATH_TO_CUSTOM_XML,\n robot_name='robot_arm',\n model_xml='SocialPlane.xml',\n ac=2, st=6, args=args)\n print('I am', self.model_xml)\n\n def set_target(self, targets):\n ''' targets should be a\n list [numpy.ndarray, numpy.ndarray]\n\n state.shape (N,)\n obs.shape (W,H,C)\n '''\n self.state_target = targets[0]\n self.obs_target = targets[1]\n assert type(targets[0]) is np.ndarray, 'state target must be numpy'\n assert type(targets[1]) is np.ndarray, 'obs target must be numpy'\n\n def robot_specific_reset(self):\n self.motor_names = [\"robot_shoulder_joint_z\", \"robot_elbow_joint\"]\n self.motor_power = [100, 100]\n self.motors = [self.jdict[n] for n in self.motor_names]\n\n self.robot_reset()\n self.calc_robot_keypoints()\n\n def robot_reset(self):\n ''' self.np_random for correct seed. 
'''\n for j in self.robot_joints.values():\n j.reset_current_position(self.np_random.uniform(low=-0.01, high=0.01 ), 0)\n j.set_motor_torque(0)\n\n def calc_robot_keypoints(self):\n ''' gets hand position, target position and the vector in bewteen'''\n elbow_position = np.array(self.parts['robot_elbow'].pose().xyz())[:2]\n hand_position = np.array(self.parts['robot_hand'].pose().xyz())[:2]\n self.robot_key_points = np.concatenate((elbow_position, hand_position))\n\n def calc_reward(self, a):\n ''' Difference potential as reward '''\n potential_old = self.potential\n self.potential = self.calc_potential()\n r = self.reward_constant1 * float(self.potential - potential_old)\n return r\n\n def calc_potential(self):\n self.diff_key_points = self.state_target - self.robot_key_points\n p = -self.potential_constant*np.linalg.norm(self.diff_key_points)\n return np.array(p)\n\n def calc_state(self):\n j = np.array([j.current_relative_position()\n for j in self.robot_joints.values()],\n dtype=np.float32).flatten()\n self.joints_at_limit = np.count_nonzero(np.abs(j[0::2]) > 0.99)\n self.joint_speeds = j[1::2]\n self.calc_robot_keypoints() # calcs target_position, important_pos, to_target_vec\n return np.concatenate((self.robot_key_points, self.joint_speeds))\n\n def get_rgb(self):\n self.camera_adjust()\n rgb, _, _, _, _ = self.camera.render(False, False, False) # render_depth, render_labeling, print_timing)\n rendered_rgb = np.fromstring(rgb, dtype=np.uint8).reshape( (self.VIDEO_H,self.VIDEO_W,3) )\n return rendered_rgb\n\n def camera_adjust(self):\n ''' Vision from straight above '''\n self.camera.move_and_look_at( 0, 0, 1, 0, 0, 0.4)\n\n def human_camera_adjust(self):\n ''' Vision from straight above '''\n self.human_camera.move_and_look_at( 0, 0, 1, 0, 0, 0.4)\n\n\nclass SocialHumanoid(Base):\n def __init__(self, args=None):\n Base.__init__(self, XML_PATH=PATH_TO_CUSTOM_XML,\n robot_name='robot',\n model_xml='SocialHumanoid.xml',\n ac=6, st=18, args=args)\n print('I am', self.model_xml)\n\n def set_target(self, targets):\n ''' targets should be a\n list [numpy.ndarray, numpy.ndarray]\n\n state.shape (N,)\n obs.shape (W,H,C)\n '''\n self.state_target = targets[0]\n self.obs_target = targets[1]\n assert type(targets[0]) is np.ndarray, 'state target must be numpy'\n assert type(targets[1]) is np.ndarray, 'obs target must be numpy'\n\n def robot_specific_reset(self):\n self.motor_names = [\"robot_right_shoulder1\",\n \"robot_right_shoulder2\",\n \"robot_right_elbow\",\n \"robot_left_shoulder1\",\n \"robot_left_shoulder2\",\n \"robot_left_elbow\"]\n self.motor_power = [2000] * len(self.motor_names)\n self.motors = [self.jdict[n] for n in self.motor_names]\n\n self.robot_reset()\n self.calc_robot_keypoints()\n\n def robot_reset(self):\n ''' self.np_random for correct seed. 
'''\n for j in self.robot_joints.values():\n j.reset_current_position(self.np_random.uniform(low=-1.1, high=1.1 ), 0)\n j.set_motor_torque(0)\n\n def calc_robot_keypoints(self):\n ''' gets hand position, target position and the vector in bewteen'''\n left_elbow_position = np.array(self.parts['robot_left_elbow'].pose().xyz())\n left_hand_position = np.array(self.parts['robot_left_hand'].pose().xyz())\n right_elbow_position = np.array(self.parts['robot_right_elbow'].pose().xyz())\n right_hand_position = np.array(self.parts['robot_right_hand'].pose().xyz())\n self.robot_key_points = np.concatenate((left_elbow_position,\n left_hand_position,\n right_elbow_position,\n right_hand_position ))\n\n def calc_reward(self, a):\n ''' Difference potential as reward '''\n potential_old = self.potential\n self.potential = self.calc_potential()\n r = self.reward_constant1 * float(self.potential - potential_old)\n joints_at_limit_cost = float(self.joints_at_limit_cost * self.joints_at_limit)\n return r + joints_at_limit_cost\n\n def calc_potential(self):\n self.diff_key_points = self.state_target - self.robot_key_points\n p = -self.potential_constant*np.linalg.norm(self.diff_key_points)\n return np.array(p)\n\n def calc_state(self):\n j = np.array([j.current_relative_position()\n for j in self.robot_joints.values()],\n dtype=np.float32).flatten()\n self.joints_at_limit = np.count_nonzero(np.abs(j[0::2]) > 0.99)\n self.joint_speeds = j[1::2]\n self.calc_robot_keypoints() # important_pos\n return np.concatenate((self.robot_key_points, self.joint_speeds))\n\n def get_rgb(self):\n self.camera_adjust()\n rgb, _, _, _, _ = self.camera.render(False, False, False) # render_depth, render_labeling, print_timing)\n rendered_rgb = np.fromstring(rgb, dtype=np.uint8).reshape( (self.VIDEO_H,self.VIDEO_W,3) )\n return rendered_rgb\n\n def camera_adjust(self):\n ''' camera used as observation for agent default: (40,40,3)'''\n self.camera.move_and_look_at(1, 0, 0, 0, 0, 0)\n\n def human_camera_adjust(self):\n ''' Camera used for regular rendering. Default: (400, 600, 3)'''\n self.human_camera.move_and_look_at(1, 0, 0, 0, 0, 0)\n\n\n#####-------------------------\n# Nothing done on this... needed for rendering reward functions again.\nclass SocialReacherTargets(Base):\n def __init__(self, args=None):\n Base.__init__(self, XML_PATH=PATH_TO_CUSTOM_XML,\n robot_name='robot_arm',\n model_xml='SocialPlane.xml',\n ac=2, st=6, args=args)\n print('I am', self.model_xml)\n\n def set_target(self, targets):\n ''' targets should be a\n list [numpy.ndarray, numpy.ndarray]\n\n state.shape (N,)\n obs.shape (W,H,C)\n '''\n self.state_target = targets[0]\n self.obs_target = targets[1]\n assert type(targets[0]) is np.ndarray, 'state target must be numpy'\n assert type(targets[1]) is np.ndarray, 'obs target must be numpy'\n\n for j in self.target_joints.values():\n j.reset_current_position(self.np_random.uniform(low=-0.01, high=0.01 ), 0)\n j.set_motor_torque(0)\n\n def robot_specific_reset(self):\n self.motor_names = [\"robot_shoulder_joint_z\", \"robot_elbow_joint\"]\n self.motor_power = [100, 100]\n self.motors = [self.jdict[n] for n in self.motor_names]\n\n self.robot_reset()\n self.calc_robot_keypoints()\n\n def robot_reset(self):\n ''' self.np_random for correct seed. 
'''\n for j in self.robot_joints.values():\n j.reset_current_position(self.np_random.uniform(low=-0.01, high=0.01 ), 0)\n j.set_motor_torque(0)\n\n def calc_robot_keypoints(self):\n ''' gets hand position, target position and the vector in bewteen'''\n elbow_position = np.array(self.parts['robot_elbow'].pose().xyz())[:2]\n hand_position = np.array(self.parts['robot_hand'].pose().xyz())[:2]\n self.robot_key_points = np.concatenate((elbow_position, hand_position))\n\n def calc_reward(self, a):\n ''' Difference potential as reward '''\n potential_old = self.potential\n self.potential = self.calc_potential()\n r = self.reward_constant1 * float(self.potential - potential_old)\n return r\n\n def calc_potential(self):\n self.diff_key_points = self.state_target - self.robot_key_points\n p = -self.potential_constant*np.linalg.norm(self.diff_key_points)\n return np.array(p)\n\n def calc_state(self):\n j = np.array([j.current_relative_position()\n for j in self.robot_joints.values()],\n dtype=np.float32).flatten()\n self.joints_at_limit = np.count_nonzero(np.abs(j[0::2]) > 0.99)\n self.joint_speeds = j[1::2]\n self.calc_robot_keypoints() # calcs target_position, important_pos, to_target_vec\n return np.concatenate((self.robot_key_points, self.joint_speeds))\n\n def get_rgb(self):\n self.camera_adjust()\n rgb, _, _, _, _ = self.camera.render(False, False, False) # render_depth, render_labeling, print_timing)\n rendered_rgb = np.fromstring(rgb, dtype=np.uint8).reshape( (self.VIDEO_H,self.VIDEO_W,3) )\n return rendered_rgb\n\n def camera_adjust(self):\n ''' Vision from straight above '''\n self.camera.move_and_look_at( 0, 0, 1, 0, 0, 0.4)\n\n def human_camera_adjust(self):\n ''' Vision from straight above '''\n self.human_camera.move_and_look_at( 0, 0, 1, 0, 0, 0.4)\n\n#####-------------------------\ndef Social_multiple(Env, args):\n from gesture.environments.SubProcEnv import SubprocVecEnv_Social as SubprocVecEnv\n def multiple_envs(Env, args, rank):\n def _thunk():\n env = Env(args)\n env.seed(args.seed+rank*100)\n return env\n return _thunk\n return SubprocVecEnv([multiple_envs(Env, args, i) for i in range(args.num_proc)])\n\n\n# test functions\ndef test_social(Env, args):\n from gesture.environments.utils import random_run\n from gesture.environments.utils import random_run_with_changing_targets\n from gesture.agent.memory import Targets\n from gesture.utils.utils import load_dict\n\n # === Targets ===\n print('\\nLoading targets from:')\n print('path:\\t', args.test_target_path)\n datadict = load_dict(args.test_target_path)\n targets = Targets(args.num_proc, datadict)\n targets.remove_speed(args.njoints)\n st, ot = targets()\n\n env = Env(args)\n env.seed(args.seed)\n\n print(env)\n print('action space:', env.action_space)\n print('state space:', env.state_space)\n print('target state space:', st.shape)\n print('obs space:', env.observation_space)\n # random_run(env, render=args.render, verbose=args.verbose)\n random_run_with_changing_targets(env, targets, args)\n\n\ndef test_social_parallel(Env, args):\n from gesture.environments.utils import random_run_with_changing_targets_parallel\n from gesture.environments.utils import random_run_parallel\n from gesture.utils.utils import load_dict\n from gesture.agent.memory import Targets\n from torch import load\n\n print('path:\\t', args.test_target_path)\n datadict = load_dict(args.test_target_path)\n targets = Targets(args.num_proc, datadict)\n targets.remove_speed(args.njoints)\n st, ot = targets()[0]\n\n env = Social_multiple(Env, args)\n print(env)\n 
print('action space:', env.action_space)\n print('state space:', env.state_space)\n print('target state space:', st.shape)\n print('obs space:', env.observation_space)\n # random_run_parallel(env, args)\n random_run_with_changing_targets_parallel(env, targets, args)\n\n\nif __name__ == '__main__':\n from gesture.utils.arguments import get_args\n from gesture.environments.utils import env_from_args\n args = get_args()\n Env = env_from_args(args)\n\n if args.num_proc > 1:\n test_social_parallel(Env, args)\n else:\n test_social(Env, args)\n" ]
[ [ "numpy.ones", "numpy.fromstring", "numpy.abs", "numpy.clip", "numpy.array", "numpy.concatenate", "numpy.random.randint", "numpy.linalg.norm", "numpy.isfinite" ] ]
Jamal-dev/asymproj_edge_dnn_tensorFlow2
[ "efb3a3721bc7e39dfa126d3c7af529b091bba9cb" ]
[ "create_dataset_arrays.py" ]
[ "# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Creates dataset files to be used by `deep_edge_trainer.py`.\n\nThe input should be edge-list text file, with lines like \"node1 node2\", where\nthe nodes can be strings or ints. The line depicts a relationship node1-node2\n(undirected) or node1->node2 (directed). The node IDs (e.g. \"node1\") will be\nmapped to integers in [0, |V| - 1], where |V| is the number of graph nodes. The\nmapping will be saved in `index.pkl`.\n\nBy default, the input graph (edge-list) will be partitioned into train and test,\nboth of equal number of edges, where the train partition is connected (following\nnode2vec). \n\nThe output directory will be populated with files:\n train.txt.npy: int32.numpy array (|E|/2, 2) containing training edges.\n test.txt.npy: int32.numpy array (|E|/2, 2) containing test edges.\n train.neg.txt.npy: int32.numpy array (|E|/2, 2) containing negative trai\n edges, sampled from compliment of (train.txt.npy).\n test.neg.txt.npy: int32.numpy array (|E|/2, 2) containing negative test edges,\n sampled from compliment of (test.txt.npy union train.txt.npy)\n train.pairs.<i>.txt.npy: One or more training pair numpy arrays [size (?, 2)].\n\nSee doc of `CreateDatasetFiles()` for complete list of files and description.\n\nTo run, you should download node2vec.py from\nhttps://github.com/aditya-grover/node2vec/blob/master/src/node2vec.py\nand place in the same directory as this file. If you do not download it, it will\nautomatically be downloaded on your behalf.\n\"\"\"\n\nimport pickle as cPickle\nimport copy\nimport random\nimport networkx as nx\nimport numpy\nimport os\nimport sys\n\nimport tensorflow as tf\n\n#from tensorflow import flags\nfrom tensorflow.python.platform import flags\n\nfrom third_party.node2vec import node2vec\nimport argparse\nimport csv\n\ndef check_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--output_dir\", type=str, help=\"Directory where training files will be written.\",\n nargs='?', default=\"output/\", const=\"output/\")\n parser.add_argument(\"--directed\", type=bool, help=\"Must be set if graph is directed.\",\n nargs='?', default=False, const=False)\n parser.add_argument(\"--partition\",type=bool, help=\"If set (default), separates a test split, containing half of the edges. In which case, train graph will be connected.\",\n nargs='?', default=True, const=True)\n parser.add_argument(\"--num_walks\", type=int, help=\"Number of walks per node.\",\n nargs='?', default=5, const=5)\n parser.add_argument(\"--walk_length\", type=int, help=\"Length of each walk. Total number of pairs will be\\n O(walk_length * num_walks * num nodes * context^2)\",\n nargs='?', default=40, const=40)\n parser.add_argument(\"--context\", type=int, help=\"Size of context from each side (right and left). 
If \\n --directed, then context is only taken from right side\",\n nargs='?', default=3, const=3)\n parser.add_argument(\"--only_simulate_walks\", type=bool, help=\"If set, train.txt.npy will be read from --output_dir, \\n random walks will be simulated, out their output will be\\n written to --output_dir\",\n nargs='?', default=False, const=False)\n parser.add_argument(\"--input\", type=str, help=\"Path to edge-list textfile. Required unless --only_simulate_walks is set.\",\n nargs='?', default=\"datasets/CA-HepTh.txt\", const=\"datasets/CA-HepTh.txt\")\n parser.add_argument(\"--bs\", type=int, help=\"Enter the size of the batch\",\n nargs='?', default=256, const=256)\n\n args = parser.parse_args()\n input_ = args.input\n only_simulate_walks = args.only_simulate_walks\n output_dir = args.output_dir\n directed = args.directed\n partition = args.partition\n num_walks = args.num_walks\n walk_length = args.walk_length\n context = args.context\n \n \n return input_, only_simulate_walks, output_dir, directed, partition, num_walks, walk_length, context\n\n#flags.DEFINE_string('input', '',\n# 'Path to edge-list textfile. Required unless '\n# '--only_simulate_walks is set.')\n#flags.DEFINE_boolean('only_simulate_walks', False,\n# 'If set, train.txt.npy will be read from --output_dir, '\n# 'random walks will be simulated, out their output will be '\n# 'written to --output_dir')\n#flags.DEFINE_string('output_dir', '',\n# 'Directory where training files will be written.')\n#flags.DEFINE_boolean('directed', False, 'Must be set if graph is directed.')\n#flags.DEFINE_boolean('partition', True,\n# 'If set (default), separates a test split, containing '\n# 'half of the edges. In which case, train graph will be '\n# 'connected.')\n#\n#flags.DEFINE_integer('num_walks', 5, 'Number of walks per node.')\n#flags.DEFINE_integer('walk_length', 40,\n# 'Length of each walk. Total number of pairs will be '\n# 'O(walk_length * num_walks * num nodes * context^2)')\n#flags.DEFINE_integer('context', 3,\n# 'Size of context from each side (right and left). 
If '\n# '--directed, then context is only taken from right side')\n#\n#FLAGS = flags.FLAGS\n\n# node2vec parameters\nN2V_P = 1.0\nN2V_Q = 1.0\n\n\ndef LargestSubgraph(graph):\n \"\"\"Returns the Largest connected-component of `graph`.\"\"\"\n if graph.__class__ == nx.Graph:\n return LargestUndirectedSubgraph(graph)\n elif graph.__class__ == nx.DiGraph:\n largest_undirected_cc = LargestUndirectedSubgraph(nx.Graph(graph))\n directed_subgraph = nx.DiGraph()\n for (n1, n2) in graph.edges():\n if n2 in largest_undirected_cc and n1 in largest_undirected_cc[n2]:\n directed_subgraph.add_edge(n1, n2)\n\n return directed_subgraph\n\ndef LargestUndirectedSubgraph(graph):\n \"\"\"Returns the largest connected-component of undirected `graph`.\"\"\"\n if nx.is_connected(graph):\n return graph\n cc = (graph.subgraph(c) for c in nx.connected_components(graph))\n cc = list(cc)\n # cc = list(nx.connected_component_subgraphs(graph))\n sizes = map(len, cc)\n sizes_and_cc = zip(sizes, cc)\n sizes_and_cc = sorted(sizes_and_cc, key=lambda x: x[0])\n # changed here\n # sizes_and_cc.sort()\n sizes_and_cc = list(sizes_and_cc)\n return sizes_and_cc[-1][1]\n\ndef SampleTestEdgesAndPruneGraph(graph, remove_percent=0.5, check_every=5):\n \"\"\"Removes and returns `remove_percent` of edges from graph.\n \n Removal is random but makes sure graph stays connected.\"\"\"\n graph = copy.deepcopy(graph)\n undirected_graph = graph.to_undirected()\n edges = copy.deepcopy(graph.edges())\n edges = list(edges)\n # changed to list here\n random.shuffle(edges)\n remove_edges = int(len(edges) * remove_percent)\n num_edges_removed = 0\n currently_removing_edges = []\n removed_edges = []\n last_printed_prune_percentage = -1\n for j in range(len(edges)):\n n1, n2 = edges[j]\n graph.remove_edge(n1, n2)\n if n1 not in graph[n2]:\n undirected_graph.remove_edge(*(edges[j]))\n currently_removing_edges.append(edges[j])\n if j % check_every == 0:\n if nx.is_connected(undirected_graph):\n num_edges_removed += check_every\n removed_edges += currently_removing_edges\n currently_removing_edges = []\n else:\n for i in range(check_every):\n graph.add_edge(*(edges[j - i]))\n undirected_graph.add_edge(*(edges[j - i]))\n currently_removing_edges = []\n if not nx.is_connected(undirected_graph):\n print( ' DID NOT RECOVER :(')\n return None\n prunned_percentage = int(100 * len(removed_edges) / remove_edges)\n rounded = (prunned_percentage / 10) * 10\n if rounded != last_printed_prune_percentage:\n last_printed_prune_percentage = rounded\n print (f'Partitioning into train/test. 
Progress={rounded}')\n \n if len(removed_edges) >= remove_edges:\n break\n\n return graph, removed_edges\n\n\ndef SampleNegativeEdges(graph, num_edges):\n \"\"\"Samples `num_edges` edges from compliment of `graph`.\"\"\"\n random_negatives = set()\n nodes = list(graph.nodes())\n while len(random_negatives) < num_edges:\n i1 = random.randint(0, len(nodes) - 1)\n i2 = random.randint(0, len(nodes) - 1)\n if i1 == i2:\n continue\n if i1 > i2:\n i1, i2 = i2, i1\n n1 = nodes[i1]\n n2 = nodes[i2]\n if graph.has_edge(n1, n2):\n continue\n random_negatives.add((n1, n2))\n\n return random_negatives\n\n\ndef RandomNegativesPerNode(graph, negatives_per_node=400):\n \"\"\"For every node u in graph, samples 20 (u, v) where v is not in graph[u].\"\"\"\n negatives = []\n node_list = list(graph.nodes())\n num_nodes = len(node_list)\n print_every = num_nodes / 10\n for i, n in enumerate(node_list):\n found_negatives = 0\n if i % print_every == 0:\n print (f'Finished sampling negatives for {i} / {num_nodes} nodes')\n while found_negatives < negatives_per_node:\n n2 = node_list[random.randint(0, num_nodes - 1)]\n if n == n2 or n2 in graph[n]:\n continue\n negatives.append((n, n2))\n found_negatives += 1\n return negatives\n\n\ndef NumberNodes(graph):\n \"\"\"Returns a copy of `graph` where nodes are replaced by incremental ints.\"\"\"\n node_list = sorted(graph.nodes())\n index = {n: i for (i, n) in enumerate(node_list)}\n \n newgraph = graph.__class__()\n for (n1, n2) in graph.edges():\n newgraph.add_edge(index[n1], index[n2])\n\n return newgraph, index\n\n\nclass WalkPairsWriter(object):\n \"\"\"Writes one or more int numpy.array of size (S, 2).\n \n Where `S` is the size of the array, up to `self.capacity`. The total number\n of pairs should be the number of times `AddPair` is called.\n \"\"\"\n \n def __init__(self, file_format):\n \"\"\"file_format must contain %i.\"\"\"\n self.file_format = file_format\n self.capacity = 1000000 # 1 million.\n self.pairs = []\n self.next_file_id = 0\n\n def AddPair(self, n1, n2):\n self.pairs.append((n1, n2))\n if len(self.pairs) > self.capacity:\n self.Write()\n \n def Write(self):\n if len(self.pairs) == 0:\n return\n file_name = self.file_format % self.next_file_id\n random.shuffle(self.pairs)\n pairs_arr = numpy.array(self.pairs, dtype='int32')\n numpy.save(file_name, pairs_arr)\n self.pairs = []\n self.next_file_id += 1\n \n \ndef MakeDirectedNegatives(positive_edges):\n positive_set = set([(u, v) for (u, v) in list(positive_edges)])\n directed_negatives = []\n for (u, v) in positive_set:\n if (v, u) not in positive_set:\n directed_negatives.append((v, u))\n return numpy.array(directed_negatives, dtype='int32')\n\n\ndef CreateDatasetFiles(graph, output_dir, num_walks,\n context_right, context_left,\n walk_length, p, q ,partition=True):\n \"\"\"\n Writes a number of dataset files to `output_dir`.\n \n Args:\n graph: nx.Graph or nx.DiGraph to simulate walks on and extract negatives.\n output_dir: files will be written in this directory, including:\n {train, train.neg, test, test.neg}.txt.npy, index.pkl, and\n if flag --directed is set, test.directed.neg.txt.npy.\n The files {train, train.neg}.txt.npy are used for model selection;\n {test, test.neg, test.directed.neg}.txt.npy will be used for calculating\n eval metrics; index.pkl contains information about the graph (# of nodes,\n mapping from original graph IDs to new assigned integer ones in\n [0, largest_cc_size-1].\n partition: If set largest connected component will be used and data will \n separated into 
train/test splits.\n \n Returns:\n The training graph, after node renumbering.\n \"\"\"\n num_floats = num_walks * walk_length * len(graph)\n num_floats *= (context_left + context_right) ** 2\n print(f\"Writing up to {num_floats} training pairs, with size = {num_floats * 4/1000000.0} megabytes.\")\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n original_size = len(graph)\n if partition:\n graph = LargestSubgraph(graph)\n size_largest_cc = len(graph)\n else:\n size_largest_cc = -1\n graph, index = NumberNodes(graph)\n\n if partition:\n train_graph, test_edges = SampleTestEdgesAndPruneGraph(graph)\n else:\n train_graph, test_edges = graph, []\n\n # Sample negatives, to be equal to number of `test_edges` * 2.\n random_negatives = list(\n SampleNegativeEdges(graph, len(test_edges) + len(train_graph.edges())))\n random.shuffle(random_negatives)\n test_negatives = random_negatives[:len(test_edges)]\n # These are only used for evaluation, never training.\n train_eval_negatives = random_negatives[len(test_edges):]\n\n test_negatives = numpy.array(test_negatives, dtype='int32')\n test_edges = numpy.array(test_edges, dtype='int32')\n train_edges = numpy.array(train_graph.edges(), dtype='int32')\n train_eval_negatives = numpy.array(train_eval_negatives, dtype='int32')\n \n \n\n numpy.save(os.path.join(output_dir, 'train.txt'), train_edges)\n numpy.save(os.path.join(output_dir, 'train.neg.txt'), train_eval_negatives)\n numpy.save(os.path.join(output_dir, 'test.txt'), test_edges)\n numpy.save(os.path.join(output_dir, 'test.neg.txt'), test_negatives)\n # saving as csv file as well,\n # numpy.savetxt(os.path.join(output_dir, 'train.csv'), train_edges, delimiter=\",\")\n # numpy.savetxt(os.path.join(output_dir, 'train_neg.csv'), train_eval_negatives, delimiter=\",\")\n # numpy.savetxt(os.path.join(output_dir, 'test.csv'), test_edges, delimiter=\",\")\n # numpy.savetxt(os.path.join(output_dir, 'test_neg.csv'), test_negatives, delimiter=\",\")\n if directed:\n directed_negatives = MakeDirectedNegatives(\n numpy.concatenate([train_edges, test_edges], axis=0))\n directed_negatives = numpy.concatenate([directed_negatives, test_negatives],\n axis=0)\n numpy.save(\n os.path.join(output_dir, 'test.directed.neg.txt'), directed_negatives)\n # saving as csv\n # numpy.savetxt(\n # os.path.join(output_dir, 'test_directed_neg.csv'), directed_negatives,delimiter=\",\")\n\n cPickle.dump({\n 'index': index,\n 'original_num_nodes': original_size,\n 'largest_cc_num_nodes': size_largest_cc,\n 'num_pos_test_edges': len(test_edges),\n 'num_neg_test_edges': len(test_negatives),\n 'num_pos_train_edges': len(train_edges),\n 'num_neg_train_edges': len(train_eval_negatives),\n }, open(os.path.join(output_dir, 'index.pkl'), 'wb'))\n\n # saving index as csv\n # with open(os.path.join(output_dir, 'index.csv'), 'w') as csv_file: \n # writer = csv.writer(csv_file)\n # for key, value in index.items():\n # writer.writerow([key, value])\n\n return train_graph\n\n\ndef SimulateWalks(train_graph, output_dir, num_walks=10, walk_length=80,\n context_left=3, context_right=3, p=N2V_P, q=N2V_Q):\n \"\"\"Simulates Random Walks on `train_graph`, writing onto `output_dir`.\n \n Args:\n train_graph: nx.Graph or nx.DiGraph to simulate walks on and extract\n negatives.\n output_dir: files will be written in this directory, including:\n train.neg_per_node.txt.npy and train.pairs.<i>.txt.npy, for integer <i> in\n [0, num_walks - 1]. 
These files will be used for training the linear\n approximation of the Graph Likelihood objective.\n num_walks: Number of walks per node.\n walk_length: Walk length from every node.\n context_left: left offset from central word, inclusive.\n context_right: right offset from central word, inclusive.\n p: Node2vec's p parameter.\n q: Node2vec's q parameter.\n \"\"\"\n train_negatives_per_node = RandomNegativesPerNode(train_graph, negatives_per_node=400)\n train_negatives_per_node = numpy.array(train_negatives_per_node,dtype='int32')\n numpy.save(os.path.join(output_dir, 'train.neg_per_node.txt'),train_negatives_per_node)\n \n for edge in train_graph.edges():\n train_graph[edge[0]][edge[1]]['weight'] = 1\n directed = (train_graph.__class__ == nx.DiGraph)\n node2vec_graph = node2vec.Graph(train_graph, is_directed=directed, p=p, q=q)\n node2vec_graph.preprocess_transition_probs()\n \n pairs_writer = WalkPairsWriter(os.path.join(output_dir, 'train.pairs.%i'))\n for unused_j in range(num_walks):\n walks = node2vec_graph.simulate_walks(1, walk_length)\n \n for c, node_list in enumerate(walks):\n if c % 1000 == 0:\n print(f'Writing Walk Pairs {c} / {len(walks)}' )\n for i in range(len(node_list)):\n start_i = max(0, i - context_left)\n end_i = min(len(node_list), i + context_right + 1)\n for k in range(start_i, end_i):\n # if i == k: continue\n pairs_writer.AddPair(node_list[i], node_list[k])\n\n pairs_writer.Write()\n \n print(f'All Done. Nodes = {len(train_graph)}')\n\ndef main_exp(directed):\n if directed:\n graph = nx.DiGraph()\n else:\n graph = nx.Graph()\n\n if not only_simulate_walks:\n # Read graph\n graph = nx.read_edgelist(input_, create_using=graph)\n\n # Create dataset files.\n graph = CreateDatasetFiles(\n graph, output_dir, num_walks=num_walks,\n context_right=context, context_left=context*directed,\n walk_length=walk_length, p=N2V_P, q=N2V_Q)\n else:\n if os.path.exists(os.path.join(output_dir, 'test.directed.neg.txt.npy')):\n graph = nx.DiGraph()\n directed = True\n\n # Only simulating walks. Read graph from --output_dir\n train_edges = numpy.load(os.path.join(output_dir, 'train.txt.npy'))\n for n1, n2 in list(train_edges):\n graph.add_edge(n1, n2)\n\n left_context = context * (not directed)\n print(f'left_context = {left_context}' )\n SimulateWalks(\n graph, output_dir, num_walks=num_walks,\n context_right=context, context_left=left_context,\n walk_length=walk_length, p=N2V_P, q=N2V_Q)\n\n\nif __name__ == '__main__':\n \n \n global input_, only_simulate_walks, output_dir, directed, partition, num_walks, walk_length, context \n input_, only_simulate_walks, output_dir, directed, partition, num_walks, walk_length, context = check_args()\n main_exp(directed=directed)\n" ]
[ [ "numpy.array", "numpy.save", "numpy.concatenate" ] ]
ng390/selfstudy-adversarial-robustness
[ "e225142564793ba7799d7e76727928f72cfb769e" ]
[ "convert_pytorch.py" ]
[ "# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport sys\nimport tensorflow as tf\nimport torch\nimport math\nimport numpy as np\n\nfrom common.networks import AllConvModel, AllConvModelTorch\nfrom common.framework import get_checkpoint_abs_path\n\nimport logging\ntf.get_logger().setLevel(logging.ERROR) \n\n\ndef fix(path):\n path_tf = path[:-6]\n path_torch = path_tf + \".torchmodel\"\n if os.path.exists(path_torch):\n return\n\n print()\n print(\"Converting\", path)\n \n # Get input sizes\n all_vars = tf.train.list_variables(\n get_checkpoint_abs_path(path_tf))\n\n # Is it a list of models? Or just one?\n if 'model/0' in \"\".join([x[0] for x in all_vars]):\n prefix = 'model/0'\n else:\n prefix = 'model'\n \n input_size, filter_size = [shape for name,shape in all_vars if prefix+'/layers/0/kernel' in name][0][2:]\n output_size = [shape for name,shape in all_vars if prefix+'/layers/9/kernel' in name][0][-1]\n\n num_models = sum('/0/kernel' in x for x,_ in all_vars)\n\n # Create the TF convnet\n convnet = [AllConvModel(num_classes=output_size,\n num_filters=filter_size,\n input_shape=(32, 32, input_size))\n for _ in range(num_models)]\n \n convnet_load = convnet[0] if num_models == 1 else convnet\n tf.train.Checkpoint(model=convnet_load).restore(\n get_checkpoint_abs_path(path_tf))\n\n weights = []\n for model in convnet:\n ws = []\n for layer in model.layers:\n if len(layer.weights) > 0:\n ws.append(layer.weights)\n weights.extend(ws[::-1])\n \n models = [AllConvModelTorch(10, 64, (input_size, 32, 32)) for _ in range(num_models)]\n for model in models:\n for layer in model.layers:\n if isinstance(layer, torch.nn.Conv2d):\n w, b = weights.pop()\n layer.weight = torch.nn.Parameter(torch.tensor(w.numpy().transpose((3,2,0,1))))\n layer.bias = torch.nn.Parameter(torch.tensor(b.numpy()))\n\n if len(models) == 1:\n torch.save(models[0].state_dict(), path_torch)\n else:\n torch.save([model.state_dict() for model in models], path_torch)\n\n\ndef run():\n for root,_,fs in os.walk(sys.argv[1] if len(sys.argv) > 1 else 'checkpoints'):\n for f in fs:\n if \".index\" in f:\n fix(os.path.join(root, f))\n\nif __name__ == \"__main__\":\n run()\n" ]
[ [ "tensorflow.train.Checkpoint", "tensorflow.get_logger" ] ]
yukgu/covid-model-seiir-pipeline
[ "3433034d3f089938e7993b6321d570365bdf62db" ]
[ "src/covid_model_seiir_pipeline/pipeline/regression/task/beta_regression.py" ]
[ "from pathlib import Path\n\nimport click\nimport numpy as np\nimport pandas as pd\n\nfrom covid_model_seiir_pipeline.lib import (\n cli_tools,\n math,\n static_vars,\n)\nfrom covid_model_seiir_pipeline.pipeline.regression.data import RegressionDataInterface\nfrom covid_model_seiir_pipeline.pipeline.regression.specification import RegressionSpecification\nfrom covid_model_seiir_pipeline.pipeline.regression import model\n\n\nlogger = cli_tools.task_performance_logger\n\n\ndef run_beta_regression(regression_version: str, draw_id: int, progress_bar: bool) -> None:\n logger.info('Starting beta regression.', context='setup')\n # Build helper abstractions\n regression_spec_file = Path(regression_version) / static_vars.REGRESSION_SPECIFICATION_FILE\n regression_specification = RegressionSpecification.from_path(regression_spec_file)\n data_interface = RegressionDataInterface.from_specification(regression_specification)\n\n logger.info('Loading ODE fit input data', context='read')\n hierarchy = data_interface.load_hierarchy()\n past_infection_data = data_interface.load_past_infection_data(draw_id=draw_id)\n population = data_interface.load_five_year_population()\n rhos = data_interface.load_variant_prevalence()\n vaccinations = data_interface.load_vaccinations()\n\n logger.info('Prepping ODE fit parameters.', context='transform')\n infections = model.clean_infection_data_measure(past_infection_data, 'infections')\n regression_params = regression_specification.regression_parameters.to_dict()\n\n np.random.seed(draw_id)\n sampled_params = model.sample_params(\n infections.index, regression_params,\n params_to_sample=['alpha', 'sigma', 'gamma1', 'gamma2', 'kappa', 'chi', 'pi']\n )\n\n sampled_params['phi'] = pd.Series(\n np.random.normal(loc=sampled_params['chi'] + regression_params['phi_mean_shift'],\n scale=regression_params['phi_sd']),\n index=infections.index, name='phi',\n )\n sampled_params['psi'] = pd.Series(\n np.random.normal(loc=sampled_params['chi'] + regression_params['psi_mean_shift'],\n scale=regression_params['psi_sd']),\n index=infections.index, name='psi',\n )\n\n ode_parameters = model.prepare_ode_fit_parameters(\n infections,\n population,\n rhos,\n vaccinations,\n sampled_params,\n )\n\n logger.info('Running ODE fit', context='compute_ode')\n beta_fit, compartments = model.run_ode_fit(\n ode_parameters=ode_parameters,\n progress_bar=progress_bar,\n )\n\n logger.info('Loading regression input data', context='read')\n covariates = data_interface.load_covariates(list(regression_specification.covariates))\n gaussian_priors = data_interface.load_priors(regression_specification.covariates.values())\n prior_coefficients = data_interface.load_prior_run_coefficients(draw_id=draw_id)\n if gaussian_priors and prior_coefficients:\n raise NotImplementedError\n\n logger.info('Fitting beta regression', context='compute_regression')\n coefficients = model.run_beta_regression(\n beta_fit['beta'],\n covariates,\n regression_specification.covariates.values(),\n gaussian_priors,\n prior_coefficients,\n hierarchy,\n )\n log_beta_hat = math.compute_beta_hat(covariates, coefficients)\n beta_hat = np.exp(log_beta_hat).rename('beta_hat')\n\n # Format and save data.\n logger.info('Prepping outputs', context='transform')\n betas = pd.concat([beta_fit, beta_hat], axis=1).reindex(infections.index)\n deaths = model.clean_infection_data_measure(past_infection_data, 'deaths')\n ode_parameters = ode_parameters.to_df()\n\n logger.info('Writing outputs', context='write')\n 
data_interface.save_infections(infections, draw_id=draw_id)\n data_interface.save_deaths(deaths, draw_id=draw_id)\n data_interface.save_betas(betas, draw_id=draw_id)\n data_interface.save_compartments(compartments, draw_id=draw_id)\n data_interface.save_coefficients(coefficients, draw_id=draw_id)\n data_interface.save_ode_parameters(ode_parameters, draw_id=draw_id)\n\n logger.report()\n\n\[email protected]()\n@cli_tools.with_task_regression_version\n@cli_tools.with_draw_id\n@cli_tools.add_verbose_and_with_debugger\n@cli_tools.with_progress_bar\ndef beta_regression(regression_version: str, draw_id: int,\n progress_bar: bool, verbose: int, with_debugger: bool):\n cli_tools.configure_logging_to_terminal(verbose)\n run = cli_tools.handle_exceptions(run_beta_regression, logger, with_debugger)\n run(regression_version=regression_version,\n draw_id=draw_id,\n progress_bar=progress_bar)\n\n\nif __name__ == '__main__':\n beta_regression()\n" ]
[ [ "numpy.random.normal", "numpy.random.seed", "pandas.concat", "numpy.exp" ] ]
teomotun/Restaurant-Plug
[ "1ecaab7bb60706ec0eca96c2f3efb31276c536e7" ]
[ "Model/code/training_restaurant_features.py" ]
[ "import pandas as pd\nimport h5py\n\n# Paths\nDATA_HOME = \"/content/drive/My Drive/Yelp-Restaurant-Classification/Model/data/\"\nFEATURES_HOME = '/content/drive/My Drive/Yelp-Restaurant-Classification/Model/features/'\n\n# Get photo->business mapping from the file provided\ntrain_photo_to_biz_ids = pd.read_csv(DATA_HOME + 'train_photo_to_biz_ids.csv')\n\n# Get labels for businesses in the training data\ntrain_data_business = pd.read_csv(DATA_HOME + 'train.csv').dropna()\n\n# Sort these labels in the ascending order for simplicity e.g. (0, 6, 4, 2, 5) -> (0, 2, 4, 5, 6)\ntrain_data_business['labels'] = train_data_business['labels'].apply(\n lambda feature_vector: tuple(sorted(int(feature) for feature in feature_vector.split())))\ntrain_data_business.set_index('business_id', inplace=True)\n\n# Get business ids\nbusiness_ids = train_data_business.index.unique()\nprint(\"Total train business:\", len(business_ids))\n\n# Reading stored features from h5 file\ntrain_features_file = h5py.File(FEATURES_HOME + 'train_features.h5', 'r')\ntrain_features = np.copy(train_features_file['feature'])\ntrain_features_file.close()\n\n# Create a pandas dataframe to make the data ready for training the SVM classifier in the following format\ntrain_df = pd.DataFrame(columns=['business_id', 'label', 'feature'])\n\nfor business_id in business_ids:\n \"\"\"\n For each business, write the values for the above triplet in the file viz. ['business_id', 'label', 'feature']\n \"\"\"\n business_id = int(business_id)\n\n # Get the labels for the current business\n label = train_data_business.loc[business_id]['labels']\n\n # Get all the images which represent the current business with business_id\n images_for_business_id = train_photo_to_biz_ids[train_photo_to_biz_ids['business_id'] == business_id].index.tolist()\n\n # As a feature for current business, take the average over all the images\n feature = list(np.mean(train_features[images_for_business_id], axis=0))\n\n # Put the triplet into the data frame\n train_df.loc[business_id] = [business_id, label, feature]\n\nprint(\"Train business feature extraction is completed.\")\n\n# Write the above data frame into a csv file\nwith open(FEATURES_HOME + 'train_aggregate_features.csv', 'w') as business_features_file:\n train_df.to_csv(business_features_file, index=False)" ]
[ [ "pandas.read_csv", "pandas.DataFrame" ] ]
potti95/fewrel
[ "5706b45c6936a60ef94326808e24d0ed0643a579" ]
[ "train_demo.py" ]
[ "from fewshot_re_kit.data_loader import get_loader, get_loader_pair, get_loader_unsupervised\nfrom fewshot_re_kit.framework import FewShotREFramework\nfrom fewshot_re_kit.sentence_encoder import FasttextSentenceEncoder, CNNSentenceEncoder, BERTSentenceEncoder, \\\n BERTPAIRSentenceEncoder, RobertaSentenceEncoder, RobertaPAIRSentenceEncoder, BERTPAIRMULTISentenceEncoder, \\\n BertSentenceEncoderOWN\nimport models\nfrom models.proto import Proto\nfrom models.gnn import GNN\nfrom models.snail import SNAIL\nfrom models.metanet import MetaNet\nfrom models.siamese import Siamese\nfrom models.pair import Pair\nfrom models.d import Discriminator\nfrom models.mtb import Mtb\nimport sys\nimport torch\nfrom torch import optim, nn\nimport numpy as np\nimport json\nimport argparse\nimport os\nimport io\n#import fasttext\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--train', default='train_wiki', #train_wiki\n help='train file')\n parser.add_argument('--val', default='val_wiki',\n help='val file')\n parser.add_argument('--test', default='val_wiki',\n help='test file')\n parser.add_argument('--adv', default=None,\n help='adv file')\n parser.add_argument('--trainN', default=5, type=int,\n help='N in train')\n parser.add_argument('--N', default=5, type=int,\n help='N way')\n parser.add_argument('--K', default=1, type=int,\n help='K shot')\n parser.add_argument('--Q', default=1, type=int,\n help='Num of query per class')\n parser.add_argument('--batch_size', default=1, type=int,\n help='batch size')\n parser.add_argument('--train_iter', default=30000, type=int,\n help='num of iters in training')\n parser.add_argument('--val_iter', default=1000, type=int, #1000\n help='num of iters in validation')\n parser.add_argument('--test_iter', default=10000, type=int, #10000\n help='num of iters in testing')\n parser.add_argument('--val_step', default=2000, type=int,\n help='val after training how many iters')\n parser.add_argument('--model', default='proto',\n help='model name')\n parser.add_argument('--encoder', default='cnn',\n help='encoder: cnn or bert or roberta')\n parser.add_argument('--max_length', default=128, type=int,\n help='max length')\n parser.add_argument('--lr', default=-1, type=float,\n help='learning rate')\n parser.add_argument('--weight_decay', default=1e-5, type=float,\n help='weight decay')\n parser.add_argument('--dropout', default=0.0, type=float,\n help='dropout rate')\n parser.add_argument('--na_rate', default=0, type=int,\n help='NA rate (NA = Q * na_rate)')\n parser.add_argument('--grad_iter', default=1, type=int,\n help='accumulate gradient every x iterations')\n parser.add_argument('--optim', default='sgd',\n help='sgd / adam / adamw')\n parser.add_argument('--hidden_size', default=230, type=int,\n help='hidden size')\n parser.add_argument('--load_ckpt', default=None,\n help='load ckpt')\n parser.add_argument('--save_ckpt', default=None,\n help='save ckpt')\n parser.add_argument('--fp16', action='store_true',\n help='use nvidia apex fp16')\n parser.add_argument('--only_test', action='store_true',\n help='only test')\n parser.add_argument('--ckpt_name', type=str, default='',\n help='checkpoint name.')\n\n\n # only for bert / roberta\n parser.add_argument('--pair', action='store_true',\n help='use pair model')\n parser.add_argument('--pretrain_ckpt', default=None,\n help='bert / roberta pre-trained checkpoint')\n parser.add_argument('--cat_entity_rep', action='store_true',\n help='concatenate entity representation as sentence rep')\n\n # only for 
prototypical networks\n parser.add_argument('--dot', action='store_true', \n help='use dot instead of L2 distance for proto')\n\n # only for mtb\n parser.add_argument('--no_dropout', action='store_true',\n help='do not use dropout after BERT (still has dropout in BERT).')\n \n # experiment\n parser.add_argument('--mask_entity', action='store_true',\n help='mask entity names')\n parser.add_argument('--use_sgd_for_bert', action='store_true',\n help='use SGD instead of AdamW for BERT.')\n\n opt = parser.parse_args()\n trainN = opt.trainN\n N = opt.N\n K = opt.K\n Q = opt.Q\n batch_size = opt.batch_size\n model_name = opt.model\n encoder_name = opt.encoder\n max_length = opt.max_length\n \n print(\"{}-way-{}-shot Few-Shot Relation Classification\".format(N, K))\n print(\"model: {}\".format(model_name))\n print(\"encoder: {}\".format(encoder_name))\n print(\"max_length: {}\".format(max_length))\n\n ########################################################################saját\n #def load_vectors(fname):\n # fin = io.open(fname, 'r', encoding='utf-8', newline='\\n', errors='ignore')\n # n, d = map(int, fin.readline().split())\n # data = {}\n # for line in fin:\n # tokens = line.rstrip().split(' ')\n # data[tokens[0]] = map(float, tokens[1:])\n # return data\n #if encoder_name == 'fasttext':\n # try:\n # fasttext_mat=load_vectors('./pretrain/fasttext/wiki.hu.align.vec')\n # elsoszo=list(fasttext_mat['az'])\n # except:\n # raise Exception(\"Cannot find fasttext files.\")\n # sentence_encoder= FasttextSentenceEncoder(\n # fasttext_mat,\n # max_length\n # )\n if encoder_name == 'fasttext':\n sentence_encoder= FasttextSentenceEncoder(\n max_length\n )\n\n ###################################################################saját\n\n elif encoder_name == 'cnn':\n try:\n glove_mat = np.load('./pretrain/glove/glove_mat.npy')\n glove_word2id = json.load(open('./pretrain/glove/glove_word2id.json'))\n except:\n raise Exception(\"Cannot find glove files. 
Run glove/download_glove.sh to download glove files.\")\n sentence_encoder = CNNSentenceEncoder(\n glove_mat,\n glove_word2id,\n max_length)\n elif encoder_name == 'bert':\n pretrain_ckpt = opt.pretrain_ckpt or './pretrain/bert-base-uncased'\n if opt.pair:\n sentence_encoder = BERTPAIRSentenceEncoder(\n pretrain_ckpt,\n max_length)\n else:\n sentence_encoder = BertSentenceEncoderOWN(\n pretrain_ckpt,\n max_length,\n cat_entity_rep=opt.cat_entity_rep,\n mask_entity=opt.mask_entity)\n elif encoder_name == 'bertsimple':\n pretrain_ckpt = opt.pretrain_ckpt or './pretrain/bert-base-uncased-simple/proba/bert-base-uncased'\n sentence_encoder = BertSentenceEncoderOWN(\n pretrain_ckpt,\n max_length,\n cat_entity_rep=opt.cat_entity_rep,\n mask_entity=opt.mask_entity)\n elif encoder_name == 'bertmultiling':\n pretrain_ckpt = opt.pretrain_ckpt or './pretrain/bert-base-multilingual-uncased/bert-base-multilingual-uncased'\n sentence_encoder = BertSentenceEncoderOWN(\n pretrain_ckpt,\n max_length,\n cat_entity_rep=opt.cat_entity_rep,\n mask_entity=opt.mask_entity)\n elif encoder_name == 'roberta':\n pretrain_ckpt = opt.pretrain_ckpt or 'roberta-base'\n if opt.pair:\n sentence_encoder = RobertaPAIRSentenceEncoder(\n pretrain_ckpt,\n max_length)\n else:\n sentence_encoder = RobertaSentenceEncoder(\n pretrain_ckpt,\n max_length,\n cat_entity_rep=opt.cat_entity_rep)\n else:\n raise NotImplementedError\n #print(sentence_encoder) csak encoder információk, DATA LOADER HÍV MINDENT\n if opt.pair:\n train_data_loader = get_loader_pair(opt.train, sentence_encoder,\n N=trainN, K=K, Q=Q, na_rate=opt.na_rate, batch_size=batch_size, encoder_name=encoder_name)\n # val_data_loader = get_loader_pair(opt.val, sentence_encoder,\n # N=N, K=K, Q=Q, na_rate=opt.na_rate, batch_size=batch_size, encoder_name=encoder_name)\n test_data_loader = get_loader_pair(opt.test, sentence_encoder,\n N=N, K=K, Q=Q, na_rate=opt.na_rate, batch_size=batch_size, encoder_name=encoder_name)\n else:\n train_data_loader = get_loader(opt.train, sentence_encoder,\n N=trainN, K=K, Q=Q, na_rate=opt.na_rate, batch_size=batch_size)\n # val_data_loader = get_loader(opt.val, sentence_encoder,\n # N=N, K=K, Q=Q, na_rate=opt.na_rate, batch_size=batch_size)\n test_data_loader = get_loader(opt.test, sentence_encoder,\n N=N, K=K, Q=Q, na_rate=opt.na_rate, batch_size=batch_size)\n if opt.adv:\n adv_data_loader = get_loader_unsupervised(opt.adv, sentence_encoder,\n N=trainN, K=K, Q=Q, na_rate=opt.na_rate, batch_size=batch_size)\n #print(train_data_loader)\n if opt.optim == 'sgd':\n pytorch_optim = optim.SGD\n elif opt.optim == 'adam':\n pytorch_optim = optim.Adam\n elif opt.optim == 'adamw':\n from transformers import AdamW\n pytorch_optim = AdamW\n else:\n raise NotImplementedError\n if opt.adv:\n d = Discriminator(opt.hidden_size)\n framework = FewShotREFramework(train_data_loader, test_data_loader, adv_data_loader, adv=opt.adv, d=d)\n else:\n framework = FewShotREFramework(train_data_loader, test_data_loader)\n \n prefix = '-'.join([model_name, encoder_name, opt.train, opt.val, str(N), str(K)])\n if opt.adv is not None:\n prefix += '-adv_' + opt.adv\n if opt.na_rate != 0:\n prefix += '-na{}'.format(opt.na_rate)\n if opt.dot:\n prefix += '-dot'\n if opt.cat_entity_rep:\n prefix += '-catentity'\n if len(opt.ckpt_name) > 0:\n prefix += '-' + opt.ckpt_name\n \n if model_name == 'proto':\n model = Proto(sentence_encoder, dot=opt.dot)\n elif model_name == 'gnn':\n model = GNN(sentence_encoder, N, hidden_size=opt.hidden_size)\n elif model_name == 'snail':\n model = 
SNAIL(sentence_encoder, N, K, hidden_size=opt.hidden_size)\n elif model_name == 'metanet':\n model = MetaNet(N, K, sentence_encoder.embedding, max_length)\n elif model_name == 'siamese':\n model = Siamese(sentence_encoder, hidden_size=opt.hidden_size, dropout=opt.dropout)\n elif model_name == 'pair':\n model = Pair(sentence_encoder, hidden_size=opt.hidden_size)\n elif model_name == 'mtb':\n model = Mtb(sentence_encoder, use_dropout=not opt.no_dropout)\n else:\n raise NotImplementedError\n \n if not os.path.exists('checkpoint'):\n os.mkdir('checkpoint')\n ckpt = 'checkpoint/{}.pth.tar'.format(prefix)\n if opt.save_ckpt:\n ckpt = opt.save_ckpt\n \n if torch.cuda.is_available():\n model.cuda()\n\n if not opt.only_test:\n if encoder_name in ['bert', 'roberta']:\n bert_optim = True\n else:\n bert_optim = False\n\n if opt.lr == -1:\n if bert_optim:\n opt.lr = 2e-5\n else:\n opt.lr = 1e-1\n \n opt.train_iter = opt.train_iter * opt.grad_iter\n framework.train(model, prefix, batch_size, trainN, N, K, Q,\n pytorch_optim=pytorch_optim, load_ckpt=opt.load_ckpt, save_ckpt=ckpt,\n na_rate=opt.na_rate, val_step=opt.val_step, fp16=opt.fp16, pair=opt.pair, \n train_iter=opt.train_iter, val_iter=opt.val_iter, bert_optim=bert_optim, \n learning_rate=opt.lr, use_sgd_for_bert=opt.use_sgd_for_bert, grad_iter=opt.grad_iter)\n else:\n ckpt = opt.load_ckpt\n if ckpt is None:\n print(\"Warning: --load_ckpt is not specified. Will load Hugginface pre-trained checkpoint.\")\n ckpt = 'none'\n\n acc = framework.eval(model, batch_size, N, K, Q, opt.test_iter, na_rate=opt.na_rate, ckpt=ckpt, pair=opt.pair)\n print(\"RESULT: %.2f\" % (acc * 100))\n with open('results.txt', 'a') as f:\n print(ckpt, file=f)\n print('RESULT: %.2f' % (acc * 100), file=f)\n print('\\n', file=f)\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.load", "torch.cuda.is_available" ] ]
v-pooja/vmaf
[ "6c6a7b085213a90c080510035546eceb21284008" ]
[ "python/src/vmaf/core/niqe_train_test_model.py" ]
[ "__copyright__ = \"Copyright 2016-2018, Netflix, Inc.\"\n__license__ = \"Apache, Version 2.0\"\n\nimport numpy as np\nimport scipy.linalg\n\nfrom vmaf.core.train_test_model import TrainTestModel, RegressorMixin\n\n\nclass NiqeTrainTestModel(TrainTestModel, RegressorMixin):\n\n TYPE = 'NIQE'\n VERSION = \"0.1\"\n\n @classmethod\n def _assert_dimension(cls, feature_names, results):\n # Override TrainTestModel._assert_dimension. allow input to be list\n # For each result, the dimension of each result[feature_name]\n # should be consistent\n assert isinstance(results[0][feature_names[0]], list)\n for result in results:\n len0 = len(result[feature_names[0]])\n for name in feature_names[1:]:\n assert len(result[name]) == len0\n\n @classmethod\n def get_xs_from_results(cls, results, indexs=None, aggregate=False):\n \"\"\"\n override TrainTestModel.get_xs_from_results by altering aggregate\n default to False\n \"\"\"\n return super(NiqeTrainTestModel, cls).get_xs_from_results(\n results, indexs, aggregate)\n\n @classmethod\n def get_xys_from_results(cls, results, indexs=None, aggregate=False):\n \"\"\"\n override TrainTestModel.get_xys_from_results by altering aggregate\n default to False\n \"\"\"\n return super(NiqeTrainTestModel, cls).get_xys_from_results(\n results, indexs, aggregate)\n\n def train(self, xys):\n \"\"\"\n override TrainTestModel.train\n \"\"\"\n self.model_type = self.TYPE\n\n assert 'label' in xys\n ys_vec = xys['label'] # for NIQE, ys never used for training\n\n # this makes sure the order of features are normalized, and each\n # dimension of xys_2d is consistent with feature_names\n feature_names = sorted(xys.keys())\n feature_names.remove('label')\n feature_names.remove('content_id')\n self.feature_names = feature_names\n\n num_samples = len(xys[feature_names[0]])\n\n xs_2d = []\n for i_sample in range(num_samples):\n xs_2d_ = np.vstack(map(\n lambda feature_name: xys[feature_name][i_sample], feature_names)\n ).T\n xs_2d.append(xs_2d_)\n xs_2d = np.vstack(xs_2d)\n\n # no normalization for NIQE\n self.norm_type = 'none'\n\n # compute NIQE\n mu = np.mean(xs_2d, axis=0)\n cov = np.cov(xs_2d.T)\n\n self.model = {'mu': mu, 'cov': cov}\n\n def predict(self, xs):\n \"\"\"\n override TrainTestModel.predict\n \"\"\"\n self._assert_trained()\n\n for name in self.feature_names:\n assert name in xs\n\n num_samples = len(xs[self.feature_names[0]])\n\n # predict per sample\n ys_label_pred = []\n for i_sample in range(num_samples):\n xs_2d_ = np.vstack(map(\n lambda feature_name: xs[feature_name][i_sample],\n self.feature_names)\n ).T\n\n # no normalization for NIQE\n\n if xs_2d_.shape[0] < 2:\n ys_label_pred_ = None # NIQE won't work for single patch\n else:\n ys_label_pred_ = self._predict(self.model, xs_2d_)\n\n ys_label_pred.append(ys_label_pred_)\n\n return {'ys_label_pred': ys_label_pred}\n\n @classmethod\n def _predict(cls, model, xs_2d):\n pop_mu = model['mu'].astype(float)\n pop_cov = model['cov'].astype(float)\n feats = xs_2d\n sample_mu = np.mean(feats, axis=0)\n sample_cov = np.cov(feats.T)\n X = sample_mu - pop_mu\n covmat = ((pop_cov+sample_cov)/2.0)\n pinvmat = scipy.linalg.pinv(covmat)\n d1 = np.sqrt(np.dot(np.dot(X, pinvmat), X))\n return d1" ]
[ [ "numpy.vstack", "numpy.dot", "numpy.cov", "numpy.mean" ] ]
cBioCenter/chell-viz-contact
[ "07edd56bea1693da76cf62d2ab7088193c86e9e2" ]
[ "utils/generate_raw_matrix_from_norm.py" ]
[ "import scipy.sparse as ssp\nimport numpy as np\ncounts = ssp.load_npz('./counts_norm.npz')\nnp.savetxt('./counts_norm.csv', counts.todense(), delimiter=',', fmt='%.3f')\n" ]
[ [ "scipy.sparse.load_npz" ] ]
sigeisler/grb
[ "c89e21076dc05d1edb87dfe2eff20c29ba6bd0c1" ]
[ "grb/model/torch/gcn.py" ]
[ "\"\"\"Torch module for GCN.\"\"\"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom grb.utils.normalize import GCNAdjNorm\n\n\nclass GCN(nn.Module):\n r\"\"\"\n\n Description\n -----------\n Graph Convolutional Networks (`GCN <https://arxiv.org/abs/1609.02907>`__)\n\n Parameters\n ----------\n in_features : int\n Dimension of input features.\n out_features : int\n Dimension of output features.\n hidden_features : int or list of int\n Dimension of hidden features. List if multi-layer.\n n_layers : int\n Number of layers.\n layer_norm : bool, optional\n Whether to use layer normalization. Default: ``False``.\n activation : func of torch.nn.functional, optional\n Activation function. Default: ``torch.nn.functional.relu``.\n residual : bool, optional\n Whether to use residual connection. Default: ``False``.\n feat_norm : str, optional\n Type of features normalization, choose from [\"arctan\", \"tanh\", None]. Default: ``None``.\n adj_norm_func : func of utils.normalize, optional\n Function that normalizes adjacency matrix. Default: ``GCNAdjNorm``.\n dropout : float, optional\n Dropout rate during training. Default: ``0.0``.\n\n \"\"\"\n\n def __init__(self,\n in_features,\n out_features,\n hidden_features,\n n_layers,\n activation=F.relu,\n layer_norm=False,\n residual=False,\n feat_norm=None,\n adj_norm_func=GCNAdjNorm,\n dropout=0.0):\n super(GCN, self).__init__()\n self.in_features = in_features\n self.out_features = out_features\n self.feat_norm = feat_norm\n self.adj_norm_func = adj_norm_func\n if type(hidden_features) is int:\n hidden_features = [hidden_features] * (n_layers - 1)\n elif type(hidden_features) is list or type(hidden_features) is tuple:\n assert len(hidden_features) == (n_layers - 1), \"Incompatible sizes between hidden_features and n_layers.\"\n n_features = [in_features] + hidden_features + [out_features]\n\n self.layers = nn.ModuleList()\n for i in range(n_layers):\n if layer_norm:\n self.layers.append(nn.LayerNorm(n_features[i]))\n self.layers.append(GCNConv(in_features=n_features[i],\n out_features=n_features[i + 1],\n activation=activation if i != n_layers - 1 else None,\n residual=residual if i != n_layers - 1 else False,\n dropout=dropout if i != n_layers - 1 else 0.0))\n self.reset_parameters()\n\n @property\n def model_type(self):\n \"\"\"Indicate type of implementation.\"\"\"\n return \"torch\"\n\n @property\n def model_name(self):\n return \"gcn\"\n\n def reset_parameters(self):\n \"\"\"Reset parameters.\"\"\"\n for layer in self.layers:\n layer.reset_parameters()\n\n def forward(self, x, adj):\n r\"\"\"\n\n Parameters\n ----------\n x : torch.Tensor\n Tensor of input features.\n adj : torch.SparseTensor\n Sparse tensor of adjacency matrix.\n\n Returns\n -------\n x : torch.Tensor\n Output of model (logits without activation).\n\n \"\"\"\n\n for layer in self.layers:\n if isinstance(layer, nn.LayerNorm):\n x = layer(x)\n else:\n x = layer(x, adj)\n\n return x\n\n\nclass GCNGC(nn.Module):\n r\"\"\"\n\n Description\n -----------\n Graph Convolutional Networks (`GCN <https://arxiv.org/abs/1609.02907>`__)\n\n Parameters\n ----------\n in_features : int\n Dimension of input features.\n out_features : int\n Dimension of output features.\n hidden_features : int or list of int\n Dimension of hidden features. List if multi-layer.\n n_layers : int\n Number of layers.\n layer_norm : bool, optional\n Whether to use layer normalization. Default: ``False``.\n activation : func of torch.nn.functional, optional\n Activation function. 
Default: ``torch.nn.functional.relu``.\n residual : bool, optional\n Whether to use residual connection. Default: ``False``.\n feat_norm : str, optional\n Type of features normalization, choose from [\"arctan\", \"tanh\", None]. Default: ``None``.\n adj_norm_func : func of utils.normalize, optional\n Function that normalizes adjacency matrix. Default: ``GCNAdjNorm``.\n dropout : float, optional\n Dropout rate during training. Default: ``0.0``.\n\n \"\"\"\n\n def __init__(self,\n in_features,\n out_features,\n hidden_features,\n n_layers,\n activation=F.relu,\n layer_norm=False,\n residual=False,\n feat_norm=None,\n adj_norm_func=GCNAdjNorm,\n dropout=0.0):\n super(GCNGC, self).__init__()\n self.in_features = in_features\n self.out_features = out_features\n self.feat_norm = feat_norm\n self.adj_norm_func = adj_norm_func\n if type(hidden_features) is int:\n hidden_features = [hidden_features] * (n_layers - 1)\n elif type(hidden_features) is list or type(hidden_features) is tuple:\n assert len(hidden_features) == (n_layers - 1), \"Incompatible sizes between hidden_features and n_layers.\"\n n_features = [in_features] + hidden_features\n\n self.layers = nn.ModuleList()\n for i in range(n_layers - 1):\n if layer_norm:\n self.layers.append(nn.LayerNorm(n_features[i]))\n self.layers.append(GCNConv(in_features=n_features[i],\n out_features=n_features[i + 1],\n activation=activation,\n residual=residual,\n dropout=dropout))\n self.linear = nn.Linear(hidden_features[-1], out_features)\n self.dropout = nn.Dropout(dropout)\n self.reset_parameters()\n\n @property\n def model_type(self):\n \"\"\"Indicate type of implementation.\"\"\"\n return \"torch\"\n\n @property\n def model_name(self):\n return \"gcn\"\n\n def reset_parameters(self):\n \"\"\"Reset parameters.\"\"\"\n for layer in self.layers:\n layer.reset_parameters()\n\n def forward(self, x, adj, batch_index=None):\n r\"\"\"\n\n Parameters\n ----------\n x : torch.Tensor\n Tensor of input features.\n adj : torch.SparseTensor\n Sparse tensor of adjacency matrix.\n\n Returns\n -------\n x : torch.Tensor\n Output of model (logits without activation).\n\n \"\"\"\n\n for layer in self.layers:\n if isinstance(layer, nn.LayerNorm):\n x = layer(x)\n else:\n x = layer(x, adj)\n if batch_index is not None:\n batch_size = int(torch.max(batch_index)) + 1\n out = torch.zeros(batch_size, x.shape[1]).to(x.device)\n out = out.scatter_add_(dim=0, index=batch_index.view(-1, 1).repeat(1, x.shape[1]), src=x)\n else:\n out = torch.sum(x, dim=0)\n out = self.dropout(self.linear(out))\n\n return out\n\n\nclass GCNConv(nn.Module):\n r\"\"\"\n\n Description\n -----------\n GCN convolutional layer.\n\n Parameters\n ----------\n in_features : int\n Dimension of input features.\n out_features : int\n Dimension of output features.\n activation : func of torch.nn.functional, optional\n Activation function. Default: ``None``.\n residual : bool, optional\n Whether to use residual connection. Default: ``False``.\n dropout : float, optional\n Dropout rate during training. 
Default: ``0.0``.\n\n \"\"\"\n\n def __init__(self,\n in_features,\n out_features,\n activation=None,\n residual=False,\n dropout=0.0):\n super(GCNConv, self).__init__()\n self.in_features = in_features\n self.out_features = out_features\n self.linear = nn.Linear(in_features, out_features)\n\n if residual:\n self.residual = nn.Linear(in_features, out_features)\n else:\n self.residual = None\n self.activation = activation\n\n if dropout > 0.0:\n self.dropout = nn.Dropout(dropout)\n else:\n self.dropout = None\n\n self.reset_parameters()\n\n def reset_parameters(self):\n \"\"\"Reset parameters.\"\"\"\n if self.activation == F.leaky_relu:\n gain = nn.init.calculate_gain('leaky_relu')\n else:\n gain = nn.init.calculate_gain('relu')\n nn.init.xavier_normal_(self.linear.weight, gain=gain)\n\n def forward(self, x, adj):\n r\"\"\"\n\n Parameters\n ----------\n x : torch.Tensor\n Tensor of input features.\n adj : torch.SparseTensor\n Sparse tensor of adjacency matrix.\n\n Returns\n -------\n x : torch.Tensor\n Output of layer.\n\n \"\"\"\n\n x = self.linear(x)\n x = torch.sparse.mm(adj, x)\n if self.activation is not None:\n x = self.activation(x)\n if self.residual is not None:\n x = x + self.residual(x)\n if self.dropout is not None:\n x = self.dropout(x)\n\n return x\n" ]
[ [ "torch.sum", "torch.nn.init.calculate_gain", "torch.nn.Linear", "torch.nn.init.xavier_normal_", "torch.nn.LayerNorm", "torch.nn.ModuleList", "torch.max", "torch.zeros", "torch.sparse.mm", "torch.nn.Dropout" ] ]
gucci-j/intro-deep-learning-keras
[ "ef79eb44b6080918067fe6fc38e0b79ecf88189c" ]
[ "chapter6/compare_weight_decay.py" ]
[ "from keras import Model, optimizers, initializers, regularizers\nfrom keras.layers import Input, Dense, Activation\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.utils import to_categorical\nfrom keras.datasets import fashion_mnist\nimport matplotlib.pyplot as plt\n\n# パラメータ + ハイパーパラメータ\nimg_shape = (28 * 28, )\nhidden_dim = 100\noutput_dim = 10\nbatch_size = 128\nlearning_rate = 0.01\nepochs = 15\n_init = initializers.RandomNormal(mean=0.0, stddev=0.01, seed=None)\n_wd = regularizers.l2(0.1)\n\ndef build_model(wd):\n # モデルを定義する\n if wd is True:\n _input = Input(shape=img_shape)\n _hidden = Dense(hidden_dim, kernel_initializer=_init, kernel_regularizer=_wd)(_input)\n _hidden = BatchNormalization()(_hidden)\n _hidden = Activation('relu')(_hidden)\n _hidden = Dense(hidden_dim, kernel_initializer=_init, kernel_regularizer=_wd)(_hidden)\n _hidden = BatchNormalization()(_hidden)\n _hidden = Activation('relu')(_hidden)\n _hidden = Dense(hidden_dim, kernel_initializer=_init, kernel_regularizer=_wd)(_hidden)\n _hidden = BatchNormalization()(_hidden)\n _hidden = Activation('relu')(_hidden)\n _hidden = Dense(hidden_dim, kernel_initializer=_init, kernel_regularizer=_wd)(_hidden)\n _hidden = BatchNormalization()(_hidden)\n _hidden = Activation('relu')(_hidden)\n _output = Dense(output_dim, activation='softmax')(_hidden)\n\n model = Model(inputs=_input, outputs=_output)\n return model\n\n else:\n _input = Input(shape=img_shape)\n _hidden = Dense(hidden_dim, kernel_initializer=_init)(_input)\n _hidden = BatchNormalization()(_hidden)\n _hidden = Activation('relu')(_hidden)\n _hidden = Dense(hidden_dim, kernel_initializer=_init)(_hidden)\n _hidden = BatchNormalization()(_hidden)\n _hidden = Activation('relu')(_hidden)\n _hidden = Dense(hidden_dim, kernel_initializer=_init)(_hidden)\n _hidden = BatchNormalization()(_hidden)\n _hidden = Activation('relu')(_hidden)\n _hidden = Dense(hidden_dim, kernel_initializer=_init)(_hidden)\n _hidden = BatchNormalization()(_hidden)\n _hidden = Activation('relu')(_hidden)\n _output = Dense(output_dim, activation='softmax')(_hidden)\n\n model = Model(inputs=_input, outputs=_output)\n return model\n\n\n\ndef load_data():\n # データを読み込む\n (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()\n x_train = x_train[:30000]\n y_train = y_train[:30000]\n x_train = x_train.reshape(30000, 784)\n x_test = x_test.reshape(10000, 784)\n x_train = x_train.astype('float') / 255.\n x_test = x_test.astype('float') / 255.\n print(f'Before: {y_train.shape}')\n print(f'y_train[0]: {y_train[0]}')\n y_train = to_categorical(y_train, num_classes=output_dim)\n print(f'After: {y_train.shape}')\n print(f'y_train[0]: {y_train[0]}')\n y_test = to_categorical(y_test, num_classes=output_dim)\n\n return x_train, y_train, x_test, y_test\n\n\ndef set_flag():\n # バッチ正規化フラグの定義\n flag = {}\n flag['With Weight decay'] = True\n flag['Without Weight decay'] = False\n\n return flag\n\n\ndef main():\n x_train, y_train, x_test, y_test = load_data()\n flag = set_flag()\n\n results = {}\n for key in flag.keys():\n print(f'---Now running: {key} model---')\n model = build_model(flag[key])\n model.compile(optimizer=optimizers.SGD(lr=learning_rate), loss='categorical_crossentropy', metrics=['accuracy'])\n results[key] = model.fit(x=x_train, y=y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test))\n\n plt.figure()\n for key in flag.keys():\n acc = results[key].history['acc']\n val_acc = results[key].history['val_acc']\n plt.plot(range(1, epochs+1), 
acc, marker='.', label='train')\n plt.plot(range(1, epochs+1), val_acc, marker='.', label='test')\n plt.legend(loc='best', fontsize=10)\n plt.grid()\n plt.xlabel('Epochs')\n plt.ylabel('Accuracy')\n plt.savefig('acc_' + key + '.png')\n plt.clf()\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.figure", "matplotlib.pyplot.grid", "matplotlib.pyplot.savefig", "matplotlib.pyplot.clf", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlabel" ] ]
vuanvin/pytorch
[ "9267fd8d7395074001ad7cf2a8f28082dbff6b0b" ]
[ "test/distributed/fsdp/test_fsdp_checkpoint.py" ]
[ "# Owner(s): [\"oncall: distributed\"]\n\nimport contextlib\nfrom copy import deepcopy\nfrom functools import partial\n\nimport torch\nimport torch.nn as nn\nfrom torch.distributed._fsdp.fully_sharded_data_parallel import (\n FullyShardedDataParallel as FSDP,\n CPUOffload,\n)\nfrom torch.distributed.algorithms._checkpoint._checkpoint_wrapper import (\n checkpoint_wrapper,\n)\nfrom torch.testing._internal.common_distributed import (\n skip_if_lt_x_gpu,\n)\nfrom torch.testing._internal.common_fsdp import (\n FSDPTest,\n _maybe_wrap_fsdp,\n)\nfrom torch.testing._internal.common_utils import (\n run_tests,\n parametrize,\n instantiate_parametrized_tests,\n)\nfrom torch.utils.checkpoint import checkpoint\n\n\nclass TestFSDPCheckpoint(FSDPTest):\n class SequentialModule(nn.Module):\n def __init__(\n self,\n checkpoint_layer=False,\n offload_activations=False,\n wrap_fsdp=False,\n *fsdp_args,\n **fsdp_kwargs,\n ):\n torch.manual_seed(0)\n torch.cuda.manual_seed(0)\n super().__init__()\n l1 = nn.Linear(3, 3).cuda()\n l2 = nn.Linear(3, 3).cuda()\n l3 = nn.Linear(3, 3).cuda()\n\n if checkpoint_layer:\n ckpt_wrapper = partial(\n checkpoint_wrapper, offload_to_cpu=offload_activations\n )\n\n l1 = ckpt_wrapper(l1)\n l2 = ckpt_wrapper(l2)\n l3 = ckpt_wrapper(l3)\n\n fsdp_wrapper = partial(\n _maybe_wrap_fsdp, wrap_fsdp=wrap_fsdp, *fsdp_args, **fsdp_kwargs\n )\n self.ffn = nn.Sequential(\n fsdp_wrapper(l1),\n fsdp_wrapper(l2),\n fsdp_wrapper(l3),\n )\n\n def forward(self, x):\n return self.ffn(x)\n\n def _verify_parity(self, losses, outputs, models):\n assert losses\n assert outputs\n assert models\n\n for (l, o) in zip(losses[1:], outputs[1:]):\n self.assertEqual(losses[0], l)\n self.assertEqual(outputs[0], o)\n\n # Verify grads\n ref_model = models[0]\n ref_grads = [p.grad for p in ref_model.parameters()]\n for m in models[1:]:\n grads = [p.grad for p in m.parameters()]\n for ref_g, g in zip(ref_grads, grads):\n self.assertEqual(ref_g, g)\n\n @skip_if_lt_x_gpu(2)\n @parametrize(\n \"cpu_offload\",\n [CPUOffload(offload_params=True), CPUOffload(offload_params=False)],\n )\n @parametrize(\"offload_activations\", [True, False])\n def test_checkpoint_fsdp_wrapping(self, cpu_offload, offload_activations):\n # Test checkpoint(FSDP(layer1), FSDP(layer2), ....)\n ckpt_sequential_wrapped_fsdp = checkpoint_wrapper(\n TestFSDPCheckpoint.SequentialModule(\n wrap_fsdp=True, cpu_offload=cpu_offload\n ),\n offload_to_cpu=offload_activations,\n )\n # Test FSDP(checkpoint(layer1)), FSDP(checkpoint(layer2)), ....\n inner_ckpt = TestFSDPCheckpoint.SequentialModule(\n checkpoint_layer=True,\n offload_activations=offload_activations,\n wrap_fsdp=True,\n cpu_offload=cpu_offload,\n )\n\n baseline = TestFSDPCheckpoint.SequentialModule(\n wrap_fsdp=True, cpu_offload=cpu_offload\n )\n\n # note that reentrant-based checkpointing requires inputs to have grad\n # flag set.\n inp = torch.randn(10, 3, device=torch.cuda.current_device(), requires_grad=True)\n\n models = [ckpt_sequential_wrapped_fsdp, inner_ckpt, baseline]\n\n offload_to_cpu_event = \"Memcpy DtoH\"\n\n for i in range(2):\n losses = []\n outputs = []\n for m in models:\n check_offload = m != baseline and i == 0 and offload_activations\n profiler_ctx = (\n torch.profiler.profile(use_cuda=True)\n if check_offload\n else contextlib.suppress()\n )\n with profiler_ctx as prof:\n out = m(inp)\n\n if check_offload:\n event_names = [event.name for event in prof.events()]\n offload_occured = any(\n offload_to_cpu_event in name for name in event_names\n )\n 
self.assertTrue(offload_occured)\n loss = out.sum()\n loss.backward()\n losses.append(loss)\n outputs.append(out)\n\n self._verify_parity(losses, outputs, models)\n\n @skip_if_lt_x_gpu(2)\n @parametrize(\n \"cpu_offload\",\n [CPUOffload(offload_params=True), CPUOffload(offload_params=False)],\n )\n @parametrize(\"offload_activations\", [True, False])\n def test_basic_checkpoint_end_to_end(self, cpu_offload, offload_activations):\n seq = TestFSDPCheckpoint.SequentialModule().to(torch.cuda.current_device())\n # Runs FSDP with no checkpointing\n fsdp_only_seq = FSDP(deepcopy(seq), cpu_offload=cpu_offload)\n # Runs checkpoint-wrapped FSDP\n checkpointed_fsdp = checkpoint_wrapper(\n FSDP(deepcopy(seq), cpu_offload=cpu_offload),\n offload_to_cpu=offload_activations,\n )\n # Runs FSDP-wrapped checkpointed module\n fsdp_wrapped_checkpoint = FSDP(\n checkpoint_wrapper(deepcopy(seq), offload_to_cpu=offload_activations),\n cpu_offload=cpu_offload,\n )\n # Runs FSDP with manual calls to checkpoint.\n fsdp_call_checkpoint = FSDP(deepcopy(seq), cpu_offload=cpu_offload)\n # note that reentrant-based checkpointing requires inputs to have grad\n # flag set.\n\n inp = torch.randn(10, 3, device=torch.cuda.current_device(), requires_grad=True)\n\n models = [\n fsdp_only_seq,\n checkpointed_fsdp,\n fsdp_wrapped_checkpoint,\n fsdp_call_checkpoint,\n ]\n\n offload_to_cpu_event = \"Memcpy DtoH\"\n\n for i in range(6):\n losses = []\n outputs = []\n for m in models:\n check_offload = m != fsdp_only_seq and i == 0 and offload_activations\n profiler_ctx = (\n torch.profiler.profile(use_cuda=True)\n if check_offload\n else contextlib.suppress()\n )\n with profiler_ctx as prof:\n if m == fsdp_call_checkpoint:\n offload_ctx = (\n torch.autograd.graph.save_on_cpu(pin_memory=True)\n if offload_activations\n else contextlib.suppress()\n )\n with offload_ctx:\n out = checkpoint(m, inp)\n else:\n out = m(inp)\n\n if check_offload:\n event_names = [event.name for event in prof.events()]\n offload_occured = any(\n offload_to_cpu_event in name for name in event_names\n )\n self.assertTrue(offload_occured)\n loss = out.sum()\n loss.backward()\n losses.append(loss)\n outputs.append(out)\n\n self._verify_parity(losses, outputs, models)\n\ninstantiate_parametrized_tests(TestFSDPCheckpoint)\n\nif __name__ == \"__main__\":\n run_tests()\n" ]
[ [ "torch.testing._internal.common_utils.instantiate_parametrized_tests", "torch.utils.checkpoint.checkpoint", "torch.nn.Linear", "torch.testing._internal.common_utils.parametrize", "torch.profiler.profile", "torch.cuda.manual_seed", "torch.testing._internal.common_utils.run_tests", "torch.manual_seed", "torch.cuda.current_device", "torch.autograd.graph.save_on_cpu", "torch.distributed._fsdp.fully_sharded_data_parallel.CPUOffload", "torch.testing._internal.common_distributed.skip_if_lt_x_gpu" ] ]
AngeloMono/yolact
[ "3be8b635972cfec845eaf7e0fa7d380d7a002c27" ]
[ "train.py" ]
[ "from data import *\nfrom yolact_utils.augmentations import SSDAugmentation, BaseTransform\nfrom yolact_utils.functions import MovingAverage, SavePath\nfrom yolact_utils.logger import Log\nfrom yolact_utils import timer\nfrom layers.modules import MultiBoxLoss\nfrom yolact import Yolact\nimport os\nimport sys\nimport time\nimport math, random\nfrom pathlib import Path\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.backends.cudnn as cudnn\nimport torch.nn.init as init\nimport torch.utils.data as data\nimport numpy as np\nimport argparse\nimport datetime\n\n# Oof\nimport eval as eval_script\n\ndef str2bool(v):\n return v.lower() in (\"yes\", \"true\", \"t\", \"1\")\n\n\nparser = argparse.ArgumentParser(\n description='Yolact Training Script')\nparser.add_argument('--batch_size', default=8, type=int,\n help='Batch size for training')\nparser.add_argument('--resume', default=None, type=str,\n help='Checkpoint state_dict file to resume training from. If this is \"interrupt\"'\\\n ', the model will resume training from the interrupt file.')\nparser.add_argument('--start_iter', default=-1, type=int,\n help='Resume training at this iter. If this is -1, the iteration will be'\\\n 'determined from the file name.')\nparser.add_argument('--num_workers', default=4, type=int,\n help='Number of workers used in dataloading')\nparser.add_argument('--cuda', default=True, type=str2bool,\n help='Use CUDA to train model')\nparser.add_argument('--lr', '--learning_rate', default=None, type=float,\n help='Initial learning rate. Leave as None to read this from the config.')\nparser.add_argument('--momentum', default=None, type=float,\n help='Momentum for SGD. Leave as None to read this from the config.')\nparser.add_argument('--decay', '--weight_decay', default=None, type=float,\n help='Weight decay for SGD. Leave as None to read this from the config.')\nparser.add_argument('--gamma', default=None, type=float,\n help='For each lr step, what to multiply the lr by. Leave as None to read this from the config.')\nparser.add_argument('--save_folder', default='weights/',\n help='Directory for saving checkpoint models.')\nparser.add_argument('--log_folder', default='logs/',\n help='Directory for saving logs.')\nparser.add_argument('--config', default=None,\n help='The config object to use.')\nparser.add_argument('--save_interval', default=10000, type=int,\n help='The number of iterations between saving the model.')\nparser.add_argument('--validation_size', default=5000, type=int,\n help='The number of images to use for validation.')\nparser.add_argument('--validation_epoch', default=2, type=int,\n help='Output validation information every n iterations. If -1, do no validation.')\nparser.add_argument('--keep_latest', dest='keep_latest', action='store_true',\n help='Only keep the latest checkpoint instead of each one.')\nparser.add_argument('--keep_latest_interval', default=100000, type=int,\n help='When --keep_latest is on, don\\'t delete the latest file at these intervals. This should be a multiple of save_interval or 0.')\nparser.add_argument('--dataset', default=None, type=str,\n help='If specified, override the dataset specified in the config with this one (example: coco2017_dataset).')\nparser.add_argument('--no_log', dest='log', action='store_false',\n help='Don\\'t log per iteration information into log_folder.')\nparser.add_argument('--log_gpu', dest='log_gpu', action='store_true',\n help='Include GPU information in the logs. 
Nvidia-smi tends to be slow, so set this with caution.')\nparser.add_argument('--no_interrupt', dest='interrupt', action='store_false',\n help='Don\\'t save an interrupt when KeyboardInterrupt is caught.')\nparser.add_argument('--batch_alloc', default=None, type=str,\n help='If using multiple GPUS, you can set this to be a comma separated list detailing which GPUs should get what local batch size (It should add up to your total batch size).')\nparser.add_argument('--no_autoscale', dest='autoscale', action='store_false',\n help='YOLACT will automatically scale the lr and the number of iterations depending on the batch size. Set this if you want to disable that.')\n\nparser.set_defaults(keep_latest=False, log=True, log_gpu=False, interrupt=True, autoscale=True)\nargs = parser.parse_args()\n\nif args.config is not None:\n set_cfg(args.config)\n\nif args.dataset is not None:\n set_dataset(args.dataset)\n\nif args.autoscale and args.batch_size != 8:\n factor = args.batch_size / 8\n if __name__ == '__main__':\n print('Scaling parameters by %.2f to account for a batch size of %d.' % (factor, args.batch_size))\n\n cfg.lr *= factor\n cfg.max_iter //= factor\n cfg.lr_steps = [x // factor for x in cfg.lr_steps]\n\n# Update training parameters from the config if necessary\ndef replace(name):\n if getattr(args, name) == None: setattr(args, name, getattr(cfg, name))\nreplace('lr')\nreplace('decay')\nreplace('gamma')\nreplace('momentum')\n\n# This is managed by set_lr\ncur_lr = args.lr\n\nif torch.cuda.device_count() == 0:\n print('No GPUs detected. Exiting...')\n exit(-1)\n\nif args.batch_size // torch.cuda.device_count() < 6:\n if __name__ == '__main__':\n print('Per-GPU batch size is less than the recommended limit for batch norm. Disabling batch norm.')\n cfg.freeze_bn = True\n\nloss_types = ['B', 'C', 'M', 'P', 'D', 'E', 'S', 'I']\n\nif torch.cuda.is_available():\n if args.cuda:\n torch.set_default_tensor_type('torch.cuda.FloatTensor')\n if not args.cuda:\n print(\"WARNING: It looks like you have a CUDA device, but aren't \" +\n \"using CUDA.\\nRun with --cuda for optimal training speed.\")\n torch.set_default_tensor_type('torch.FloatTensor')\nelse:\n torch.set_default_tensor_type('torch.FloatTensor')\n\nclass NetLoss(nn.Module):\n \"\"\"\n A wrapper for running the network and computing the loss\n This is so we can more efficiently use DataParallel.\n \"\"\"\n \n def __init__(self, net:Yolact, criterion:MultiBoxLoss):\n super().__init__()\n\n self.net = net\n self.criterion = criterion\n \n def forward(self, images, targets, masks, num_crowds):\n preds = self.net(images)\n losses = self.criterion(self.net, preds, targets, masks, num_crowds)\n return losses\n\nclass CustomDataParallel(nn.DataParallel):\n \"\"\"\n This is a custom version of DataParallel that works better with our training data.\n It should also be faster than the general case.\n \"\"\"\n\n def scatter(self, inputs, kwargs, device_ids):\n # More like scatter and data prep at the same time. 
The point is we prep the data in such a way\n # that no scatter is necessary, and there's no need to shuffle stuff around different GPUs.\n devices = ['cuda:' + str(x) for x in device_ids]\n splits = prepare_data(inputs[0], devices, allocation=args.batch_alloc)\n\n return [[split[device_idx] for split in splits] for device_idx in range(len(devices))], \\\n [kwargs] * len(devices)\n\n def gather(self, outputs, output_device):\n out = {}\n\n for k in outputs[0]:\n out[k] = torch.stack([output[k].to(output_device) for output in outputs])\n \n return out\n\ndef train():\n if not os.path.exists(args.save_folder):\n os.mkdir(args.save_folder)\n\n dataset = COCODetection(image_path=cfg.dataset.train_images,\n info_file=cfg.dataset.train_info,\n transform=SSDAugmentation(MEANS))\n \n if args.validation_epoch > 0:\n setup_eval()\n val_dataset = COCODetection(image_path=cfg.dataset.valid_images,\n info_file=cfg.dataset.valid_info,\n transform=BaseTransform(MEANS))\n\n # Parallel wraps the underlying module, but when saving and loading we don't want that\n yolact_net = Yolact()\n net = yolact_net\n net.train()\n\n if args.log:\n log = Log(cfg.name, args.log_folder, dict(args._get_kwargs()),\n overwrite=(args.resume is None), log_gpu_stats=args.log_gpu)\n\n # I don't use the timer during training (I use a different timing method).\n # Apparently there's a race condition with multiple GPUs, so disable it just to be safe.\n timer.disable_all()\n\n # Both of these can set args.resume to None, so do them before the check \n if args.resume == 'interrupt':\n args.resume = SavePath.get_interrupt(args.save_folder)\n elif args.resume == 'latest':\n args.resume = SavePath.get_latest(args.save_folder, cfg.name)\n\n if args.resume is not None:\n print('Resuming training, loading {}...'.format(args.resume))\n yolact_net.load_weights(args.resume)\n\n if args.start_iter == -1:\n args.start_iter = SavePath.from_str(args.resume).iteration\n else:\n print('Initializing weights...')\n yolact_net.init_weights(backbone_path=args.save_folder + cfg.backbone.path)\n\n optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=args.momentum,\n weight_decay=args.decay)\n criterion = MultiBoxLoss(num_classes=cfg.num_classes,\n pos_threshold=cfg.positive_iou_threshold,\n neg_threshold=cfg.negative_iou_threshold,\n negpos_ratio=cfg.ohem_negpos_ratio)\n\n if args.batch_alloc is not None:\n args.batch_alloc = [int(x) for x in args.batch_alloc.split(',')]\n if sum(args.batch_alloc) != args.batch_size:\n print('Error: Batch allocation (%s) does not sum to batch size (%s).' % (args.batch_alloc, args.batch_size))\n exit(-1)\n\n net = CustomDataParallel(NetLoss(net, criterion))\n if args.cuda:\n net = net.cuda()\n \n # Initialize everything\n if not cfg.freeze_bn: yolact_net.freeze_bn() # Freeze bn so we don't kill our means\n yolact_net(torch.zeros(1, 3, cfg.max_size, cfg.max_size).cuda())\n if not cfg.freeze_bn: yolact_net.freeze_bn(True)\n\n # loss counters\n loc_loss = 0\n conf_loss = 0\n iteration = max(args.start_iter, 0)\n last_time = time.time()\n\n epoch_size = len(dataset) // args.batch_size\n num_epochs = math.ceil(cfg.max_iter / epoch_size)\n \n # Which learning rate adjustment step are we on? 
lr' = lr * gamma ^ step_index\n step_index = 0\n\n data_loader = data.DataLoader(dataset, args.batch_size,\n num_workers=args.num_workers,\n shuffle=True, collate_fn=detection_collate,\n pin_memory=True)\n \n \n save_path = lambda epoch, iteration: SavePath(cfg.name, epoch, iteration).get_path(root=args.save_folder)\n time_avg = MovingAverage()\n\n global loss_types # Forms the print order\n loss_avgs = { k: MovingAverage(100) for k in loss_types }\n\n print('Begin training!')\n print()\n # try-except so you can use ctrl+c to save early and stop training\n try:\n for epoch in range(num_epochs):\n # Resume from start_iter\n if (epoch+1)*epoch_size < iteration:\n continue\n \n for datum in data_loader:\n # Stop if we've reached an epoch if we're resuming from start_iter\n if iteration == (epoch+1)*epoch_size:\n break\n\n # Stop at the configured number of iterations even if mid-epoch\n if iteration == cfg.max_iter:\n break\n\n # Change a config setting if we've reached the specified iteration\n changed = False\n for change in cfg.delayed_settings:\n if iteration >= change[0]:\n changed = True\n cfg.replace(change[1])\n\n # Reset the loss averages because things might have changed\n for avg in loss_avgs:\n avg.reset()\n \n # If a config setting was changed, remove it from the list so we don't keep checking\n if changed:\n cfg.delayed_settings = [x for x in cfg.delayed_settings if x[0] > iteration]\n\n # Warm up by linearly interpolating the learning rate from some smaller value\n if cfg.lr_warmup_until > 0 and iteration <= cfg.lr_warmup_until:\n set_lr(optimizer, (args.lr - cfg.lr_warmup_init) * (iteration / cfg.lr_warmup_until) + cfg.lr_warmup_init)\n\n # Adjust the learning rate at the given iterations, but also if we resume from past that iteration\n while step_index < len(cfg.lr_steps) and iteration >= cfg.lr_steps[step_index]:\n step_index += 1\n set_lr(optimizer, args.lr * (args.gamma ** step_index))\n \n # Zero the grad to get ready to compute gradients\n optimizer.zero_grad()\n\n # Forward Pass + Compute loss at the same time (see CustomDataParallel and NetLoss)\n losses = net(datum)\n \n losses = { k: (v).mean() for k,v in losses.items() } # Mean here because Dataparallel\n loss = sum([losses[k] for k in losses])\n \n # no_inf_mean removes some components from the loss, so make sure to backward through all of it\n # all_loss = sum([v.mean() for v in losses.values()])\n\n # Backprop\n loss.backward() # Do this to free up vram even if loss is not finite\n if torch.isfinite(loss).item():\n optimizer.step()\n \n # Add the loss to the moving average for bookkeeping\n for k in losses:\n loss_avgs[k].add(losses[k].item())\n\n cur_time = time.time()\n elapsed = cur_time - last_time\n last_time = cur_time\n\n # Exclude graph setup from the timing information\n if iteration != args.start_iter:\n time_avg.add(elapsed)\n\n if iteration % 10 == 0:\n eta_str = str(datetime.timedelta(seconds=(cfg.max_iter-iteration) * time_avg.get_avg())).split('.')[0]\n \n total = sum([loss_avgs[k].get_avg() for k in losses])\n loss_labels = sum([[k, loss_avgs[k].get_avg()] for k in loss_types if k in losses], [])\n \n print(('[%3d] %7d ||' + (' %s: %.3f |' * len(losses)) + ' T: %.3f || ETA: %s || timer: %.3f')\n % tuple([epoch, iteration] + loss_labels + [total, eta_str, elapsed]), flush=True)\n\n if args.log:\n precision = 5\n loss_info = {k: round(losses[k].item(), precision) for k in losses}\n loss_info['T'] = round(loss.item(), precision)\n\n if args.log_gpu:\n log.log_gpu_stats = (iteration % 10 == 0) # 
nvidia-smi is sloooow\n \n log.log('train', loss=loss_info, epoch=epoch, iter=iteration,\n lr=round(cur_lr, 10), elapsed=elapsed)\n\n log.log_gpu_stats = args.log_gpu\n \n iteration += 1\n\n if iteration % args.save_interval == 0 and iteration != args.start_iter:\n if args.keep_latest:\n latest = SavePath.get_latest(args.save_folder, cfg.name)\n\n print('Saving state, iter:', iteration)\n yolact_net.save_weights(save_path(epoch, iteration))\n\n if args.keep_latest and latest is not None:\n if args.keep_latest_interval <= 0 or iteration % args.keep_latest_interval != args.save_interval:\n print('Deleting old save...')\n os.remove(latest)\n \n # This is done per epoch\n if args.validation_epoch > 0:\n if epoch % args.validation_epoch == 0 and epoch > 0:\n compute_validation_map(epoch, iteration, yolact_net, val_dataset, log if args.log else None)\n \n # Compute validation mAP after training is finished\n compute_validation_map(epoch, iteration, yolact_net, val_dataset, log if args.log else None)\n except KeyboardInterrupt:\n if args.interrupt:\n print('Stopping early. Saving network...')\n \n # Delete previous copy of the interrupted network so we don't spam the weights folder\n SavePath.remove_interrupt(args.save_folder)\n \n yolact_net.save_weights(save_path(epoch, repr(iteration) + '_interrupt'))\n exit()\n\n yolact_net.save_weights(save_path(epoch, iteration))\n\n\ndef set_lr(optimizer, new_lr):\n for param_group in optimizer.param_groups:\n param_group['lr'] = new_lr\n \n global cur_lr\n cur_lr = new_lr\n\ndef gradinator(x):\n x.requires_grad = False\n return x\n\ndef prepare_data(datum, devices:list=None, allocation:list=None):\n with torch.no_grad():\n if devices is None:\n devices = ['cuda:0'] if args.cuda else ['cpu']\n if allocation is None:\n allocation = [args.batch_size // len(devices)] * (len(devices) - 1)\n allocation.append(args.batch_size - sum(allocation)) # The rest might need more/less\n \n images, (targets, masks, num_crowds) = datum\n\n cur_idx = 0\n for device, alloc in zip(devices, allocation):\n for _ in range(alloc):\n images[cur_idx] = gradinator(images[cur_idx].to(device))\n targets[cur_idx] = gradinator(targets[cur_idx].to(device))\n masks[cur_idx] = gradinator(masks[cur_idx].to(device))\n cur_idx += 1\n\n if cfg.preserve_aspect_ratio:\n # Choose a random size from the batch\n _, h, w = images[random.randint(0, len(images)-1)].size()\n\n for idx, (image, target, mask, num_crowd) in enumerate(zip(images, targets, masks, num_crowds)):\n images[idx], targets[idx], masks[idx], num_crowds[idx] \\\n = enforce_size(image, target, mask, num_crowd, w, h)\n \n cur_idx = 0\n split_images, split_targets, split_masks, split_numcrowds \\\n = [[None for alloc in allocation] for _ in range(4)]\n\n for device_idx, alloc in enumerate(allocation):\n split_images[device_idx] = torch.stack(images[cur_idx:cur_idx+alloc], dim=0)\n split_targets[device_idx] = targets[cur_idx:cur_idx+alloc]\n split_masks[device_idx] = masks[cur_idx:cur_idx+alloc]\n split_numcrowds[device_idx] = num_crowds[cur_idx:cur_idx+alloc]\n\n cur_idx += alloc\n\n return split_images, split_targets, split_masks, split_numcrowds\n\ndef no_inf_mean(x:torch.Tensor):\n \"\"\"\n Computes the mean of a vector, throwing out all inf values.\n If there are no non-inf values, this will return inf (i.e., just the normal mean).\n \"\"\"\n\n no_inf = [a for a in x if torch.isfinite(a)]\n\n if len(no_inf) > 0:\n return sum(no_inf) / len(no_inf)\n else:\n return x.mean()\n\ndef compute_validation_loss(net, data_loader, 
criterion):\n global loss_types\n\n with torch.no_grad():\n losses = {}\n \n # Don't switch to eval mode because we want to get losses\n iterations = 0\n for datum in data_loader:\n images, targets, masks, num_crowds = prepare_data(datum)\n out = net(images)\n\n wrapper = ScatterWrapper(targets, masks, num_crowds)\n _losses = criterion(out, wrapper, wrapper.make_mask())\n \n for k, v in _losses.items():\n v = v.mean().item()\n if k in losses:\n losses[k] += v\n else:\n losses[k] = v\n\n iterations += 1\n if args.validation_size <= iterations * args.batch_size:\n break\n \n for k in losses:\n losses[k] /= iterations\n \n \n loss_labels = sum([[k, losses[k]] for k in loss_types if k in losses], [])\n print(('Validation ||' + (' %s: %.3f |' * len(losses)) + ')') % tuple(loss_labels), flush=True)\n\ndef compute_validation_map(epoch, iteration, yolact_net, dataset, log:Log=None):\n with torch.no_grad():\n yolact_net.eval()\n \n start = time.time()\n print()\n print(\"Computing validation mAP (this may take a while)...\", flush=True)\n val_info = eval_script.evaluate(yolact_net, dataset, train_mode=True)\n end = time.time()\n\n if log is not None:\n log.log('val', val_info, elapsed=(end - start), epoch=epoch, iter=iteration)\n\n yolact_net.train()\n\ndef setup_eval():\n eval_script.parse_args(['--no_bar', '--max_images='+str(args.validation_size)])\n\nif __name__ == '__main__':\n train()\n" ]
[ [ "torch.utils.data.DataLoader", "torch.stack", "torch.no_grad", "torch.cuda.device_count", "torch.set_default_tensor_type", "torch.cuda.is_available", "torch.zeros", "torch.isfinite" ] ]
IandRover/meta-gradient_RL
[ "5d2539aceb9fa68b1849feac7d37741f9e5f83a3" ]
[ "models.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass ActorNetwork(nn.Module):\n\n def __init__(self,input_size,hidden_size,action_size):\n super(ActorNetwork, self).__init__()\n self.fc1 = nn.Linear(input_size,hidden_size)\n self.fc2 = nn.Linear(hidden_size,hidden_size)\n self.fc3 = nn.Linear(hidden_size,action_size)\n\n def forward(self,x):\n out = F.relu(self.fc1(x))\n out = F.relu(self.fc2(out))\n out = F.log_softmax(self.fc3(out))\n return out\n\nclass ValueNetwork(nn.Module):\n\n def __init__(self,input_size,hidden_size,output_size):\n super(ValueNetwork, self).__init__()\n self.fc1 = nn.Linear(input_size,hidden_size)\n self.fc2 = nn.Linear(hidden_size,hidden_size)\n self.fc3 = nn.Linear(hidden_size,output_size)\n\n def forward(self,x):\n out = F.relu(self.fc1(x))\n out = F.relu(self.fc2(out))\n out = self.fc3(out)\n return out" ]
[ [ "torch.nn.Linear" ] ]
data-umbrella/sprints-dashboard
[ "f8cdbe640fbc8001172b096065a79a9efe5c2829" ]
[ "scripts/app.py" ]
[ "import dash\n#import dash_core_components as dcc\nfrom dash import dcc\nfrom dash import html\n#import dash_html_components as html\nimport dash_bootstrap_components as dbc\nfrom dash.dependencies import Input, Output\nimport plotly.express as px\nfrom plotly import graph_objects as go\nimport pandas as pd\n\n# https://dash-bootstrap-components.opensource.faculty.ai/docs/components/tabs\n# check version of library\n# https://dash.gallery/Portal/\n# https://plotly.com/python/templates/\n# https://github.com/plotly/dash-sample-apps/tree/main/apps/dash-opioid-epidemic\n# Bootstrap themes: https://dash-bootstrap-components.opensource.faculty.ai/docs/themes/explorer/\n# Layout: https://dash-bootstrap-components.opensource.faculty.ai/docs/components/layout/\n\n# to do: play with layout\n# bar: give a default value\n# add a title at the top\n# id once per sheet\n# do one copy/ paste at a time\n# stylesheets: https://bootswatch.com/\n# [\"plotly\", \"plotly_white\", \"plotly_dark\", \"ggplot2\", \"seaborn\", \"simple_white\", \"none\"]\n\ntemplate = \"seaborn\"\nexternal_stylesheets = [dbc.themes.BOOTSTRAP, dbc.themes.SOLAR]\n\ndata_url = \"https://raw.githubusercontent.com/data-umbrella/data-umbrella-sprints-dashboard/main/data/data_derived/afme2_derived.csv\"\ndf = pd.read_csv(data_url)\n\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\n\n\npie_div = dbc.Row([\n dbc.Row([dbc.Col(html.H4(\"Dimensions:\"), width=3), \n dbc.Col(dcc.Dropdown(\n id='names', \n value='status_c', \n options=[{'value': x, 'label': x} \n for x in ['gender', 'contributor_status', 'status_c','continent_o', 'python_experience', 'country']],\n clearable=False\n )),\n dbc.Col(html.H4(\"Count:\"), width=3),\n dbc.Col(dcc.Dropdown(\n id='values', \n value='count_rows', \n options=[{'value': x, 'label': x} \n for x in ['count_rows']],\n clearable=False), width=3\n ) \n ])\n ,html.P(html.Br()), \n dcc.Graph(id=\"pie-chart\"),\n])\n\nbar_div = dbc.Row([\n dbc.Row([dbc.Col(html.H4(\"Dimensions:\"), width=3),\n dbc.Col(dcc.Dropdown(\n id='names_bar',\n value='gender',\n options=[{'value': x, 'label': x} \n for x in ['gender', 'contributor_status', 'continent_o', 'country', 'python_experience', 'primary_spoken_language', 'attendance_status','role']],\n clearable=False\n )),\n dbc.Col(html.H4(\"Count:\"), width=3),\n dbc.Col(dcc.Dropdown(\n id='values_bar',\n value='count_rows',\n options=[{'value': x, 'label': x} \n for x in ['count_rows']],\n clearable=False), width=3\n )\n ])\n ,html.P(html.Br()),\n dcc.Graph(id=\"bar-chart\"),\n])\n\n# FUNNEL CHART\nfig_funnel = go.Figure()\nfig_funnel.add_trace(go.Funnel(\n name = 'Women',\n y = [\"Applied\", \"RSVPd\", \"Attended\"],\n x = [25, 20, 14],\n textinfo = \"value+percent initial\"))\n\nfig_funnel.add_trace(go.Funnel(\n name = 'Men',\n y = [\"Applied\", \"RSVPd\", \"Attended\"],\n x = [49, 35, 26],\n textinfo = \"value+percent previous\",))\n\nfunnel_div = dbc.Row([\n dbc.Row([dbc.Col(html.H4(\"Dimensions:\"), width=3),\n dcc.Graph(figure=fig_funnel)\n ])\n])\n\n# MAP\nvariable = \"attendance_status\"\n\ndf['text'] = df['city'] + ', ' + df['country'] + ', '\n#+ df['state-province'] + ', ' \n#+ df['continent_o'] \n\nfig_map = px.scatter_geo(df, \n #locations=\"iso_alpha\", \n lat = \"lat\",\n lon = \"lng\",\n color=variable,\n hover_name=\"city\", #\"location\", \n #hover_name=\"country\", \n hover_data=[\"city\", \"country\",\"continent_o\"],\n #hover_data=[\"text\"],\n #text=\"city\",\n #text = dict (\n # size=8,\n #)\n #size=8,\n #mode = \"markers\",\n 
#marker = dict(\n # size = 8,\n # opacity = 0.8,\n # reversescale = True,\n # autocolorscale = False,\n # symbol = 'square',\n #),\n projection=\"natural earth\",\n title=f\"Chart: Geomap of {variable}\",\n )\n\nfig_map.update_geos(\n visible=True, resolution=50,\n showcountries=True, countrycolor=\"LightYellow\"\n)\nfig_map.update_layout(height=600, margin={\"r\":10,\"t\":0,\"l\":10,\"b\":0})\nfig_map.update_traces(marker_symbol=[\"circle-x\",\"circle-open-dot\",\"circle-dot\"]\n , selector=dict(type='scattergeo'))\n\ncard = dbc.Card(\n [\n dbc.CardHeader(\n dbc.Tabs(\n [\n dbc.Tab(label=\"AFME2 (Oct 2021)\", tab_id=\"AFME2\", tab_style={\"marginLeft\": \"left\"}),\n dbc.Tab(label=\"Tab 2\", tab_id=\"tab-2\", label_style={\"color\": \"#00AEF9\"}),\n ],\n id=\"card-tabs\",\n active_tab=\"tab-1\",\n ),\n ),\n html.Br(),\n # dbc.Tabs(\n # [\n # dbc.Tab(label=\"Tab 1\", activeTabClassName=\"fw-bold fst-italic\"),\n # dbc.Tab(label=\"Tab 2\", activeLabelClassName=\"text-success\"),\n # ]\n # ),\n dbc.CardBody(html.P(id=\"card-content\", className=\"card-text\")\n ),\n dbc.CardBody(\n [\n html.H4(\"Data Umbrella\", id=\"card-title\"),\n html.H2(\"Africa & Middle East scikit-learn Sprint\", id=\"card-value\"),\n html.P(\"October 2021\", id=\"card-description\")\n ]\n )\n ],\n)\napp.layout = html.Div([ \n dbc.Row([\n dbc.Col([card], width=12),\n ],\n align=\"center\",\n justify=\"center\"),\n dbc.Row([\n dbc.Col(html.H1(\"Pie Chart\"), width=4), \n dbc.Col(html.H1(\"Bar Chart\"), width=4),\n ],\n align=\"center\",\n justify=\"center\"), \n dbc.Row([\n dbc.Col(pie_div, width=4), \n dbc.Col(bar_div, width=4)\n ],\n align=\"center\",\n justify=\"center\"), \n # dbc.Row([\n # dbc.Col(\n # html.Div(\"A single, half-width column\"),\n # width={\"size\": 6, \"offset\": 3},\n # ),\n # dbc.Col(html.H1(\"Funnel Chart\"), width=4),\n # ],\n # align=\"center\",\n # justify=\"center\"),\n dbc.Row([\n dbc.Col(html.H1(\"Funnel Chart\"), width=4), \n dbc.Col(html.H1(\"Map\"), width=4),\n ],\n align=\"center\",\n justify=\"center\"), \n dbc.Row([\n dbc.Col(dcc.Graph(figure=fig_funnel), width=4),\n dbc.Col(dcc.Graph(figure=fig_map), width=4),\n ],\n align=\"center\",\n justify=\"center\"),\n # dbc.Row([\n # dbc.Col(html.H1(\"Map\"), width={\"size\": 6, \"offset\": 3})\n # ]), \n # dbc.Row([\n # dcc.Graph(figure=fig_map),\n # ]),\n ],\n className=\"pad-row\",\n)\n\n\n# PIE CHART\[email protected](\n Output(\"pie-chart\", \"figure\"), \n [Input(\"names\", \"value\"), \n Input(\"values\", \"value\")])\ndef generate_chart(names, values):\n fig = px.pie(df, values=values, names=names, template=template)\n fig.update_traces(textposition='inside', textinfo='value+percent+label')\n return fig\n\n# BAR CHART\[email protected](\n Output(\"bar-chart\", \"figure\"), \n [Input(\"names_bar\", \"value\"), \n Input(\"values_bar\", \"value\")])\ndef generate_chart(names, values):\n # Try 4: try to remove warning\n grouped_yr_status = df.groupby([names, 'status_c']).count().reset_index()\n df2 = grouped_yr_status[[names, 'status_c', values]] \n fig = px.bar(df2, x=values, y=names, color=\"status_c\", \n barmode=\"stack\", orientation=\"h\", text=values, template=template) \n \n fig.update_layout(barmode='stack', xaxis={'categoryorder':'total descending'})\n return fig\n\[email protected](\n Output(\"card-content\", \"children\"), [Input(\"card-tabs\", \"active_tab\")]\n)\ndef tab_content(active_tab):\n return \"This is tab: {}\".format(active_tab)\n\napp.run_server(debug=True)\n" ]
[ [ "pandas.read_csv" ] ]
Zilleplus/HML
[ "ab9510e27103bb7c14e801606bb25b7c4e17e8ea" ]
[ "HML/chapter6/ex7.py" ]
[ "# Train and fine-tune a Decision Tree for the oons dataset\n# by following these steps:\n# a. Use make_moons(n_samples=10000, noise=0.4)\n#\n# b. Use train_test_split() to split the datset in to a training\n# set and test set.\n#\n# c. Use grid search with cross-validation (with the help of the\n# GridSearchCV class) to find good hyperparameter values for a\n# DecisionTreeClassifier\n#\n# d. Train it on the ful training set using these hyper paremeters,\n# and measure your model's performance on the test set.\n# You should get roughtly 85% to 87% accuracy.\n\nfrom sklearn.datasets import make_moons\nfrom sklearn.model_selection import train_test_split, GridSearchCV\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.tree import DecisionTreeClassifier\n\nX, y = make_moons(n_samples=10000, noise=0.4)\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n\nparam_grid = {\n 'cf__max_depth': list(range(2, 10)),\n 'cf__min_samples_split': list(range(2, 10)),\n 'cf__min_samples_leaf': list(range(1, 10)),\n 'cf__min_weight_fraction_leaf': [0.0, 0.1, 0.2]\n}\npipeline = Pipeline([\n ('cf', DecisionTreeClassifier())\n])\nsearch = GridSearchCV(pipeline, param_grid, verbose=3)\nsearch.fit(X_train, y_train)\n\nbest_estimator = search.best_estimator_\n# It runs pretty quick, you get the result:\n# max_depth=7, min_samples_leaf=5\nbest_estimator\n\ny_pred_test = best_estimator.predict(X_test)\nsucc_rate = sum([1 if x == 0 else 0 for x in (y_pred_test - y_test)])\\\n / len(y_pred_test)\nsucc_rate\n" ]
[ [ "sklearn.tree.DecisionTreeClassifier", "sklearn.datasets.make_moons", "sklearn.model_selection.GridSearchCV", "sklearn.model_selection.train_test_split" ] ]
tjpuzhaolei/yolact
[ "d27ab4d5150d7ca5d12a950f0075b0886d9b9171" ]
[ "layers/modules/multibox_loss.py" ]
[ "# -*- coding: utf-8 -*-\nimport torch\nimport pdb\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom ..box_utils import match, log_sum_exp, decode, center_size, crop\n\nfrom data import cfg, mask_type, activation_func\n\nclass MultiBoxLoss(nn.Module):\n \"\"\"SSD Weighted Loss Function\n Compute Targets:\n 1) Produce Confidence Target Indices by matching ground truth boxes\n with (default) 'priorboxes' that have jaccard index > threshold parameter\n (default threshold: 0.5).\n 2) Produce localization target by 'encoding' variance into offsets of ground\n truth boxes and their matched 'priorboxes'.\n 3) Hard negative mining to filter the excessive number of negative examples\n that comes with using a large number of default bounding boxes.\n (default negative:positive ratio 3:1)\n Objective Loss:\n L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N\n Where, Lconf is the CrossEntropy Loss and Lloc is the SmoothL1 Loss\n weighted by α which is set to 1 by cross val.\n Args:\n c: class confidences,\n l: predicted boxes,\n g: ground truth boxes\n N: number of matched default boxes\n See: https://arxiv.org/pdf/1512.02325.pdf for more details.\n \"\"\"\n\n def __init__(self, num_classes, pos_threshold, neg_threshold, negpos_ratio):\n super(MultiBoxLoss, self).__init__()\n self.num_classes = num_classes\n \n self.pos_threshold = pos_threshold\n self.neg_threshold = neg_threshold\n self.negpos_ratio = negpos_ratio\n\n # If you output a proto mask with this area, your l1 loss will be l1_alpha\n # Note that the area is relative (so 1 would be the entire image)\n self.l1_expected_area = 20*20/70/70\n self.l1_alpha = 0.1\n\n def forward(self, predictions, wrapper, wrapper_mask):\n \"\"\"Multibox Loss\n Args:\n predictions (tuple): A tuple containing loc preds, conf preds,\n mask preds, and prior boxes from SSD net.\n loc shape: torch.size(batch_size,num_priors,4)\n conf shape: torch.size(batch_size,num_priors,num_classes)\n masks shape: torch.size(batch_size,num_priors,mask_dim)\n priors shape: torch.size(num_priors,4)\n proto* shape: torch.size(batch_size,mask_h,mask_w,mask_dim)\n\n targets (list<tensor>): Ground truth boxes and labels for a batch,\n shape: [batch_size][num_objs,5] (last idx is the label).\n\n masks (list<tensor>): Ground truth masks for each object in each image,\n shape: [batch_size][num_objs,im_height,im_width]\n\n num_crowds (list<int>): Number of crowd annotations per batch. 
The crowd\n annotations should be the last num_crowds elements of targets and masks.\n \n * Only if mask_type == lincomb\n \"\"\"\n\n loc_data = predictions['loc']\n conf_data = predictions['conf']\n mask_data = predictions['mask']\n priors = predictions['priors']\n\n if cfg.mask_type == mask_type.lincomb:\n proto_data = predictions['proto']\n \n if cfg.use_instance_coeff:\n inst_data = predictions['inst']\n else:\n inst_data = None\n \n targets, masks, num_crowds = wrapper.get_args(wrapper_mask)\n labels = [None] * len(targets) # Used in sem segm loss\n\n batch_size = loc_data.size(0)\n # This is necessary for training on multiple GPUs because\n # DataParallel will cat the priors from each GPU together\n priors = priors[:loc_data.size(1), :]\n num_priors = (priors.size(0))\n num_classes = self.num_classes\n\n # Match priors (default boxes) and ground truth boxes\n # These tensors will be created with the same device as loc_data\n loc_t = loc_data.new(batch_size, num_priors, 4)\n gt_box_t = loc_data.new(batch_size, num_priors, 4)\n conf_t = loc_data.new(batch_size, num_priors).long()\n idx_t = loc_data.new(batch_size, num_priors).long()\n\n defaults = priors.data\n\n if cfg.use_class_existence_loss:\n class_existence_t = loc_data.new(batch_size, num_classes-1)\n\n for idx in range(batch_size):\n truths = targets[idx][:, :-1].data\n labels[idx] = targets[idx][:, -1].data.long()\n\n if cfg.use_class_existence_loss:\n # Construct a one-hot vector for each object and collapse it into an existence vector with max\n # Also it's fine to include the crowd annotations here\n class_existence_t[idx, :] = torch.eye(num_classes-1, device=conf_t.get_device())[labels[idx]].max(dim=0)[0]\n\n # Split the crowd annotations because they come bundled in\n cur_crowds = num_crowds[idx]\n if cur_crowds > 0:\n split = lambda x: (x[-cur_crowds:], x[:-cur_crowds])\n crowd_boxes, truths = split(truths)\n\n # We don't use the crowd labels or masks\n _, labels[idx] = split(labels[idx])\n _, masks[idx] = split(masks[idx])\n else:\n crowd_boxes = None\n\n \n match(self.pos_threshold, self.neg_threshold,\n truths, defaults, labels[idx], crowd_boxes,\n loc_t, conf_t, idx_t, idx, loc_data[idx])\n \n gt_box_t[idx, :, :] = truths[idx_t[idx]]\n\n # wrap targets\n loc_t = Variable(loc_t, requires_grad=False)\n conf_t = Variable(conf_t, requires_grad=False)\n idx_t = Variable(idx_t, requires_grad=False)\n\n pos = conf_t > 0\n num_pos = pos.sum(dim=1, keepdim=True)\n \n # Shape: [batch,num_priors,4]\n pos_idx = pos.unsqueeze(pos.dim()).expand_as(loc_data)\n \n losses = {}\n\n # Localization Loss (Smooth L1)\n if cfg.train_boxes:\n loc_p = loc_data[pos_idx].view(-1, 4)\n loc_t = loc_t[pos_idx].view(-1, 4)\n losses['B'] = F.smooth_l1_loss(loc_p, loc_t, reduction='sum') * cfg.bbox_alpha\n\n if cfg.train_masks:\n if cfg.mask_type == mask_type.direct:\n if cfg.use_gt_bboxes:\n pos_masks = []\n for idx in range(batch_size):\n pos_masks.append(masks[idx][idx_t[idx, pos[idx]]])\n masks_t = torch.cat(pos_masks, 0)\n masks_p = mask_data[pos, :].view(-1, cfg.mask_dim)\n losses['M'] = F.binary_cross_entropy(torch.clamp(masks_p, 0, 1), masks_t, reduction='sum') * cfg.mask_alpha\n else:\n losses['M'] = self.direct_mask_loss(pos_idx, idx_t, loc_data, mask_data, priors, masks)\n elif cfg.mask_type == mask_type.lincomb:\n losses.update(self.lincomb_mask_loss(pos, idx_t, loc_data, mask_data, priors, proto_data, masks, gt_box_t, inst_data))\n \n if cfg.mask_proto_loss is not None:\n if cfg.mask_proto_loss == 'l1':\n losses['P'] = 
torch.mean(torch.abs(proto_data)) / self.l1_expected_area * self.l1_alpha\n elif cfg.mask_proto_loss == 'disj':\n losses['P'] = -torch.mean(torch.max(F.log_softmax(proto_data, dim=-1), dim=-1)[0])\n\n # Confidence loss\n if cfg.use_focal_loss:\n if cfg.use_sigmoid_focal_loss:\n losses['C'] = self.focal_conf_sigmoid_loss(conf_data, conf_t)\n elif cfg.use_objectness_score:\n losses['C'] = self.focal_conf_objectness_loss(conf_data, conf_t)\n else:\n losses['C'] = self.focal_conf_loss(conf_data, conf_t)\n else:\n losses['C'] = self.ohem_conf_loss(conf_data, conf_t, pos, batch_size)\n\n # These losses also don't depend on anchors\n if cfg.use_class_existence_loss:\n losses['E'] = self.class_existence_loss(predictions['classes'], class_existence_t)\n if cfg.use_semantic_segmentation_loss:\n losses['S'] = self.semantic_segmentation_loss(predictions['segm'], masks, labels)\n\n # Divide all losses by the number of positives.\n # Don't do it for loss[P] because that doesn't depend on the anchors.\n total_num_pos = num_pos.data.sum().float()\n for k in losses:\n if k not in ('P', 'E', 'S'):\n losses[k] /= total_num_pos\n else:\n losses[k] /= batch_size\n\n # Loss Key:\n # - B: Box Localization Loss\n # - C: Class Confidence Loss\n # - M: Mask Loss\n # - P: Prototype Loss\n # - D: Coefficient Diversity Loss\n # - E: Class Existence Loss\n # - S: Semantic Segmentation Loss\n return losses\n\n def class_existence_loss(self, class_data, class_existence_t):\n return cfg.class_existence_alpha * F.binary_cross_entropy_with_logits(class_data, class_existence_t, reduction='sum')\n\n def semantic_segmentation_loss(self, segment_data, mask_t, class_t, interpolation_mode='bilinear'):\n # Note num_classes here is without the background class so cfg.num_classes-1\n batch_size, num_classes, mask_h, mask_w = segment_data.size()\n loss_s = 0\n \n for idx in range(batch_size):\n cur_segment = segment_data[idx]\n cur_class_t = class_t[idx]\n\n with torch.no_grad():\n downsampled_masks = F.interpolate(mask_t[idx].unsqueeze(0), (mask_h, mask_w),\n mode=interpolation_mode, align_corners=False).squeeze(0)\n downsampled_masks = downsampled_masks.gt(0.5).float()\n \n # Construct Semantic Segmentation\n segment_t = torch.zeros_like(cur_segment, requires_grad=False)\n for obj_idx in range(downsampled_masks.size(0)):\n segment_t[cur_class_t[obj_idx]] = torch.max(segment_t[cur_class_t[obj_idx]], downsampled_masks[obj_idx])\n \n loss_s += F.binary_cross_entropy_with_logits(cur_segment, segment_t, reduction='sum')\n \n return loss_s / mask_h / mask_w * cfg.semantic_segmentation_alpha\n\n\n def ohem_conf_loss(self, conf_data, conf_t, pos, num):\n # Compute max conf across batch for hard negative mining\n\n batch_conf = conf_data.view(-1, self.num_classes)\n if cfg.ohem_use_most_confident:\n # i.e. max(softmax) along classes > 0 \n batch_conf = F.softmax(batch_conf, dim=1)\n loss_c, _ = batch_conf[:, 1:].max(dim=1)\n else:\n # i.e. 
-softmax(class 0 confidence)\n loss_c = log_sum_exp(batch_conf) - batch_conf[:, 0]\n\n # Hard Negative Mining\n # loss_c = loss_c.view(pos.size()[0], pos.size()[1]) #add line\n loss_c = loss_c.view(num, -1)\n loss_c[pos] = 0 # filter out pos boxes\n loss_c[conf_t < 0] = 0 # filter out neutrals (conf_t = -1)\n _, loss_idx = loss_c.sort(1, descending=True)\n _, idx_rank = loss_idx.sort(1)\n num_pos = pos.long().sum(1, keepdim=True)\n num_neg = torch.clamp(self.negpos_ratio*num_pos, max=pos.size(1)-1)\n neg = idx_rank < num_neg.expand_as(idx_rank)\n \n # Just in case there aren't enough negatives, don't start using positives as negatives\n neg[pos] = 0\n neg[conf_t < 0] = 0 # Filter out neutrals\n\n # Confidence Loss Including Positive and Negative Examples\n pos_idx = pos.unsqueeze(2).expand_as(conf_data)\n neg_idx = neg.unsqueeze(2).expand_as(conf_data)\n conf_p = conf_data[(pos_idx+neg_idx).gt(0)].view(-1, self.num_classes)\n targets_weighted = conf_t[(pos+neg).gt(0)]\n loss_c = F.cross_entropy(conf_p, targets_weighted, reduction='sum')\n \n return cfg.conf_alpha * loss_c\n\n def focal_conf_loss(self, conf_data, conf_t):\n \"\"\"\n Focal loss as described in https://arxiv.org/pdf/1708.02002.pdf\n Adapted from https://github.com/clcarwin/focal_loss_pytorch/blob/master/focalloss.py\n Note that this uses softmax and not the original sigmoid from the paper.\n \"\"\"\n conf_t = conf_t.view(-1) # [batch_size*num_priors]\n conf_data = conf_data.view(-1, conf_data.size(-1)) # [batch_size*num_priors, num_classes]\n\n # Ignore neutral samples (class < 0)\n keep = (conf_t >= 0).float()\n conf_t[conf_t < 0] = 0 # so that gather doesn't drum up a fuss\n\n logpt = F.log_softmax(conf_data, dim=-1)\n logpt = logpt.gather(1, conf_t.unsqueeze(-1))\n logpt = logpt.view(-1)\n pt = logpt.exp()\n\n # I adapted the alpha_t calculation here from\n # https://github.com/pytorch/pytorch/blob/master/modules/detectron/softmax_focal_loss_op.cu\n # You'd think you want all the alphas to sum to one, but in the original implementation they\n # just give background an alpha of 1-alpha and each forground an alpha of alpha.\n background = (conf_t == 0).float()\n at = (1 - cfg.focal_loss_alpha) * background + cfg.focal_loss_alpha * (1 - background)\n\n loss = -at * (1 - pt) ** cfg.focal_loss_gamma * logpt\n\n # See comment above for keep\n return cfg.conf_alpha * (loss * keep).sum()\n \n def focal_conf_sigmoid_loss(self, conf_data, conf_t):\n \"\"\"\n Focal loss but using sigmoid like the original paper.\n Note: To make things mesh easier, the network still predicts 81 class confidences in this mode.\n Because retinanet originally only predicts 80, we simply just don't use conf_data[..., 0]\n \"\"\"\n num_classes = conf_data.size(-1)\n\n conf_t = conf_t.view(-1) # [batch_size*num_priors]\n conf_data = conf_data.view(-1, num_classes) # [batch_size*num_priors, num_classes]\n\n # Ignore neutral samples (class < 0)\n keep = (conf_t >= 0).float()\n conf_t[conf_t < 0] = 0 # can't mask with -1, so filter that out\n\n # Compute a one-hot embedding of conf_t\n # From https://github.com/kuangliu/pytorch-retinanet/blob/master/utils.py\n conf_one_t = torch.eye(num_classes, device=conf_t.get_device())[conf_t]\n conf_pm_t = conf_one_t * 2 - 1 # -1 if background, +1 if forground for specific class\n\n logpt = F.logsigmoid(conf_data * conf_pm_t) # note: 1 - sigmoid(x) = sigmoid(-x)\n pt = logpt.exp()\n\n at = cfg.focal_loss_alpha * conf_one_t + (1 - cfg.focal_loss_alpha) * (1 - conf_one_t)\n at[..., 0] = 0 # Set alpha for the background 
class to 0 because sigmoid focal loss doesn't use it\n\n loss = -at * (1 - pt) ** cfg.focal_loss_gamma * logpt\n loss = keep * loss.sum(dim=-1)\n\n return cfg.conf_alpha * loss.sum()\n \n def focal_conf_objectness_loss(self, conf_data, conf_t):\n \"\"\"\n Instead of using softmax, use class[0] to be the objectness score and do sigmoid focal loss on that.\n Then for the rest of the classes, softmax them and apply CE for only the positive examples.\n\n If class[0] = 1 implies forground and class[0] = 0 implies background then you achieve something\n similar during test-time to softmax by setting class[1:] = softmax(class[1:]) * class[0] and invert class[0].\n \"\"\"\n\n conf_t = conf_t.view(-1) # [batch_size*num_priors]\n conf_data = conf_data.view(-1, conf_data.size(-1)) # [batch_size*num_priors, num_classes]\n\n # Ignore neutral samples (class < 0)\n keep = (conf_t >= 0).float()\n conf_t[conf_t < 0] = 0 # so that gather doesn't drum up a fuss\n\n background = (conf_t == 0).float()\n at = (1 - cfg.focal_loss_alpha) * background + cfg.focal_loss_alpha * (1 - background)\n\n logpt = F.logsigmoid(conf_data[:, 0]) * (1 - background) + F.logsigmoid(-conf_data[:, 0]) * background\n pt = logpt.exp()\n\n obj_loss = -at * (1 - pt) ** cfg.focal_loss_gamma * logpt\n\n # All that was the objectiveness loss--now time for the class confidence loss\n pos_mask = conf_t > 0\n conf_data_pos = (conf_data[:, 1:])[pos_mask] # Now this has just 80 classes\n conf_t_pos = conf_t[pos_mask] - 1 # So subtract 1 here\n\n class_loss = F.cross_entropy(conf_data_pos, conf_t_pos, reduction='sum')\n\n return cfg.conf_alpha * (class_loss + (obj_loss * keep).sum())\n\n\n def direct_mask_loss(self, pos_idx, idx_t, loc_data, mask_data, priors, masks):\n \"\"\" Crops the gt masks using the predicted bboxes, scales them down, and outputs the BCE loss. 
\"\"\"\n loss_m = 0\n for idx in range(mask_data.size(0)):\n with torch.no_grad():\n cur_pos_idx = pos_idx[idx, :, :]\n cur_pos_idx_squeezed = cur_pos_idx[:, 1]\n\n # Shape: [num_priors, 4], decoded predicted bboxes\n pos_bboxes = decode(loc_data[idx, :, :], priors.data, cfg.use_yolo_regressors)\n pos_bboxes = pos_bboxes[cur_pos_idx].view(-1, 4).clamp(0, 1)\n pos_lookup = idx_t[idx, cur_pos_idx_squeezed]\n\n cur_masks = masks[idx]\n pos_masks = cur_masks[pos_lookup, :, :]\n \n # Convert bboxes to absolute coordinates\n num_pos, img_height, img_width = pos_masks.size()\n\n # Take care of all the bad behavior that can be caused by out of bounds coordinates\n x1, x2 = sanitize_coordinates(pos_bboxes[:, 0], pos_bboxes[:, 2], img_width)\n y1, y2 = sanitize_coordinates(pos_bboxes[:, 1], pos_bboxes[:, 3], img_height)\n\n # Crop each gt mask with the predicted bbox and rescale to the predicted mask size\n # Note that each bounding box crop is a different size so I don't think we can vectorize this\n scaled_masks = []\n for jdx in range(num_pos):\n tmp_mask = pos_masks[jdx, y1[jdx]:y2[jdx], x1[jdx]:x2[jdx]]\n\n # Restore any dimensions we've left out because our bbox was 1px wide\n while tmp_mask.dim() < 2:\n tmp_mask = tmp_mask.unsqueeze(0)\n\n new_mask = F.adaptive_avg_pool2d(tmp_mask.unsqueeze(0), cfg.mask_size)\n scaled_masks.append(new_mask.view(1, -1))\n\n mask_t = torch.cat(scaled_masks, 0).gt(0.5).float() # Threshold downsampled mask\n \n pos_mask_data = mask_data[idx, cur_pos_idx_squeezed, :]\n loss_m += F.binary_cross_entropy(torch.clamp(pos_mask_data, 0, 1), mask_t, reduction='sum') * cfg.mask_alpha\n\n return loss_m\n \n\n def coeff_diversity_loss(self, coeffs, instance_t):\n \"\"\"\n coeffs should be size [num_pos, num_coeffs]\n instance_t should be size [num_pos] and be values from 0 to num_instances-1\n \"\"\"\n num_pos = coeffs.size(0)\n instance_t = instance_t.view(-1) # juuuust to make sure\n\n coeffs_norm = F.normalize(coeffs, dim=1)\n cos_sim = coeffs_norm @ coeffs_norm.t()\n\n inst_eq = (instance_t[:, None].expand_as(cos_sim) == instance_t[None, :].expand_as(cos_sim)).float()\n\n # Rescale to be between 0 and 1\n cos_sim = (cos_sim + 1) / 2\n\n # If they're the same instance, use cosine distance, else use cosine similarity\n loss = (1 - cos_sim) * inst_eq + cos_sim * (1 - inst_eq)\n\n # Only divide by num_pos once because we're summing over a num_pos x num_pos tensor\n # and all the losses will be divided by num_pos at the end, so just one extra time.\n return cfg.mask_proto_coeff_diversity_alpha * loss.sum() / num_pos\n\n\n def lincomb_mask_loss(self, pos, idx_t, loc_data, mask_data, priors, proto_data, masks, gt_box_t, inst_data, interpolation_mode='bilinear'):\n mask_h = proto_data.size(1)\n mask_w = proto_data.size(2)\n\n process_gt_bboxes = cfg.mask_proto_normalize_emulate_roi_pooling or cfg.mask_proto_crop\n\n if cfg.mask_proto_remove_empty_masks:\n # Make sure to store a copy of this because we edit it to get rid of all-zero masks\n pos = pos.clone()\n\n loss_m = 0\n loss_d = 0 # Coefficient diversity loss\n\n for idx in range(mask_data.size(0)):\n with torch.no_grad():\n downsampled_masks = F.interpolate(masks[idx].unsqueeze(0), (mask_h, mask_w),\n mode=interpolation_mode, align_corners=False).squeeze(0)\n downsampled_masks = downsampled_masks.permute(1, 2, 0).contiguous()\n\n if cfg.mask_proto_binarize_downsampled_gt:\n downsampled_masks = downsampled_masks.gt(0.5).float()\n\n if cfg.mask_proto_remove_empty_masks:\n # Get rid of gt masks that are so small they get 
downsampled away\n very_small_masks = (downsampled_masks.sum(dim=(0,1)) <= 0.0001)\n for i in range(very_small_masks.size(0)):\n if very_small_masks[i]:\n pos[idx, idx_t[idx] == i] = 0\n\n if cfg.mask_proto_reweight_mask_loss:\n # Ensure that the gt is binary\n if not cfg.mask_proto_binarize_downsampled_gt:\n bin_gt = downsampled_masks.gt(0.5).float()\n else:\n bin_gt = downsampled_masks\n\n gt_foreground_norm = bin_gt / (torch.sum(bin_gt, dim=(0,1), keepdim=True) + 0.0001)\n gt_background_norm = (1-bin_gt) / (torch.sum(1-bin_gt, dim=(0,1), keepdim=True) + 0.0001)\n\n mask_reweighting = gt_foreground_norm * cfg.mask_proto_reweight_coeff + gt_background_norm\n mask_reweighting *= mask_h * mask_w\n\n cur_pos = pos[idx]\n pos_idx_t = idx_t[idx, cur_pos]\n \n if process_gt_bboxes:\n # Note: this is in point-form\n pos_gt_box_t = gt_box_t[idx, cur_pos]\n\n if pos_idx_t.size(0) == 0:\n continue\n\n proto_masks = proto_data[idx]\n proto_coef = mask_data[idx, cur_pos, :]\n\n if cfg.mask_proto_coeff_diversity_loss:\n if inst_data is not None:\n div_coeffs = inst_data[idx, cur_pos, :]\n else:\n div_coeffs = proto_coef\n\n loss_d += self.coeff_diversity_loss(div_coeffs, pos_idx_t)\n \n # If we have over the allowed number of masks, select a random sample\n old_num_pos = proto_coef.size(0)\n if old_num_pos > cfg.masks_to_train:\n perm = torch.randperm(proto_coef.size(0))\n select = perm[:cfg.masks_to_train]\n\n proto_coef = proto_coef[select, :]\n pos_idx_t = pos_idx_t[select]\n \n if process_gt_bboxes:\n pos_gt_box_t = pos_gt_box_t[select, :]\n\n num_pos = proto_coef.size(0)\n mask_t = downsampled_masks[:, :, pos_idx_t] \n\n # Size: [mask_h, mask_w, num_pos]\n pred_masks = proto_masks @ proto_coef.t()\n pred_masks = cfg.mask_proto_mask_activation(pred_masks)\n\n if cfg.mask_proto_double_loss:\n if cfg.mask_proto_mask_activation == activation_func.sigmoid:\n pre_loss = F.binary_cross_entropy(torch.clamp(pred_masks, 0, 1), mask_t, reduction='sum')\n else:\n pre_loss = F.smooth_l1_loss(pred_masks, mask_t, reduction='sum')\n \n loss_m += cfg.mask_proto_double_loss_alpha * pre_loss\n\n if cfg.mask_proto_crop:\n pred_masks = crop(pred_masks, pos_gt_box_t)\n \n if cfg.mask_proto_mask_activation == activation_func.sigmoid:\n pre_loss = F.binary_cross_entropy(torch.clamp(pred_masks, 0, 1), mask_t, reduction='none')\n else:\n pre_loss = F.smooth_l1_loss(pred_masks, mask_t, reduction='none')\n\n if cfg.mask_proto_normalize_mask_loss_by_sqrt_area:\n gt_area = torch.sum(mask_t, dim=(0, 1), keepdim=True)\n pre_loss = pre_loss / (torch.sqrt(gt_area) + 0.0001)\n \n if cfg.mask_proto_reweight_mask_loss:\n pre_loss = pre_loss * mask_reweighting[:, :, pos_idx_t]\n \n if cfg.mask_proto_normalize_emulate_roi_pooling:\n weight = mask_h * mask_w if cfg.mask_proto_crop else 1\n pos_get_csize = center_size(pos_gt_box_t)\n gt_box_width = pos_get_csize[:, 2] * mask_w\n gt_box_height = pos_get_csize[:, 3] * mask_h\n pre_loss = pre_loss.sum(dim=(0, 1)) / gt_box_width / gt_box_height * weight\n\n\n # If the number of masks were limited scale the loss accordingly\n if old_num_pos > num_pos:\n pre_loss *= old_num_pos / num_pos\n\n loss_m += torch.sum(pre_loss)\n \n losses = {'M': loss_m * cfg.mask_alpha / mask_h / mask_w}\n \n if cfg.mask_proto_coeff_diversity_loss:\n losses['D'] = loss_d\n\n return losses\n" ]
[ [ "torch.sum", "torch.nn.functional.log_softmax", "torch.nn.functional.logsigmoid", "torch.nn.functional.normalize", "torch.nn.functional.softmax", "torch.autograd.Variable", "torch.no_grad", "torch.zeros_like", "torch.nn.functional.smooth_l1_loss", "torch.sqrt", "torch.abs", "torch.nn.functional.cross_entropy", "torch.max", "torch.cat", "torch.clamp", "torch.nn.functional.binary_cross_entropy_with_logits" ] ]
harrisonzhu508/data
[ "a3b95ced4abad6653d20f67f3f285abeeb0c2b25" ]
[ "src/pipelines/weather/weather_pipeline.py" ]
[ "import re\nimport sys\nimport math\nfrom random import shuffle\nfrom functools import partial\nfrom typing import Any, Dict, List, Tuple\nfrom multiprocessing import cpu_count\nfrom multiprocessing.pool import ThreadPool as Pool\n\nimport numpy\nfrom tqdm.contrib import concurrent\nfrom pandas import DataFrame, Series, Int64Dtype, merge, read_csv, concat, isna\n\nfrom lib.cast import safe_int_cast\nfrom lib.pipeline import DataPipeline, DefaultPipeline, PipelineChain\nfrom lib.time import datetime_isoformat\nfrom lib.utils import ROOT\n\n\nclass WeatherPipeline(DefaultPipeline):\n\n # A bit of a circular dependency but we need the latitude and longitude to compute weather\n def fetch(self, cache: Dict[str, str], **fetch_opts) -> List[str]:\n return [ROOT / \"output\" / \"tables\" / \"geography.csv\"]\n\n @staticmethod\n def haversine_distance(\n stations: DataFrame, lat: float, lon: float, radius: float = 6373.0\n ) -> Series:\n \"\"\" Compute the distance between two <latitude, longitude> pairs in kilometers \"\"\"\n\n # Compute the pairwise deltas\n lat_diff = stations.lat - lat\n lon_diff = stations.lon - lon\n\n # Apply Haversine formula\n a = numpy.sin(lat_diff / 2) ** 2\n a += math.cos(lat) * numpy.cos(stations.lat) * numpy.sin(lon_diff / 2) ** 2\n c = numpy.arctan2(numpy.sqrt(a), numpy.sqrt(1 - a)) * 2\n\n return radius * c\n\n @staticmethod\n def nearest_station(stations, lat: float, lon: float):\n # Compute the distance with each station\n distances = WeatherPipeline.haversine_distance(stations, lat, lon)\n\n # Return the closest station and its distance\n idxmin = distances.idxmin()\n return distances.loc[idxmin], stations.loc[idxmin]\n\n @staticmethod\n def fix_temp(value: int):\n value = safe_int_cast(value)\n return None if value is None else \"%.1f\" % (value / 10.0)\n\n @staticmethod\n def station_records(station_cache: Dict[str, DataFrame], stations: DataFrame, location: Series):\n\n # Get the nearest station from our list of stations given lat and lon\n distance, nearest = WeatherPipeline.nearest_station(stations, location.lat, location.lon)\n\n # Query the cache and pull data only if not already cached\n if nearest.id not in station_cache:\n\n # Read the records from the nearest station\n station_url = (\n \"https://www.ncei.noaa.gov/data\"\n \"/global-historical-climatology-network-daily/access/{}.csv\"\n ).format(nearest.id)\n column_mapping = {\n \"DATE\": \"date\",\n \"STATION\": \"noaa_station\",\n \"TMIN\": \"minimum_temperature\",\n \"TMAX\": \"maximum_temperature\",\n \"PRCP\": \"rainfall\",\n \"SNOW\": \"snowfall\",\n }\n data = read_csv(station_url, usecols=lambda column: column in column_mapping.keys())\n data = data.rename(columns=column_mapping)\n\n # Convert temperature to correct values\n data[\"minimum_temperature\"] = data[\"minimum_temperature\"].apply(\n WeatherPipeline.fix_temp\n )\n data[\"maximum_temperature\"] = data[\"maximum_temperature\"].apply(\n WeatherPipeline.fix_temp\n )\n\n # Get only data for 2020 and add location values\n data = data[data.date > \"2019-12-31\"]\n\n # Save into the cache\n station_cache[nearest.id] = data\n\n # Get station records from the cache\n data = station_cache[nearest.id].copy()\n\n # Return all the available data from the records\n output_columns = [\n \"date\",\n \"key\",\n \"noaa_station\",\n \"noaa_distance\",\n \"minimum_temperature\",\n \"maximum_temperature\",\n \"rainfall\",\n \"snowfall\",\n ]\n data[\"key\"] = location.key\n data[\"noaa_distance\"] = \"%.03f\" % distance\n return data[[col for col in 
output_columns if col in data.columns]]\n\n def parse_dataframes(\n self, dataframes: List[DataFrame], aux: Dict[str, DataFrame], **parse_opts\n ):\n\n # Get all the weather stations with data up until 2020\n stations_url = \"https://www1.ncdc.noaa.gov/pub/data/ghcn/daily/ghcnd-inventory.txt\"\n stations = read_csv(\n stations_url,\n sep=r\"\\s+\",\n names=(\"id\", \"lat\", \"lon\", \"measurement\", \"year_start\", \"year_end\"),\n )\n stations = stations[stations.year_end == 2020][[\"id\", \"lat\", \"lon\", \"measurement\"]]\n\n # Filter stations that at least provide max and min temps\n measurements = [\"TMIN\", \"TMAX\"]\n stations = stations.groupby([\"id\", \"lat\", \"lon\"]).agg(lambda x: \"|\".join(x))\n stations = stations[stations.measurement.apply(lambda x: all(m in x for m in measurements))]\n stations = stations.reset_index()\n\n # Get all the POI from metadata and go through each key\n metadata = dataframes[0][[\"key\", \"latitude\", \"longitude\"]].dropna()\n\n # Convert all coordinates to radians\n stations[\"lat\"] = stations.lat.apply(math.radians)\n stations[\"lon\"] = stations.lon.apply(math.radians)\n metadata[\"lat\"] = metadata.latitude.apply(math.radians)\n metadata[\"lon\"] = metadata.longitude.apply(math.radians)\n\n # Use a cache to avoid having to query the same station multiple times\n station_cache: Dict[str, DataFrame] = {}\n\n # Make sure the stations and the cache are sent to each function call\n map_func = partial(WeatherPipeline.station_records, station_cache, stations)\n\n # We don't care about the index while iterating over each metadata item\n map_iter = [record for _, record in metadata.iterrows()]\n\n # Shuffle the iterables to try to make better use of the caching\n shuffle(map_iter)\n\n # Bottleneck is network so we can use lots of threads in parallel\n records = concurrent.thread_map(map_func, map_iter, total=len(metadata))\n\n return concat(records)\n\n\nclass WeatherPipelineChain(PipelineChain):\n\n schema: Dict[str, type] = {\n \"date\": str,\n \"key\": str,\n \"noaa_station\": str,\n \"noaa_distance\": float,\n \"minimum_temperature\": float,\n \"maximum_temperature\": float,\n \"rainfall\": float,\n \"snowfall\": float,\n }\n\n pipelines: List[Tuple[DataPipeline, Dict[str, Any]]] = [(WeatherPipeline(), {})]\n" ]
[ [ "pandas.read_csv", "numpy.cos", "pandas.concat", "numpy.sqrt", "numpy.sin" ] ]
DCAN-Labs/abcd-bids-tfmri-pipeline
[ "358581a244887a7fc385bc73c3c22e4683a22ad5" ]
[ "src/pipeline_utilities.py" ]
[ "#!/usr/bin/env python3\n# coding: utf-8\n\n\"\"\"\nCommon source for utility functions used by ABCD-BIDS task-fmri-pipeline\nGreg Conan: [email protected]\nCreated: 2021-01-15\nUpdated: 2021-11-12\n\"\"\"\n\n# Import standard libraries\nimport argparse\nfrom datetime import datetime # for seeing how long scripts take to run\nfrom glob import glob\nimport json\nimport multiprocessing as mp\nimport os\nimport pandas as pd\nimport random # only used by rand_string\nimport shutil\nimport string # only used by rand_string\nimport subprocess\nimport sys\nimport time\n\n# Constants: Name of scanner-info command-line argument, directory containing\n# the main pipeline script, SLURM-/SBATCH-related arguments' default names, and\n# name of the argument to get the directory containing the main wrapper script\nSCAN_ARG = 'scanners_info'\nSCRIPT_DIR = os.path.dirname(os.path.dirname(__file__))\nSLURM_ARGS = ('account', 'cpus', 'memory', 'print_progress', 'sleep', 'time')\nWRAPPER_LOC = 'wrapper_location'\n\n\ndef add_arg_if_in_arg_names(arg_name, all_args, parser, *shortnames, **kwargs):\n \"\"\"\n Wrapper for argparse.ArgumentParser.add_argument. Nearly identical, but \n will only add the argument to the parser if arg_name is in all_args.\n :param arg_name: String naming the argument to (maybe) add to parser\n :param all_args: Set of strings; each names a command-line argument\n :param parser: argparse.ArgumentParser\n :param shortnames: Unpacked list of strings; each is arg_name shortened\n :param kwargs: Unpacked dictionary of argparse attributes to give the arg\n :return: parser, but (maybe) with the argument named arg_name added\n \"\"\"\n if arg_name in all_args:\n cli_arg = as_cli_arg(arg_name)\n parser.add_argument(\n cli_arg[1:], cli_arg, *shortnames, **kwargs\n )\n return parser\n\n\ndef add_lvl_args_to(parser):\n \"\"\"\n :param parser: argparse.ArgumentParser with all command-line arguments \n that the user gave to pipeline_wrapper.py\n :return: parser with all command-line arguments needed for level X analysis\n \"\"\"\n # 1) Top-level directory with pipeline_wrapper.py 2) Run number 3) Path to\n # .json file which stores the 'paths' dictionary \n # parser.add_argument('--code-dir', type=valid_readable_dir, required=True)\n parser.add_argument('--run-number', type=valid_whole_number, required=True)\n parser.add_argument('--temp-json', type=valid_readable_json, required=True)\n return parser\n\n\ndef add_slurm_args_to(parser):\n \"\"\"\n :param parser: argparse.ArgumentParser with some command-line arguments \n :return: parser with all CLI arguments needed to run parallel SLURM jobs\n \"\"\"\n default_CPUs = 1\n default_gb_mem = 8\n default_sleep = 10\n default_time_limit = \"01:00:00\"\n parser.add_argument(\n '-A', '--account',\n help=\"Name of the account to submit the SBATCH job under.\"\n )\n parser.add_argument(\n '-c', '--cpus', type=valid_whole_number, default=default_CPUs,\n help=('Number of CPUs to use for each Python job. By default, this '\n 'argument\\'s value will be {}.'.format(default_CPUs))\n )\n parser.add_argument(\n '-mem', '--memory', type=valid_whole_number, default=default_gb_mem,\n help=(\"Memory in gigabytes (GB) to assign to each sbatch job. The \"\n \"default number is {} GB.\".format(default_gb_mem))\n )\n parser.add_argument(\n '-progress', '--print-progress', action='store_true',\n help=('Include this flag for the script to print updates about its '\n 'progress at intervals defined by --sleep. 
This will also print '\n 'every command that is run to submit a pipeline batch job.')\n )\n parser.add_argument(\n '-sleep', '--sleep', type=valid_whole_number, default=default_sleep,\n help=(\"Number of seconds to wait between batch job submissions. The \"\n \"default number is {}.\".format(default_sleep))\n )\n parser.add_argument(\n '-time', '--time', metavar=\"SLURM_JOB_TIME_LIMIT\",\n type=valid_time_str, default=default_time_limit,\n help=(\"Time limit for each automated_subset_analysis batch job. The \"\n \"time limit must be formatted specifically as HH:MM:SS where HH \"\n \"is hours, MM is minutes, and SS is seconds. {} is the default \"\n \"time limit.\".format(default_time_limit))\n )\n return parser\n\n\ndef argify(argname, argval):\n \"\"\"\n :param argname: String naming a parameter for a script called from terminal\n :param argval: Object to assign in string form as the value of the argument\n :return: String, a parameter assignment for a script called from terminal\n \"\"\"\n return \"--{}={}\".format(argname, argval)\n\n\ndef as_cli_arg(arg_str):\n \"\"\"\n :param arg_str: String naming a stored argument taken from the command line\n :return: String which is the command-line argument form of arg_str\n \"\"\"\n return \"--\" + arg_str.replace(\"_\", \"-\")\n\n\ndef copy_and_rename_file(old_file, new_file):\n \"\"\"\n Rename a file and copy it to a new location\n :param old_file: String, valid path to an existing file to copy\n :param new_file: String, valid path to what will be a copy of old_file\n \"\"\"\n os.rename(shutil.copy2(old_file, os.path.dirname(new_file)), new_file)\n\n\ndef copy_event_files_to_default_dir(cli_args, all_event_files):\n \"\"\"\n Copy all event files into the default event files directory\n :param cli_args: Dictionary containing all command-line arguments from user\n :param all_event_files: List of strings that are valid paths to real files\n \"\"\"\n for each_EV_file in all_event_files:\n try: shutil.copy(each_EV_file, cli_args['events_dir'])\n except shutil.SameFileError: pass\n\n\ndef count_lines_in_txt_file(filepath):\n \"\"\"\n Quickly count how many lines are in a text file.\n Taken from pynative.com/python-count-number-of-lines-in-file\n :param filepath: String, valid path to an existing readable text file\n :return: Int, the number of lines in the file at filepath\n \"\"\"\n with open(filepath, 'r') as infile: # open file in read mode\n for count, _ in enumerate(infile):\n pass\n return count + 1\n\n\ndef dict_has(a_dict, a_key):\n \"\"\"\n :param a_dict: Dictionary (any)\n :param a_key: Object (any)\n :return: True if and only if a_key is mapped to something truthy in a_dict\n \"\"\"\n return a_key in a_dict and a_dict[a_key]\n\n\ndef ensure_dict_has(a_dict, a_key, new_value):\n \"\"\"\n :param a_dict: Dictionary (any)\n :param a_key: Object which will be a key in a_dict\n :param new_value: Object to become the value mapped to a_key in a_dict\n unless a_key is already mapped to a value\n :return: a_dict, but with a_key mapped to some value\n \"\"\"\n if not dict_has(a_dict, a_key):\n a_dict[a_key] = new_value\n return a_dict\n\n\ndef exit_with_time_info(start_time, exit_code=0):\n \"\"\"\n Terminate the pipeline after displaying a message showing how long it ran\n :param start_time: datetime.datetime object of when the script started\n \"\"\"\n print('The pipeline for this subject took this long to run {}: {}'\n .format('successfully' if exit_code == 0 else 'and then crashed',\n datetime.now() - start_time))\n sys.exit(exit_code)\n\n\ndef 
extract_from_json(json_path):\n \"\"\"\n :param json_path: String, a valid path to a real readable .json file\n :return: Dictionary, the contents of the file at json_path\n \"\"\"\n with open(json_path, 'r') as infile:\n return json.load(infile)\n\n\ndef get_all_analysis_paths(cli_args):\n \"\"\"\n Build and save paths for various variables called throughout the pipeline\n :param cli_args: Dictionary containing all command-line arguments from user\n :return: Dictionary containing paths to all of the following variables:\n AROI2, BIDS, dir_lvl, feat_name, final_smooth, lvl_2_paths,\n sub_paths, templates\n \"\"\"\n paths = {'dir_lvl': {str(lvl): os.path.join( # Feature dirs for all levels\n cli_args['output'], 'Level{}_feats'.format(lvl)\n ) for lvl in (1, 2)},\n 'feat_name': '{}.feat'.format(cli_args['study_name']),\n 'final_smooth': ('_smoothed_{}mm' # Spatial smoothing variable \n .format(cli_args['spat_smooth']))}\n for lvl in cli_args['levels']:\n tmpllv = 'template{}'.format(lvl)\n paths[tmpllv] = os.path.join(cli_args['templates'], cli_args[tmpllv])\n paths['lvl_2'] = get_lvl_paths(\n paths['dir_lvl']['2'], get_sub_base(cli_args),\n cli_args['study_name'] + '.gfeat', cli_args['runs'], 'fsf'\n )\n paths['sub_ses'] = {f_or_a: os.path.join( # Subject anat & func directories\n cli_args['study_dir'], 'derivatives',\n cli_args['bids_dir'], cli_args['subject'],\n cli_args['ses'], f_or_a\n ) for f_or_a in ('anat', 'func')} \n paths['AROI2'] = os.path.join(cli_args['templates'], 'Atlas_ROIs.2.nii.gz')\n return paths\n\n\ndef get_and_print_time_since(event_name, event_time):\n \"\"\"\n Print and return a string showing how much time has passed since the\n current running script reached a certain part of its process\n :param event_name: String to print after 'Time elapsed since '\n :param event_time: datetime object representing a time in the past\n :return: String with an easily human-readable message showing how much time\n has passed since {event_time} when {event_name} happened.\n \"\"\"\n timestamp = (\"\\nTime elapsed since {}: {}\"\n .format(event_name, datetime.now() - event_time))\n print(timestamp)\n return timestamp\n\n\ndef get_args_to_run_film_gls(**kwargs):\n \"\"\"\n :return: List of strings which are a Bash command calling film_gls\n \"\"\"\n in_arg = kwargs.pop('in_arg')\n to_call = ['film_gls', '--sa', argify('in', in_arg)] \n for argname, argval in kwargs.items():\n to_call.append(argify(argname, argval))\n return to_call\n\n\ndef get_default_ext_command(cmd_name):\n \"\"\"\n Try to get valid path to external software command file without user input\n :param cmd_name: String naming the executable command file\n :return: String, path to the command if the user has the command alias in\n their .bashrc / $PATH; otherwise None\n \"\"\"\n try: # If the command path is already defined, then use it\n cmd = subprocess.check_output((\"which\", cmd_name)\n ).decode('utf-8').split()[-1]\n except subprocess.CalledProcessError:\n cmd = None\n return cmd\n\n\ndef get_LR_functions(cli_args, paths):\n \"\"\"\n :param cli_args: Dictionary containing all command-line arguments from user\n :param paths: Dictionary of path strings, and of dictionaries of path\n strings, used throughout processing in both levels\n :return: Dictionary mapping 'surf' to a function which returns the file\n path string to a .surf.gii file, and mapping 'shape' to a function\n which returns the file path string to a .shape.gii file\n \"\"\"\n return {'surf': lambda x: os.path.join(\n paths['sub_ses']['anat'], 
get_subj_ses(cli_args) +\n '_hemi-{}_space-MNI_mesh-fsLR32k_midthickness.surf.gii'.format(x)\n ), 'shape': lambda y: os.path.join(\n cli_args['templates'], y + '.atlasroi.32k_fs_LR.shape.gii'\n )}\n\n\ndef get_lvl_paths(lvl_dir, sub_base, feat_name, runs, *extra_subdirs):\n \"\"\"\n Get a dictionary of paths to analysis-level-specific files for paths dict\n :param lvl_dir: String, path to the feat directory for level 1 or 2\n :param sub_base: String identifying a subject, session, and task\n :param feat_name: String naming a feature\n :param runs: List of strings or integers, each identifying a run\n :param extra_subdirs: Unpacked list of strings naming subdirectories of \n the level parent directory\n :return: Dictionary mapping string keys to string paths\n \"\"\"\n lvl_paths = {'parent': os.path.join(lvl_dir, sub_base + '_' + feat_name)}\n for run in runs:\n lvl_paths[run] = os.path.join(lvl_paths['parent'],\n 'level1_run-{}'.format(run))\n for subdr in extra_subdirs:\n lvl_paths[subdr] = os.path.join(lvl_paths['parent'], subdr + '_files')\n return lvl_paths\n\n\ndef get_main_pipeline_arg_names():\n \"\"\"\n :return: Set containing strings naming all command-line arguments included\n by default in the main script, pipeline_wrapper.py\n \"\"\"\n return {'bids_dir', 'censor', 'events_dir', 'fd', 'filter', 'fsl_dir',\n 'keep_all', 'levels', 'no_parallel', 'output', 'runs', 'ses',\n 'spat_smooth', 'subject', 'surf_smooth', 'study_dir', 'study_name',\n 'task', 'temp_dir', 'templates', 'template1', 'template2',\n 'vol_smooth', 'wb_command', WRAPPER_LOC}\n\n\ndef get_optional_cli_args(cli_args, drop_slurm=False):\n \"\"\"\n :param cli_args: Dictionary with all validated command-line arguments,\n all of which are used by this function\n :param drop_slurm: True to exclude SLURM arguments; else False\n :return: List of most cli_args optional arguments and their values\n \"\"\"\n optional_args = list()\n for arg in cli_args.keys():\n if cli_args[arg] and not (drop_slurm and arg in SLURM_ARGS):\n optional_args.append(as_cli_arg(arg))\n if isinstance(cli_args[arg], list):\n for el in cli_args[arg]:\n optional_args.append(str(el))\n elif not isinstance(cli_args[arg], bool):\n optional_args.append(str(cli_args[arg]))\n return optional_args\n \n\ndef get_pipeline_cli_argparser(arg_names=get_main_pipeline_arg_names()):\n \"\"\"\n :param arg_names: Set containing strings naming all command-line arguments\n :return: argparse.ArgumentParser with all command-line arguments \n needed to run pipeline_wrapper.py\n \"\"\"\n # Default values for user input arguments\n default_BIDS_dir = 'abcd-hcp-pipeline'\n default_censor_num = 0 # 2\n default_fd = 0.9\n default_smooth = 0\n default_study_name = 'ABCD'\n default_runs_lvls = [1, 2]\n default_temporal_filter = 100\n default_wb_command = get_default_ext_command('wb_command')\n generic_dtseries_path = os.path.join(\n '(--study-dir)', 'derivatives', '(--bids-dir)',\n '(--subject)', '(--ses)', 'func',\n 'sub-(--subject)_ses-(--ses)_task-(--task)_'\n 'run-(--runs)_bold_timeseries.dtseries.nii'\n )\n generic_output_dirpath = os.path.join('(--study-dir)', 'derivatives',\n 'abcd-bids-tfmri-pipeline',\n '(--subject)', '(--ses)')\n\n # Strings used in multiple help messages\n msg_default = ' By default, this argument\\'s value(s) will be {}.'\n msg_pipeline = 'Name of the {} that you are running the pipeline on.'\n msg_smooth = ('Millimeters of {} smoothing that has already been applied '\n 'in the minimal processing steps.')\n msg_template = 'Name (not full path) of the 
Level {} .fsf template file.'\n msg_whole_num = ' This argument must be a positive integer.'\n\n # Create parser with command-line arguments from user\n parser = argparse.ArgumentParser(description=(\n 'ABCD fMRI Task Prep pipeline. Inputs must be in the same format '\n 'as ABCD-HCP-Pipeline outputs after running filemap.'\n ))\n parser = add_arg_if_in_arg_names('bids_dir', arg_names, parser,\n metavar='NAME_OF_BIDS_DERIVATIVES_PIPELINE_DIRECTORY',\n default=default_BIDS_dir,\n help=('Name of the BIDS-standard file-mapped directory with subject '\n 'data in the \"derivatives\" subdirectory of your --study-dir. '\n 'This path should be valid: ' + generic_dtseries_path +\n msg_default.format(default_BIDS_dir))\n )\n # Specify how many initial frames/volumes to censor\n parser = add_arg_if_in_arg_names('censor', arg_names, parser,\n metavar='INITIAL_NUMER_OF_TIMEPOINTS_TO_CENSOR', \n default=default_censor_num, type=valid_whole_number,\n help=('The number of initial frames/volumes to censor.'\n + msg_whole_num + msg_default.format(default_censor_num))\n )\n parser = add_arg_if_in_arg_names('events_dir', arg_names, parser, \n metavar='EVENT_FILES_DIRECTORY',\n type=valid_readable_dir, \n help='Valid path to a real directory containing event .tsv files.' \n )\n # Specify framewise displacement threshold to censor volumes with high motion\n parser = add_arg_if_in_arg_names('fd', arg_names, parser, \n metavar='FRAMEWISE_DISPLACEMENT_THRESHOLD',\n default=default_fd, type=valid_float_0_to_1,\n help=('The framewise displace threshold for censoring volumes with '\n 'high motion. This must be a decimal between 0 and 1.{}'\n .format(msg_default.format(default_fd)))\n )\n # High pass temporal filter cutoff number value\n parser = add_arg_if_in_arg_names('filter', arg_names, parser, \n metavar='HIGH_PASS_TEMPORAL_FILTER_CUTOFF',\n default=default_temporal_filter, type=valid_whole_number,\n help=('High pass filter cutoff (in seconds).{}{}'.format(\n msg_whole_num, msg_default.format(default_temporal_filter)\n ))\n )\n parser = add_arg_if_in_arg_names('fsl_dir', arg_names, parser, \n '-fsl', '--fsl', dest='fsl_dir', type=valid_readable_dir,\n help=('Valid path to an existing directory containing the executable '\n 'files fsl, fslmerge, fslmaths, flameo, and feat_model from '\n 'the FMRIB Software Library (FSL).')\n )\n parser = add_arg_if_in_arg_names('keep_all', arg_names, parser, \n action='store_true',\n help=('Include this flag to keep all files generated during the '\n 'pipeline. By default, the pipeline will only keep dtseries, '\n 'dof, log, and event files.')\n )\n # Which analysis levels to run\n parser = add_arg_if_in_arg_names('levels', arg_names, parser, \n metavar='ANALYSIS_LEVELS_TO_RUN',\n nargs='*', choices=default_runs_lvls, type=valid_whole_number,\n help=('Levels to conduct the analysis on: {0} for one run, and/or '\n '{1} to merge multiple runs.'.format(*default_runs_lvls))\n )\n parser = add_arg_if_in_arg_names('no_parallel', arg_names, parser,\n action='store_true',\n help=('Include this flag to process level 1 analysis runs '\n 'sequentially. 
By default, the script will process the analyses '\n 'in parallel simultaneously.')\n )\n parser = add_arg_if_in_arg_names('output', arg_names, parser, \n '-out', metavar='OUTPUT_DIRECTORY', type=valid_output_dir, # required=True, \n help=('Directory path to save pipeline outputs into.'\n + msg_default.format(generic_output_dirpath))\n )\n # Specify the number of runs each subject has\n parser = add_arg_if_in_arg_names('runs', arg_names, parser, \n metavar='RUN', \n default=default_runs_lvls, type=valid_whole_number, nargs=\"+\",\n help=('Each subject\\'s number of runs. This argument must be 1 or '\n 'more positive integers provided as a space-delimited list. '\n 'For example: 1 2 3 4. By default, this argument\\'s value(s) '\n 'will be 1 2.')\n )\n parser = add_arg_if_in_arg_names(SCAN_ARG, arg_names, parser, \n type=valid_readable_file,\n help=('Path to existing .csv file listing all scanners\\' parameters. '\n + msg_default.format('scan_info/{}.csv in the code directory.'\n .format(SCAN_ARG)))\n )\n # Which session to run the pipeline on\n parser = add_arg_if_in_arg_names('ses', arg_names, parser, \n metavar='SESSION', required=True, # default=default_ses,\n type=lambda x: valid_subj_ses(x, 'ses-', 'session'), #, 'ses'),\n help=msg_pipeline.format('session')\n )\n # Desired spatial smoothing number\n parser = add_arg_if_in_arg_names('spat_smooth', arg_names, parser, \n metavar='DESIRED_SPATIAL_SMOOTHING', \n default=default_smooth, type=valid_whole_number,\n help=('Millimeters of spatial smoothing that you want for the surface '\n 'and volume data.'\n + msg_whole_num + msg_default.format(default_smooth))\n )\n parser = add_arg_if_in_arg_names('subject', arg_names, parser, \n metavar='SUBJECT_ID', required=True,\n type=lambda x: valid_subj_ses(x, 'sub-', 'subject'), #, 'NDAR', 'INV'),\n help='ID of subject to process.'\n )\n # Surface smoothing number\n parser = add_arg_if_in_arg_names('surf_smooth', arg_names, parser, \n metavar='CURRENT_SURFACE_SMOOTHING', \n default=default_smooth, type=valid_whole_number,\n help=''.join((msg_smooth.format('surface'), msg_whole_num,\n msg_default.format(default_smooth)))\n )\n # Set file path for base directory and BIDS directory\n parser = add_arg_if_in_arg_names('study_dir', arg_names, parser, \n metavar='BIDS_BASE_STUDY_DIRECTORY',\n type=valid_readable_dir, required=True, \n help='Valid path to existing base study directory.'\n )\n parser = add_arg_if_in_arg_names('study_name', arg_names, parser, \n metavar='STUDY_NAME', default=default_study_name,\n help=msg_pipeline.format('study')\n )\n # Which task you are running the pipeline on\n parser = add_arg_if_in_arg_names('task', arg_names, parser, \n metavar='TASK_NAME', required=True,\n help=msg_pipeline.format('task') # + msg_choices(choices_tasks)\n )\n parser = add_arg_if_in_arg_names('temp_dir', arg_names, parser, \n type=valid_readable_dir, metavar='TEMPORARY_DIRECTORY',\n help=('Valid path to existing directory to save temporary files into.')\n )\n parser = add_arg_if_in_arg_names('templates', arg_names, parser, \n type=valid_readable_dir, \n help='Valid path to existing directory with template .fsf files.'\n )\n for lvl in default_runs_lvls: # Specify the .fsf template files' names\n parser = add_arg_if_in_arg_names(\n 'template{}'.format(lvl), arg_names, parser, \n metavar='LEVEL_{}_TEMPLATE_NAME'.format(lvl),\n type=valid_template_filename, help=msg_template.format(lvl)\n )\n # Volume smoothing number\n parser = add_arg_if_in_arg_names('vol_smooth', arg_names, parser, \n 
metavar='CURRENT_VOLUME_SMOOTHING',\n default=default_smooth, type=valid_whole_number,\n help=''.join((msg_smooth.format('volume'), msg_whole_num,\n msg_default.format(default_smooth)))\n )\n # Specify path to wb_command\n parser = add_arg_if_in_arg_names('wb_command', arg_names, parser, \n default=default_wb_command, type=valid_readable_file,\n help=('Path to wb_command file to run Workbench Command. If this flag '\n 'is excluded, then the script will try to guess the path to '\n 'the wb_command file by checking the user\\'s BASH aliases. '\n 'Your default wb_command is \"{}\". If '\n 'that says \"None\", then you need to include this argument.'\n .format(default_wb_command))\n )\n # Argument used to get this script's dir\n parser = add_arg_if_in_arg_names(WRAPPER_LOC, arg_names, parser,\n type=valid_readable_dir, required=True,\n help=('Valid path to existing ABCD-BIDS-task-fmri-pipeline directory '\n 'that contains pipeline_wrapper.py')\n ) \n return parser\n\n\ndef get_region_path_vars(cli_args, paths, run):\n \"\"\"\n Build and return paths to particular brain region images' files/dirs\n by filling in the unique parts of generic path strings\n :param cli_args: Dictionary containing all command-line arguments from user\n :param paths: Dictionary of path strings, and of dictionaries of path\n strings, used throughout processing in both levels\n :param run: Whole number (as an int or a string) defining which run this is\n :return: Tuple of string generic paths: design, func_str, subcort, surf_str\n \"\"\"\n # Paths to design file base and subcortical volume stats directory\n design = os.path.join(paths['lvl_1']['fsf'],\n get_sub_base(cli_args, run) + '_level1')\n subcort = os.path.join(paths['lvl_1']['parent'], 'SubcorticalVolumeStats')\n\n # Generic strings used as templates for paths later \n func_str = os.path.join(paths['lvl_1']['intermediate'], \n '{}{}_filtered.atlasroi{}.{}.32k_fs_LR.func.gii')\n surf_str = os.path.join(paths['sub_ses']['anat'], (\n '{}_hemi-{}_space-MNI_mesh-fsLR32k_midthickness.surf.gii'\n ))\n return design, func_str, subcort, surf_str\n\n\ndef get_replacements(cli_args, **kwargs):\n \"\"\"\n :param cli_args: Dictionary containing all command-line arguments from user\n :return: Dictionary mapping variables' generic names in template files to \n those variables' actual values provided by the user\n \"\"\"\n replacements = {'SUBID': cli_args['subject'],\n 'FEAT_NAME': cli_args['study_name'], # Not paths['feat_name']\n 'FIN_SMOOTH': str(cli_args['spat_smooth']),\n 'HP_FILTER': str(cli_args['filter']), \n 'SESSION': cli_args['ses'], 'TASK': cli_args['task'],\n 'OUTPUT_DIR': cli_args['output'],\n 'EVENTS_DIR': cli_args['events_dir'],\n 'STUDY_DIR': cli_args['study_dir']}\n replacements.update(kwargs)\n return replacements\n\n\ndef get_sbatch_args(cli_args, job):\n \"\"\"\n :param cli_args: Dictionary containing all command-line arguments from user\n :param job: String 1-8 characters long naming the SBATCH job\n :return: List of strings, SLURM-related arguments to pass to the main\n script or level 1 analysis script for parallelization\n \"\"\"\n return [argify('time', cli_args['time']), '-c', str(cli_args['cpus']),\n '-J', job, argify('mem', '{}gb'.format(cli_args[\"memory\"]))]\n\n\ndef get_sub_base(cli_args, run_num=None):\n \"\"\"\n :param cli_args: Dictionary containing all command-line arguments from user\n :param run_num: Whole number as an int or string defining which run this is\n :return: String identifying a subject, session, task, and maybe run\n \"\"\"\n 
parts = [get_subj_ses(cli_args), 'task-' + cli_args['task']]\n if run_num is not None:\n parts.append('run-{}'.format(run_num))\n return '_'.join(parts)\n\n\ndef get_subj_ses(cli_args):\n \"\"\"\n :param cli_args: Dictionary containing all command-line arguments from user\n :return: String which combines --subject and --ses from command line\n \"\"\"\n return '_'.join((cli_args['subject'], cli_args['ses']))\n\n\ndef get_TR_and_ntpts(dtseries_path, wb_command_path):\n \"\"\"\n :param dtseries_path: String, the full path to a .dtseries.nii file\n :param wb_command_path: String, the full path to the wb_command executable\n :return: Tuple of 2 numbers, the number of timepoints and repetition time\n \"\"\"\n if not os.path.exists(dtseries_path):\n sys.exit('Error: {} does not exist'.format(dtseries_path))\n else:\n ntpts = wb_command_get_info(wb_command_path, dtseries_path,\n 'number-of-maps')\n rep_time = wb_command_get_info(wb_command_path, dtseries_path,\n 'step-interval')\n return rep_time, ntpts\n\n\ndef glob_and_copy(dest_dirpath, *path_parts_to_glob):\n \"\"\"\n Collect all files matching a glob string, then copy those files\n :param dest_dirpath: String, a valid path of a directory to copy files into\n :param path_parts_to_glob: Unpacked list of strings which join to form a\n glob string of a path to copy files from\n \"\"\"\n for file_src in glob(os.path.join(*path_parts_to_glob)):\n shutil.copy(file_src, dest_dirpath)\n\n\ndef make_and_save_confound_matrix(cli_args, desc_tsv_file, lvl_paths,\n sub_run_basename):\n \"\"\"\n Create the confound matrix and copy it to subjects fsf_paths for each run\n :param cli_args: Dictionary containing all command-line arguments from user\n :param desc_tsv_file: String naming a .tsv file in intermediate_files/ dir\n :param lvl_paths: Dictionary mapping keys to dir path strings\n :param sub_run_basename: String naming the subject and the run number (?)\n :return: String, the base name of the confound matrix .csv file\n \"\"\"\n # Local variables: File paths, step filename, adjusted variable to censor\n # initial frames based on user-specification, and result (confounds fname)\n in_file = os.path.join(lvl_paths['intermediate'], desc_tsv_file)\n def tsv_file_for_step(stepnum):\n return os.path.join(lvl_paths['intermediate'],\n ('{0}_desc-filteredincludingFD_motion_step{1}.tsv'\n .format(sub_run_basename, stepnum)))\n censor_volumes = list(range(0, cli_args['censor']))\n confounds_name = str(sub_run_basename + '_confound_matrix.tsv')\n \n # Read and write framewise displacement step1 .csv file\n df = pd.read_csv(in_file, sep='\\s+')\n df.framewise_displacement.iloc[[censor_volumes]] = 1\n df.framewise_displacement[df.framewise_displacement < cli_args['fd']] = 0\n df.framewise_displacement[df.framewise_displacement > 0] = 1\n df.framewise_displacement.to_csv(tsv_file_for_step(1), header=False,\n encoding='utf-8', sep='\\t', index=False)\n \n # Read and write step2 .csv file\n df = pd.read_csv(in_file, sep='\\s+')\n cols = ['trans_x_mm', 'trans_y_mm', 'trans_z_mm', 'rot_x_degrees',\n 'rot_y_degrees', 'rot_z_degrees', 'trans_x_mm_dt',\n 'trans_y_mm_dt', 'trans_z_mm_dt', 'rot_x_degrees_dt',\n 'rot_y_degrees_dt', 'rot_z_degrees_dt']\n df = df[cols] # the 'cols' intermediate variable is needed to avoid error\n df.to_csv(tsv_file_for_step(2), sep='\\t', encoding='utf-8',\n index=False, header=False)\n\n # Read and write step3 .csv file\n df = pd.read_csv(tsv_file_for_step(1), names=['A'], sep='\\t')\n df = pd.concat([pd.get_dummies(df[df['A'] == 1].index)\n 
.transpose(), df], axis=1).fillna(0)\n del df['A']\n df.to_csv(tsv_file_for_step(3), sep='\\t', encoding='utf-8',\n index=False, header=False)\n \n # Read and write confound matrix .csv file; return its name\n pd.concat([pd.read_csv(tsv_file_for_step(x), header=None, sep='\\t')\n for x in (2, 3)], axis=1).to_csv(\n os.path.join(lvl_paths['fsf'], confounds_name),\n sep='\\t', encoding='utf-8', header=None, index=False\n )\n return confounds_name\n\n\ndef individualize_subprocess_run(run_args, run, to_replace):\n \"\"\"\n Cycle through every argument in run_args and replace instances of the\n to_replace string with run, then return the arguments.\n :param run_args: List of strings, all arguments to call via subprocess\n :param run: Whole number (as an int or a string) defining which run this is\n :param to_replace: String to find and replace with each run name/id\n :return: run_args, but with to_replace replaced by run in them all\n \"\"\"\n for i in range(len(run_args)):\n run_args[i] = str(run_args[i]).replace(to_replace, str(run))\n return run_args\n\n\ndef make_fake_nifti(cli_args, generic, old_smoothed, unique_part, cmd, *args):\n \"\"\"\n Create a fake nifti from the smoothed dtseries for high-pass filtering \n :param cli_args: Dictionary containing all command-line arguments from user\n :param generic: String, new smoothed nifti file path but with a '{}' in it\n :param old_smoothed: String, the path to a real old smoothed nifti file\n :param unique_part: String/etc inserted into generic to make a valid path\n :param cmd: String which is a Bash command but with '{}'s in it to replace\n :return: String, the valid path to the now-real new smoothed nifti file\n \"\"\"\n started = datetime.now()\n new_smoothed = generic.format(unique_part)\n cmd_args = cmd.format(old_smoothed, *args, new_smoothed).split()\n if cmd_args[0] == 'wb_command':\n wb_command(cli_args, *cmd_args[1:])\n else: # if cmd_args[0] in ('fsl', 'feat_model', 'film_gls', ):\n run_fsl_tool(cli_args, *cmd_args)\n if cli_args['print_progress']:\n get_and_print_time_since('started making '\n + os.path.basename(new_smoothed), started)\n return new_smoothed\n\n\ndef merge_files_in_range(cli_args, file_names, range_to, args):\n \"\"\"\n :param cli_args: Dictionary containing all command-line arguments from user\n :param file_names: List of strings where each is a filename\n :param range_to: Integer, the number of files to merge\n :param args: List, the rest of the arguments to call merge_to_make_dtseries\n \"\"\"\n for r in range(0, range_to):\n for f in file_names:\n merge_to_make_dtseries(cli_args, str(f) + str(r + 1), *args)\n\n\ndef merge_to_make_dtseries(cli_args, fname, lvl_paths, substats, AROI2, shape):\n \"\"\"\n :param fname: String, base name of the files to merge into a dtseries\n :param lvl_paths: Dictionary mapping keys to dir path strings\n :param substats: String, the path to the subcortical stats directory\n :param AROI2: String, path to Atlas ROI file\n :param shape: Function takes 'L' or 'R' & returns path to shape.gii file\n \"\"\"\n cii_out = os.path.join(lvl_paths['GOStats'], fname + '.dtseries.nii')\n subcort_in = os.path.join(substats, fname + '.nii.gz')\n func = lambda x: os.path.join(lvl_paths['parent'], x + '_SurfaceStats',\n fname + '.func.gii')\n fake_nifti = os.path.join(lvl_paths['GOStats'], fname + '.nii.gz')\n wb_command(cli_args, '-cifti-create-dense-timeseries', cii_out, '-volume',\n subcort_in, AROI2, '-left-metric', func('L'), '-roi-left', \n shape('L'), '-right-metric', func('R'), 
'-roi-right', shape('R'))\n wb_command(cli_args, '-cifti-convert', '-to-nifti', cii_out, fake_nifti)\n\n\ndef organize_lvl_paths(lvl_paths, *keys_to_remove):\n \"\"\"\n :param lvl_paths: Dictionary mapping keys to dir path strings\n :param keys_to_remove: Unpacked list of strings which are lvl_paths keys\n to exclude from the return list\n :return: List of all values in lvl_paths (except the ones mapped to\n keys_to_remove), sorted alphabetically\n \"\"\"\n lvl_paths = lvl_paths.copy()\n for each_key in keys_to_remove:\n lvl_paths.pop(each_key)\n to_return = list(lvl_paths.values())\n to_return.sort(reverse=False)\n return to_return\n\n\ndef overwrite_dirs(dirs_to_overwrite, mkdir=False):\n \"\"\"\n :param dirs_to_overwrite: List of strings which are paths to directories\n to create or overwrite with empty directories\n :param mkdir: True to remake all the dirs after overwrite, else False\n \"\"\"\n for each_dir in dirs_to_overwrite:\n if os.path.isdir(each_dir):\n shutil.rmtree(each_dir)\n elif os.path.exists(each_dir):\n os.remove(each_dir)\n if mkdir:\n os.makedirs(each_dir)\n\n\ndef rand_string(L):\n \"\"\"\n :param L: Integer, length of the string to randomly generate\n :return: String (of the given length L) of random characters\n \"\"\"\n return ''.join(random.choices(string.ascii_lowercase + string.digits, k=L))\n \n\ndef rename_template_file_vars(old_template, new_template, replacements):\n \"\"\"\n :param old_template: String, path to existing template file\n :param new_template: String, path to new template file which will be \n written with old_template variables but renamed\n :param replacements: Dictionary mapping each string in old_template to the\n string to replace it with in new_template\n \"\"\"\n with open(old_template) as infile: # Open the level 1 or 2 template\n\n # Create new .fsf file; name the output \"sub-*_ses-*_task-*_level*.fsf\"\n with open(new_template, 'w') as outfile:\n for line in infile:\n\n # Use replacements dict to replace variables in the .fsf file\n for src, target in replacements.items():\n line = line.replace(src, target)\n\n # Output the new subject-, (run-,) and task-specific .fsf file\n outfile.write(line)\n\n\ndef run_fsl_tool(cli_args, toolname, *args):\n \"\"\"\n :param cli_args: Dictionary containing all command-line arguments from user\n :param toolname: String naming the executable tool in --fsl-dir to run\n :param args: Unpacked list of arguments to run toolname with\n \"\"\"\n subprocess.check_call([\n valid_readable_file(os.path.join(cli_args['fsl_dir'], toolname)), *args\n ])\n\n\ndef run_parallel_or_sequential(script_path, cli_args, runs, to_replace, \n extra_args, second_fn=None, second_args=None):\n \"\"\"\n Run a Python script via subprocess, either sequentially or in parallel\n depending on cli_args --no-parallel\n :param script_path: String, valid path to real script to run in parallel\n :param cli_args: Dictionary containing all command-line arguments from user\n :param runs: List of unique strings identifying differences between scripts\n :param to_replace: String to find and replace with each job name/id\n \"\"\"\n if cli_args['no_parallel']: # Run processes serially/sequentially\n for run in runs:\n run_python_subscript(script_path, run, to_replace, *extra_args)\n\n else: # Run processes in parallel using Python multiprocessing module\n to_run = list()\n all_args = list()\n for run in runs:\n all_args.append([script_path, run, to_replace, *extra_args])\n to_run.append(mp.Process(args=all_args[-1], name=all_args[-1][0],\n 
target=run_python_subscript))\n if second_fn and second_args:\n all_args.append(second_args)\n to_run.append(mp.Process(target=second_fn, args=second_args, \n name=second_args[0]))\n if dict_has(cli_args, 'print_progress'):\n print('Running parallel:\\n' + '\\n\\n'.join(str(x) for x in all_args))\n try:\n run_parallel(os.path.basename(script_path), to_run,\n cli_args['sleep'], cli_args['print_progress'])\n except Exception as e:\n sys.exit(e)\n\n\ndef run_parallel(scriptname, processes, sleep, show):\n \"\"\"\n Run a script multiple times in parallel\n :param scriptname: String describing the script being run in parallel\n :param processes: List of multiprocessing.Process objects ready to run\n :param sleep_secs: Integer, how many seconds to wait between (a) process\n submissions and (b) checking if all processes finished\n :param show: True to show the user what's running at sleep_secs intervals;\n otherwise False\n \"\"\"\n started = datetime.now()\n submitted = list()\n failed = False\n for each_process in processes:\n submitted.append(each_process.start())\n time.sleep(sleep)\n while any((p.exitcode is None) for p in processes):\n time.sleep(sleep)\n if show:\n get_and_print_time_since(scriptname + ' started', started)\n if not all(p.exitcode is None or p.exitcode == 0 for p in processes):\n failed = True\n for p in processes:\n p.terminate()\n if failed:\n sys.exit('Error: {} subprocess failed.'.format(scriptname))\n\n\ndef run_python_subscript(path_to_subscript, run, to_replace, *args):\n \"\"\"\n Use subprocess to run a Python 3.6+ script from this code base\n :param path_to_subscript: String, valid path to real Python 3.6+ script\n :param cli_args: Dictionary containing all command-line arguments from user\n :param run: Whole number (as an int or a string) defining which run this is\n :param to_replace: String to find and replace with each run name/id\n :param args: Unpacked list of parameters to run subscript with\n \"\"\"\n start_time = datetime.now()\n try:\n subprocess.check_call(individualize_subprocess_run(\n ['python3', path_to_subscript, *args], run, to_replace\n )) \n except subprocess.CalledProcessError:\n err_type, err_msg, _ = sys.exc_info() # TODO make this into a reusable function? 
See run_level_1_analysis.get_events_make_template\n sys.exit('\\n\\n{}: {}\\n\\n'.format(err_type.__name__, err_msg))\n get_and_print_time_since(os.path.basename(path_to_subscript)\n + ' started', start_time)\n return # Explicitly end this function so multiprocessing knows it's done\n\n\ndef save_to_json_and_get_path(a_dict, dict_name, out_dir):\n \"\"\"\n :param a_dict: Dictionary with only string keys\n :param dict_name: String naming a_dict\n :param out_dir: String, a valid path to a real directory to save\n the .json file containing a_dict into\n :return: String, the full path to the .json file containing a_dict\n \"\"\"\n json_path = os.path.join(out_dir, 'abcd-bids-pipeline-{}_{}.json'.format(\n dict_name, datetime.now().strftime('%Y-%b-%d_%H-%M')\n ))\n with open(json_path, 'w+') as json_file:\n json_file.write(json.dumps(a_dict))\n return json_path\n\n\ndef valid_float_0_to_1(val):\n \"\"\"\n :param val: Object to check, then throw an error if it is invalid\n :return: val if it is a float between 0 and 1 (otherwise invalid)\n \"\"\"\n return validate(val, lambda x: 0 <= float(x) <= 1, float,\n 'Value must be a number between 0 and 1')\n\n\ndef valid_output_dir(path):\n \"\"\"\n Try to make a folder for new files at path; throw exception if that fails\n :param path: String which is a valid (not necessarily real) folder path\n :return: String which is a validated absolute path to real writeable folder\n \"\"\"\n return validate(path, lambda x: os.access(x, os.W_OK),\n valid_readable_dir, 'Cannot create directory at {}', \n lambda y: os.makedirs(y, exist_ok=True))\n\n\ndef valid_readable_dir(path):\n \"\"\"\n :param path: Parameter to check if it represents a valid directory path\n :return: String representing a valid directory path\n \"\"\"\n return validate(path, os.path.isdir, valid_readable_file,\n 'Cannot read directory at {}')\n\n\ndef valid_readable_file(path):\n \"\"\"\n Throw exception unless parameter is a valid readable filepath string. Use\n this, not argparse.FileType('r') which leaves an open file handle.\n :param path: Parameter to check if it represents a valid filepath\n :return: String representing a valid filepath\n \"\"\"\n return validate(path, lambda x: os.access(x, os.R_OK),\n os.path.abspath, 'Cannot read file at {}')\n\n\ndef valid_readable_json(path):\n \"\"\"\n :param path: Parameter to check if it represents a valid .json file path\n :return: String representing a valid .json file path\n \"\"\"\n return validate(path, lambda x: os.path.splitext(path)[-1] == '.json',\n valid_readable_file, '{} is not a readable .json filepath')\n\n\ndef valid_subj_ses(in_arg, prefix, name): #, *keywords):\n \"\"\"\n :param in_arg: Object to check if it is a valid subject ID or session name\n :param prefix: String, 'sub-' or 'ses-'\n :param name: String describing what in_arg should be (e.g. 
'subject')\n :return: True if in_arg is a valid subject ID or session name; else False\n \"\"\"\n return validate(in_arg, lambda _: True, # lambda x: any([key in x for key in [prefix, *keywords]]),\n lambda y: (y if y[:len(prefix)] == prefix else prefix + y),\n '{}' + ' is not a valid {}'.format(name))\n\n\ndef valid_template_filename(fname):\n \"\"\"\n :param fname: Parameter to check if it represents a .fsf file name\n :return: String representing the .fsf file name\n \"\"\"\n return validate(fname, lambda x: os.path.splitext(x)[-1] == '.fsf',\n lambda y: y, '{} is not an .fsf file name')\n\n\ndef valid_time_str(in_arg):\n \"\"\"\n :param in_arg: Object to check if it's a time string in the HH:MM:SS format\n :return: True if in_arg is a time limit string in that format; else False\n \"\"\"\n try:\n split = in_arg.split(\":\")\n assert len(split) == 3\n for each_num in split:\n assert each_num.isdigit()\n assert int(each_num) >= 0\n return in_arg\n except (TypeError, AssertionError, ValueError):\n raise argparse.ArgumentTypeError('Invalid time string.')\n\n\ndef valid_whole_number(to_validate):\n \"\"\"\n Throw argparse exception unless to_validate is a positive integer\n :param to_validate: Object to test whether it is a positive integer\n :return: to_validate if it is a positive integer\n \"\"\"\n return validate(to_validate, lambda x: int(x) >= 0, int, \n '{} is not a positive integer')\n\n\ndef validate(to_validate, is_real, make_valid, err_msg, prepare=None):\n \"\"\"\n Parent/base function used by different type validation functions. Raises an\n argparse.ArgumentTypeError if the input object is somehow invalid.\n :param to_validate: String to check if it represents a valid object \n :param is_real: Function which returns true iff to_validate is real\n :param make_valid: Function which returns a fully validated object\n :param err_msg: String to show to user to tell them what is invalid\n :param prepare: Function to run before validation\n :return: to_validate, but fully validated\n \"\"\"\n try:\n if prepare:\n prepare(to_validate)\n assert is_real(to_validate)\n return make_valid(to_validate)\n except (OSError, TypeError, AssertionError, ValueError, \n argparse.ArgumentTypeError):\n raise argparse.ArgumentTypeError(err_msg.format(to_validate))\n\n\ndef validate_cli_args(cli_args, parser, arg_names=set()):\n \"\"\"\n Validate types and set defaults for any arg whose validation depends on\n another arg and therefore was not possible in get_pipeline_cli_argparser\n :param cli_args: Dictionary containing all command-line arguments from user\n :param parser: argparse.ArgumentParser to raise error if anything's invalid\n :param arg_names: Set containing SCAN_ARG if that argument is needed\n :return: cli_args, but fully validated\n \"\"\"\n # Default levels, template file directory, and scanner info file path\n cli_args = ensure_dict_has(cli_args, 'levels', [1, 2]\n if len(cli_args['runs']) > 1 else [1])\n cli_args = ensure_dict_has(cli_args, 'templates',\n os.path.join(SCRIPT_DIR, 'templates'))\n if SCAN_ARG in arg_names:\n cli_args = ensure_dict_has(cli_args, SCAN_ARG, os.path.join(\n SCRIPT_DIR, 'scan_info', SCAN_ARG + '.csv'\n ))\n\n for lvl in cli_args['levels']: # Default template file names\n cli_args = ensure_dict_has(cli_args, 'template{}'.format(lvl), (\n 'template_DCAN_version_{}_level{}_UPDATED_FINAL.fsf'\n .format(cli_args['task'], lvl)\n )) \n validate_template_file(cli_args, lvl, parser)\n \n # Default paths to FSL and wb_command\n ERR_MSG = 'No {} found. 
Please include the {} argument.'\n if not (dict_has(cli_args, 'wb_command') and\n os.access(cli_args['wb_command'], os.X_OK)):\n parser.error(ERR_MSG.format('wb_command executable', '--wb-command'))\n if not dict_has(cli_args, 'fsl_dir'):\n fsl = get_default_ext_command('fsl')\n cli_args['fsl_dir'] = os.path.dirname(fsl) if fsl else parser.error(\n ERR_MSG.format('FSL directory', '--fsl-dir')\n )\n\n # Default output/temp/event files directories. Avoiding ensure_dict_has to\n if not dict_has(cli_args, 'output'): # prevent permissions error from\n cli_args['output'] = valid_output_dir( # valid_output_dir making dirs.\n os.path.join(cli_args['study_dir'], 'derivatives', 'abcd-bids-tfm'\n 'ri-pipeline', cli_args['subject'], cli_args['ses'])\n )\n for arg in ('temp_dir', 'events_dir'):\n if not dict_has(cli_args, arg):\n cli_args[arg] = valid_output_dir(\n os.path.join(cli_args['output'], 'level-1', arg.split('_')[0])\n )\n return cli_args\n\n\ndef validate_template_file(cli_args, lvl, parser):\n \"\"\"\n Verify that template .fsf file exists\n :param cli_args: Dictionary containing all command-line arguments from user\n :param lvl: String or int defining the analysis level, 1 or 2 or \"1\" or \"2\"\n :param parser: argparse.ArgumentParser to raise error if anything's invalid \n \"\"\"\n tmpl = 'template{}'.format(lvl)\n tmpl_fpath = os.path.join(cli_args['templates'], cli_args[tmpl])\n if not os.path.exists(tmpl_fpath):\n parser.error('{} does not exist. Please re-run with a different --{} '\n 'or --templates argument.'.format(tmpl_fpath, tmpl))\n\n\ndef wb_command(cli_args, *args):\n \"\"\"\n Call wb_command executable with any given parameters\n :param cli_args: Dictionary mapping 'wb_command' key to wb_command filepath\n :param args: List of all parameters to call wb_command with, in order\n \"\"\"\n subprocess.check_call([cli_args['wb_command'], *args])\n\n\ndef wb_command_get_info(wb_command, dtseries, arg_only):\n \"\"\"\n Call wb_command with -file-information and -no-map-info parameters\n :param wb_command: String, path to existing workbench wb_command executable\n :param dtseries: String, the path to a .dtseries.nii file with file info\n :param arg_only: String, the last part of the name of a wb_command\n argument starting with '-only-'\n :return: String representing a numerical value describing the dtseries\n \"\"\"\n return os.popen('{} -file-information {} -no-map-info -only-{}'\n .format(wb_command, dtseries, arg_only)).read().rstrip()\n\n\ndef wb_LR_pair(func_LR, arg_LR=None, after=True):\n \"\"\"\n Get wb_command left- and right- arguments\n :param func_LR: Function which accepts 'L' or 'R' and returns a filepath\n :param arg_LR: String naming the left- or right- argument\n :param after: True if arg_LR goes after the word 'left'/'right'; else False\n :return: List with 4 elements, arg name and then value for left then right\n \"\"\"\n arg_LR = '-' + arg_LR if arg_LR else ''\n arg_fmt = '-{}' + arg_LR if after else arg_LR + '-{}'\n return [arg_fmt.format('left'), func_LR('L'),\n arg_fmt.format('right'), func_LR('R')]\n" ]
[ [ "pandas.read_csv", "pandas.get_dummies" ] ]
pranaynanda/training-data-analyst
[ "f10ab778589129239fd5b277cfdefb41638eded5" ]
[ "courses/machine_learning/deepdive/09_sequence_keras/temperatures/utils/utils_display.py" ]
[ "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport math\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom matplotlib import transforms as plttrans\nimport utils_prettystyle\n\n\ndef picture_this_1(data, datalen):\n plt.subplot(211)\n plt.plot(data[datalen - 512:datalen + 512])\n plt.axvspan(0, 512, color='black', alpha=0.06)\n plt.axvspan(512, 1024, color='grey', alpha=0.04)\n plt.subplot(212)\n plt.plot(data[3 * datalen - 512:3 * datalen + 512])\n plt.axvspan(0, 512, color='grey', alpha=0.04)\n plt.axvspan(512, 1024, color='black', alpha=0.06)\n plt.show()\n\n\ndef picture_this_2(data, batchsize, seqlen):\n samples = np.reshape(data, [-1, batchsize, seqlen])\n rndsample = samples[np.random.choice(samples.shape[0], 8, replace=False)]\n print(\"Tensor shape of a batch of training sequences: \" +\n str(rndsample[0].shape))\n print(\"Random excerpt:\")\n subplot = 241\n for i in range(8):\n plt.subplot(subplot)\n plt.plot(rndsample[i, 0]) # first sequence in random batch\n subplot += 1\n plt.show()\n\n\ndef picture_this_3(Yout_, evaldata, evallabels, seqlen):\n subplot = 241\n colors = plt.rcParams['axes.prop_cycle'].by_key()['color']\n for i in range(8):\n plt.subplot(subplot)\n k = int(np.random.rand() * evaldata.shape[0])\n l0, = plt.plot(evaldata[k, 1:], label=\"data\")\n plt.plot([seqlen - 2, seqlen - 1], evallabels[k, -2:])\n l1, = plt.plot([seqlen - 1], [Yout_[k]],\n \"o\",\n color=\"red\",\n label='Predicted')\n l2, = plt.plot([seqlen - 1], [evallabels[k][-1]],\n \"o\",\n color=colors[1],\n label='Ground Truth')\n if i == 0:\n plt.legend(handles=[l0, l1, l2])\n subplot += 1\n plt.show()\n\n\ndef picture_this_4(temperatures, dates):\n min_temps = temperatures[:, 0]\n max_temps = temperatures[:, 1]\n interpolated = temperatures[:, 2]\n\n interpolated_sequence = False\n #plt.plot(dates, max_temps)\n for i, date in enumerate(dates):\n if interpolated[i]:\n if not interpolated_sequence:\n startdate = date\n interpolated_sequence = True\n stopdate = date\n else:\n if interpolated_sequence:\n # light shade of red just for visibility\n plt.axvspan(startdate + np.timedelta64(-5, 'D'),\n stopdate + np.timedelta64(6, 'D'),\n facecolor='#FFCCCC',\n alpha=1)\n # actual interpolated region\n plt.axvspan(startdate + np.timedelta64(-1, 'D'),\n stopdate + np.timedelta64(1, 'D'),\n facecolor='#FF8888',\n alpha=1)\n interpolated_sequence = False\n plt.fill_between(dates, min_temps, max_temps).set_zorder(10)\n plt.show()\n\n\ndef picture_this_5(visu_data, station):\n subplot = 231\n for samples, targets, dates, _, _ in visu_data:\n plt.subplot(subplot)\n h1 = plt.fill_between(dates,\n samples[station, :, 0],\n samples[station, :, 1],\n label=\"features\")\n h2 = plt.fill_between(dates,\n targets[station, :, 0],\n targets[station, :, 1],\n label=\"labels\")\n h2.set_zorder(-1)\n if subplot == 231:\n plt.legend(handles=[h1, h2])\n subplot += 1\n if subplot == 237:\n break\n plt.show()\n\n\ndef picture_this_6(evaldata, evaldates, prime_data, results, 
primelen, runlen,\n offset, rmselen):\n disp_data = evaldata[offset:offset + primelen + runlen]\n disp_dates = evaldates[offset:offset + primelen + runlen]\n colors = plt.rcParams['axes.prop_cycle'].by_key()['color']\n displayresults = np.ma.array(\n np.concatenate((np.zeros([primelen, 2]), results)))\n displayresults = np.ma.masked_where(displayresults == 0, displayresults)\n sp = plt.subplot(212)\n p = plt.fill_between(disp_dates, displayresults[:, 0],\n displayresults[:, 1])\n p.set_alpha(0.8)\n p.set_zorder(10)\n trans = plttrans.blended_transform_factory(sp.transData, sp.transAxes)\n plt.text(disp_dates[primelen],\n 0.05,\n \"DATA |\",\n color=colors[1],\n horizontalalignment=\"right\",\n transform=trans)\n plt.text(disp_dates[primelen],\n 0.05,\n \"| +PREDICTED\",\n color=colors[0],\n horizontalalignment=\"left\",\n transform=trans)\n plt.fill_between(disp_dates, disp_data[:, 0], disp_data[:, 1])\n plt.axvspan(disp_dates[primelen],\n disp_dates[primelen + rmselen],\n color='grey',\n alpha=0.1,\n ymin=0.05,\n ymax=0.95)\n plt.show()\n\n rmse = math.sqrt(\n np.mean((evaldata[offset + primelen:offset + primelen + rmselen] -\n results[:rmselen])**2))\n print(\"RMSE on {} predictions (shaded area): {}\".format(rmselen, rmse))\n\n\ndef picture_this_7(features):\n subplot = 231\n for i in range(6):\n plt.subplot(subplot)\n plt.plot(features[i])\n subplot += 1\n plt.show()\n\n\ndef picture_this_8(data, prime_data, results, offset, primelen, runlen,\n rmselen):\n disp_data = data[offset:offset + primelen + runlen]\n colors = plt.rcParams['axes.prop_cycle'].by_key()['color']\n plt.subplot(211)\n plt.text(primelen,\n 2.5,\n \"DATA |\",\n color=colors[1],\n horizontalalignment=\"right\")\n plt.text(primelen,\n 2.5,\n \"| PREDICTED\",\n color=colors[0],\n horizontalalignment=\"left\")\n displayresults = np.ma.array(\n np.concatenate((np.zeros([primelen]), results)))\n displayresults = np.ma.masked_where(displayresults == 0, displayresults)\n plt.plot(displayresults)\n displaydata = np.ma.array(np.concatenate((prime_data, np.zeros([runlen]))))\n displaydata = np.ma.masked_where(displaydata == 0, displaydata)\n plt.plot(displaydata)\n plt.subplot(212)\n plt.text(primelen,\n 2.5,\n \"DATA |\",\n color=colors[1],\n horizontalalignment=\"right\")\n plt.text(primelen,\n 2.5,\n \"| +PREDICTED\",\n color=colors[0],\n horizontalalignment=\"left\")\n plt.plot(displayresults)\n plt.plot(disp_data)\n plt.axvspan(primelen,\n primelen + rmselen,\n color='grey',\n alpha=0.1,\n ymin=0.05,\n ymax=0.95)\n plt.show()\n\n rmse = math.sqrt(\n np.mean((data[offset + primelen:offset + primelen + rmselen] -\n results[:rmselen])**2))\n print(\"RMSE on {} predictions (shaded area): {}\".format(rmselen, rmse))\n" ]
[ [ "matplotlib.pyplot.legend", "numpy.ma.masked_where", "numpy.zeros", "numpy.timedelta64", "numpy.reshape", "numpy.random.choice", "matplotlib.pyplot.axvspan", "matplotlib.pyplot.subplot", "matplotlib.pyplot.show", "matplotlib.pyplot.text", "numpy.random.rand", "matplotlib.pyplot.plot", "matplotlib.transforms.blended_transform_factory", "matplotlib.pyplot.fill_between", "numpy.mean" ] ]
josepablocam/common-code-extraction
[ "a6978fae73eee8ece6f1db09f2f38cf92f03b3ad" ]
[ "downloaded_kernels/loan_data/parsed_kernels/kernel_107.py" ]
[ "\n# coding: utf-8\n\n# **Introduction**\n# In this post, you will discover the Keras Python library that provides a clean and convenient way to create a range of deep learning models on top of Theano or TensorFlow.\n# \n# All creidts to -- \"http://machinelearningmastery.com/tutorial-first-neural-network-python-keras/\"\n# \n# Let’s get started.\n\n# **Dependencies**\n# \n# All important libraries and data set are imported below\n# \n# **Python**\n# \n# Please run this script in Python 2 \n\n# In[ ]:\n\n\nimport os, sys, re\n#import cPickle as pickle\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nimport time\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n\nprint (time.time())\ndataset = pd.read_csv('../input/loan.csv', low_memory=False)\n\n\n# Replace all the missing entries with zeros\n\n# In[ ]:\n\n\ndataset = dataset.fillna(0) ## filling missing values with zeros\n\n\n# **Data Modification**\n# \n# Convert all kind of categorical data into integral values accordingly and 'Date Column' into real values'\n\n# In[ ]:\n\n\ndataset['application_type'] = dataset['application_type'].astype('category').cat.codes\ndataset['addr_state'] = dataset['addr_state'].astype('category').cat.codes\ndataset['earliest_cr_line'] = pd.to_datetime(dataset['earliest_cr_line'])\ndataset['earliest_cr_line'] = (dataset['earliest_cr_line']-dataset['earliest_cr_line'].min())/np.timedelta64(1,'D')\ndataset['emp_length'] = dataset['emp_length'].astype('category').cat.codes\ndataset['grade'] = dataset['grade'].astype('category').cat.codes\ndataset['home_ownership'] = dataset['home_ownership'].astype('category').cat.codes\ndataset['initial_list_status'] = dataset['initial_list_status'].astype('category').cat.codes\ndataset['issue_d'] = pd.to_datetime(dataset['issue_d'])\ndataset['issue_d'] = (dataset['issue_d']-dataset['issue_d'].min())/np.timedelta64(1,'D')\ndataset['last_credit_pull_d'] = pd.to_datetime(dataset['last_credit_pull_d'])\ndataset['last_credit_pull_d'] = (dataset['last_credit_pull_d']-dataset['last_credit_pull_d'].min())/np.timedelta64(1,'D')\ndataset['last_pymnt_d'] = pd.to_datetime(dataset['last_pymnt_d'])\ndataset['last_pymnt_d'] = (dataset['last_pymnt_d']-dataset['last_pymnt_d'].min())/np.timedelta64(1,'D')\ndataset['loan_status'] = dataset['loan_status'].astype('category').cat.codes\ndataset['next_pymnt_d'] = pd.to_datetime(dataset['next_pymnt_d'])\ndataset['next_pymnt_d'] = (dataset['next_pymnt_d']-dataset['next_pymnt_d'].min())/np.timedelta64(1,'D')\ndataset['purpose'] = dataset['purpose'].astype('category').cat.codes\ndataset['pymnt_plan'] = dataset['pymnt_plan'].astype('category').cat.codes\ndataset['sub_grade'] = dataset['sub_grade'].astype('category').cat.codes\ndataset['term'] = dataset['term'].astype('category').cat.codes\ndataset['verification_status'] = dataset['verification_status'].astype('category').cat.codes\ndataset['verification_status_joint'] = dataset['verification_status_joint'].astype('category').cat.codes\n\n\n# Storing non numeric or non real columns name in non_numerics array\n\n# In[ ]:\n\n\nnon_numerics = [x for x in dataset.columns if not (dataset[x].dtype == np.float64 or dataset[x].dtype == np.int8 or dataset[x].dtype == np.int64)]\n\n\n# Droping non_numerics column for easy modeling\n\n# In[ ]:\n\n\ndf = dataset\ndf = df.drop(non_numerics,1)\n\n\n# Converting 'loan result status' into two categories 0 and 1. 
0 means loan failed or that type of person should not be given loan in future and 1 means loan passed i.e. they are good for extending the loan.\n\n# In[ ]:\n\n\ndef LoanResult(status):\n if (status == 5) or (status == 1) or (status == 7):\n return 1\n else:\n return 0\n\ndf['loan_status'] = df['loan_status'].apply(LoanResult)\n\n\n# Splitting data into train data and test data with the help of scikit library in the ratio of 3:1\n\n# In[ ]:\n\n\ntrain, test = train_test_split(df, test_size = 0.25)\n\n##running complete data set will take a lot of time, hence reduced the data set\nX_train = train.drop('loan_status',1).values[0:50000, :]\nY_train = train['loan_status'].values[0:50000]\n\nX_test = test.drop('loan_status',1).values[0:1000, :]\nY_test = test['loan_status'].values[0:1000]\n\nX_pred = test.drop('loan_status',1).values[1001:2000, :]\n\n\n# Setting the seed for pseudo random numbers generation\n\n# In[ ]:\n\n\nseed = 8 \nnp.random.seed(seed)\n\n\n# Now we will define a three layered neural network model. We create a Sequential model and add layers one at a time until we are happy with our network topology. After that we will set activation function and number of nets in each layer. These are done by heuristics and training the model several times.\n\n# In[ ]:\n\n\n# Create the model \nmodel = Sequential()\n\n# Define the three layered model\nmodel.add(Dense(110, input_dim = 68, kernel_initializer = \"uniform\", activation = \"relu\"))\nmodel.add(Dense(110, kernel_initializer = \"uniform\", activation = \"relu\"))\nmodel.add(Dense(1, kernel_initializer = \"uniform\", activation = \"sigmoid\"))\n\n\n# Now we will compile the model. In this we have to input three parameters viz. loss function, optimizer function and an evaluation metrics. These choices are again by heuristics. Here we are using \"binary_crossentropy\" as loss func, \"adam\" as optimizer func and \"accuracy\" as evaluation metrics.\n\n# In[ ]:\n\n\n#\n# Compile the model\nmodel.compile(loss=\"binary_crossentropy\", optimizer= \"adam\", metrics=['accuracy'])\n#\n\n\n# Now we have to fit the data into our model. \n# We can train or fit our model on our loaded data by calling the fit() function on the model.\n# \n# The training process will run for a fixed number of iterations through the dataset called epochs, that we must specify using the **epochs** argument. We can also set the number of instances that are evaluated before a weight update in the network is performed, called the batch size and set using the **batch_size** argument.\n\n# In[ ]:\n\n\n# Fit the model\nmodel.fit(X_train, Y_train, epochs= 50, batch_size=200)\n\n\n# **Evaluate Model**\n# \n# We have trained our neural network on the entire dataset and we can evaluate the performance of the network on the test dataset.\n\n# In[ ]:\n\n\nperformance = model.evaluate(X_test, Y_test)\nprint(\"%s: %.2f%%\" % (model.metrics_names[1], performance[1]*100))\n#\n\n\n# **Final Prediction**\n# \n# Predicting using the trained model\n\n# In[ ]:\n\n\n# Predict using the trained model\nprediction = model.predict(X_pred)\nrounded_predictions = [np.round(x) for x in prediction]\nprint(rounded_predictions)\n\n" ]
[ [ "numpy.timedelta64", "pandas.read_csv", "numpy.random.seed", "pandas.to_datetime", "numpy.round", "sklearn.model_selection.train_test_split" ] ]
joelberkeley/GPflow
[ "78230b98f57c64b5ee2932ea0d2752eb9ff102ce" ]
[ "gpflow/models/gpr.py" ]
[ "# Copyright 2016-2020 The GPflow Contributors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Optional\n\nimport tensorflow as tf\n\nimport gpflow\n\nfrom .. import posteriors\nfrom ..base import InputData, MeanAndVariance, RegressionData\nfrom ..kernels import Kernel\nfrom ..logdensities import multivariate_normal\nfrom ..mean_functions import MeanFunction\nfrom ..utilities.model_utils import add_noise_cov\nfrom .model import GPModel\nfrom .training_mixins import InternalDataTrainingLossMixin\nfrom .util import data_input_to_tensor\n\n\nclass GPR_deprecated(GPModel, InternalDataTrainingLossMixin):\n r\"\"\"\n Gaussian Process Regression.\n\n This is a vanilla implementation of GP regression with a Gaussian\n likelihood. Multiple columns of Y are treated independently.\n\n The log likelihood of this model is given by\n\n .. math::\n \\log p(Y \\,|\\, \\mathbf f) =\n \\mathcal N(Y \\,|\\, 0, \\sigma_n^2 \\mathbf{I})\n\n To train the model, we maximise the log _marginal_ likelihood\n w.r.t. the likelihood variance and kernel hyperparameters theta.\n The marginal likelihood is found by integrating the likelihood\n over the prior, and has the form\n\n .. math::\n \\log p(Y \\,|\\, \\sigma_n, \\theta) =\n \\mathcal N(Y \\,|\\, 0, \\mathbf{K} + \\sigma_n^2 \\mathbf{I})\n \"\"\"\n\n def __init__(\n self,\n data: RegressionData,\n kernel: Kernel,\n mean_function: Optional[MeanFunction] = None,\n noise_variance: float = 1.0,\n ):\n likelihood = gpflow.likelihoods.Gaussian(noise_variance)\n _, Y_data = data\n super().__init__(kernel, likelihood, mean_function, num_latent_gps=Y_data.shape[-1])\n self.data = data_input_to_tensor(data)\n\n # type-ignore is because of changed method signature:\n def maximum_log_likelihood_objective(self) -> tf.Tensor: # type: ignore\n return self.log_marginal_likelihood()\n\n def _add_noise_cov(self, K: tf.Tensor) -> tf.Tensor:\n \"\"\"\n Returns K + σ² I, where σ² is the likelihood noise variance (scalar),\n and I is the corresponding identity matrix.\n \"\"\"\n return add_noise_cov(K, self.likelihood.variance)\n\n def log_marginal_likelihood(self) -> tf.Tensor:\n r\"\"\"\n Computes the log marginal likelihood.\n\n .. math::\n \\log p(Y | \\theta).\n\n \"\"\"\n X, Y = self.data\n K = self.kernel(X)\n ks = self._add_noise_cov(K)\n L = tf.linalg.cholesky(ks)\n m = self.mean_function(X)\n\n # [R,] log-likelihoods for each independent dimension of Y\n log_prob = multivariate_normal(Y, m, L)\n return tf.reduce_sum(log_prob)\n\n def predict_f(\n self, Xnew: InputData, full_cov: bool = False, full_output_cov: bool = False\n ) -> MeanAndVariance:\n r\"\"\"\n This method computes predictions at X \\in R^{N \\x D} input points\n\n .. 
math::\n p(F* | Y)\n\n where F* are points on the GP at new data points, Y are noisy observations at training data\n points.\n \"\"\"\n X, Y = self.data\n err = Y - self.mean_function(X)\n\n kmm = self.kernel(X)\n knn = self.kernel(Xnew, full_cov=full_cov)\n kmn = self.kernel(X, Xnew)\n kmm_plus_s = self._add_noise_cov(kmm)\n\n conditional = gpflow.conditionals.base_conditional\n f_mean_zero, f_var = conditional(\n kmn, kmm_plus_s, knn, err, full_cov=full_cov, white=False\n ) # [N, P], [N, P] or [P, N, N]\n f_mean = f_mean_zero + self.mean_function(Xnew)\n return f_mean, f_var\n\n\nclass GPR_with_posterior(GPR_deprecated):\n \"\"\"\n This is an implementation of GPR that provides a posterior() method that\n enables caching for faster subsequent predictions.\n \"\"\"\n\n def posterior(\n self,\n precompute_cache: posteriors.PrecomputeCacheType = posteriors.PrecomputeCacheType.TENSOR,\n ) -> posteriors.GPRPosterior:\n \"\"\"\n Create the Posterior object which contains precomputed matrices for\n faster prediction.\n\n precompute_cache has three settings:\n\n - `PrecomputeCacheType.TENSOR` (or `\"tensor\"`): Precomputes the cached\n quantities and stores them as tensors (which allows differentiating\n through the prediction). This is the default.\n - `PrecomputeCacheType.VARIABLE` (or `\"variable\"`): Precomputes the cached\n quantities and stores them as variables, which allows for updating\n their values without changing the compute graph (relevant for AOT\n compilation).\n - `PrecomputeCacheType.NOCACHE` (or `\"nocache\"` or `None`): Avoids\n immediate cache computation. This is useful for avoiding extraneous\n computations when you only want to call the posterior's\n `fused_predict_f` method.\n \"\"\"\n\n return posteriors.GPRPosterior(\n kernel=self.kernel,\n data=self.data,\n likelihood_variance=self.likelihood.variance,\n mean_function=self.mean_function,\n precompute_cache=precompute_cache,\n )\n\n def predict_f(\n self, Xnew: InputData, full_cov: bool = False, full_output_cov: bool = False\n ) -> MeanAndVariance:\n \"\"\"\n For backwards compatibility, GPR's predict_f uses the fused (no-cache)\n computation, which is more efficient during training.\n\n For faster (cached) prediction, predict directly from the posterior object, i.e.,:\n model.posterior().predict_f(Xnew, ...)\n \"\"\"\n return self.posterior(posteriors.PrecomputeCacheType.NOCACHE).fused_predict_f(\n Xnew, full_cov=full_cov, full_output_cov=full_output_cov\n )\n\n\nclass GPR(GPR_with_posterior):\n # subclassed to ensure __class__ == \"GPR\"\n pass\n" ]
[ [ "tensorflow.linalg.cholesky", "tensorflow.reduce_sum" ] ]
gkuwanto/RBL_SK5003
[ "7da3b95afeca2aa3413993279fe13d6bb71cd5db" ]
[ "main.py" ]
[ "import argparse\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom lib.file_parser import read_csv\n\n# Set random seed\nnp.random.seed(42)\n\n# Menambahkan argumen dalam cli\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-obj\", \"--objective\",\n help=\"Specify objective function of the optimization. \"\n \"Only MSE and MAE is available\",\n default='MSE')\nparser.add_argument(\"-n_stop\", \"--n_stop\",\n help=\"Specify number of epoch with no improvement \"\n \"as stopping criteria\",\n default=5)\nargs = parser.parse_args()\n\n# Memastikan Nilai input argument\nif (args.objective) not in [\"MSE\", \"MAE\"]:\n raise ValueError(\n f\"Objective Function {args.objective} is not implemented yet\")\n\n# Membaca Data dari file\ndata_train = read_csv('data/train.csv')\ndata_test = read_csv('data/test.csv')\n\nheaders = data_train.keys()\n\n# Mengubah data dari string menjadi float\nfor col in headers:\n if col in ['date', 'id']:\n continue\n data_train[col] = [float(i) for i in data_train[col]]\n if col != 'price':\n data_test[col] = [float(i) for i in data_test[col]]\n\n\ndef onehotencode(data, col):\n key = np.unique(np.array(data[col]))\n inspect = data[col]\n value = [[0 for i in range(len(inspect))] for i in range(len(key))]\n mydict = dict(zip(key, value))\n for i in key:\n for j in range(len(inspect)):\n if inspect[j] == i:\n mydict[i][j] = 1\n del data[col]\n return {**data, **mydict}, key\n\n\ndata_train, key = onehotencode(data_train, 'zipcode')\ndata_test, _ = onehotencode(data_test, 'zipcode')\nheaders = list(headers) + list(key)\n\n# Membaca Data menjadi format numpy\nX_train = np.array([data_train[col] for col in headers\n if col not in ['date', 'id', 'price', 'zipcode']]).T\ny_train = np.array(data_train['price'])\n\n# Preprocessing X_train\nX_mean = np.mean(X_train, axis=0)\nX_std = np.std(X_train, axis=0)\nX_train = (X_train - X_mean)/X_std\nX_train = np.concatenate([X_train, np.ones(X_train[:, 0:1].shape)], axis=1)\n\n# Preprocessing y_train\ny_mean = np.mean(y_train)\ny_std = np.std(y_train)\ny_train = (y_train - y_mean)/y_std\n\n\n# Inisialisasi Parameter Model\ntheta = np.random.normal(size=X_train[0, :].shape)\n\n# Penentuan Loss dan Gradient tergantung input\nif args.objective == 'MSE':\n def loss(x, y, theta): return 0.5 * (np.dot(theta, x) - y)**2\n def gradient(x, y, theta): return (np.dot(theta, x) - y) * x\nelif args.objective == 'MAE':\n def loss(x, y, theta): return (np.abs(np.dot(theta, x)-y))\n def gradient(x, y, theta): return np.sign(np.dot(theta, x)-y) * x\n\n\n# Memulai Proses SGD\nn_epoch = 0\nstopping_criteria = False\neta = 0.00005\nloss_epoch = []\nwhile not stopping_criteria:\n epoch = n_epoch\n total_loss = 0\n total_sample = len(y_train)\n random_seq = np.random.permutation(np.arange(total_sample))\n for i in random_seq:\n total_loss += loss(X_train[i], y_train[i], theta)\n grad = gradient(X_train[i], y_train[i], theta)\n theta = theta - (eta*grad)\n loss_epoch.append(total_loss/total_sample)\n print(f\"Epoch: {epoch} \\t Loss: {total_loss/total_sample}\")\n if n_epoch > args.n_stop:\n is_lower_count = 0\n for i in range(-1, -1*args.n_stop, -1):\n if (loss_epoch[i] - loss_epoch[i-1]) < -1e-5:\n is_lower_count += 1\n stopping_criteria = (is_lower_count == 0)\n n_epoch += 1\n\n\n# Melakukan Preprocessing pada Dataset Test\nX_test = np.array([data_test[col] for col in headers\n if col not in ['date', 'id', 'price', 'zipcode']]).T\nX_test = (X_test - X_mean)/X_std\nX_test = np.concatenate([X_test, np.ones(X_test[:, 0:1].shape)], 
axis=1)\n\n# Menghitung Prediksi\ny_pred = np.dot(theta, X_test.T)\n# Melakukan postprocessing pada hasil prediksi\ny_pred = y_pred * y_std + y_mean\n\n# Menulis Hasil Prediksi pada suatu file\nwith open('data/prediction.csv', 'w+') as f:\n f.write('id,price\\n')\n for id, price in zip(data_test['id'], y_pred):\n f.write(f'{id},{price}\\n')\n\n# TODO: Evaluate Result\n# Read Prediction\nprediction = read_csv('data/prediction.csv')\nprice_pred = np.array(prediction['price']).astype(float)\n\n# Read True Label\ntrue_label = read_csv('data/test_true_price.csv')\nprice_true = np.array(true_label['price']).astype(float)\n\n# Print MSE / MAE\nMSE_result = np.mean((price_true - price_pred) ** 2)\nMAE_result = np.mean(np.abs(price_true - price_pred))\nprint(\"MSE :\", MSE_result)\nprint(\"MAE :\", MAE_result)\n\nX_headers = [col for col in headers\n if col not in ['date', 'id', 'price', 'zipcode']]\n\ny = y_train\nfor i in range(len(X_headers)):\n x = []\n x_mean = []\n for j in range(len(X_train)):\n input_mean = np.mean(X_train, axis=0)\n input_mean[i] = X_train[j][i]\n\n x.append(X_train[j][i])\n x_mean.append(input_mean)\n y_pred_mean = np.dot(theta, np.array(x_mean).T)\n minimum = min(y_pred_mean)\n maximum = max(y_pred_mean)\n\n plt.figure()\n plt.scatter(x, y)\n plt.ylim([minimum-1, maximum+5])\n plt.plot(x, y_pred_mean, color='r', linewidth=1.5)\n plt.xlabel(X_headers[i])\n plt.ylabel('price')\n plt.savefig(str(X_headers[i]) + ' to price.png')\n plt.close()\n\nplt.figure()\nplt.scatter(range(len(loss_epoch)), loss_epoch)\nplt.xticks(range(len(loss_epoch)))\nplt.xlabel('Epoch')\nplt.ylabel('Loss')\nplt.savefig('loss to epoch.png')\n" ]
[ [ "numpy.ones", "matplotlib.pyplot.plot", "matplotlib.pyplot.scatter", "matplotlib.pyplot.figure", "matplotlib.pyplot.savefig", "numpy.random.seed", "numpy.random.normal", "numpy.abs", "numpy.arange", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.ylim", "matplotlib.pyplot.close", "numpy.array", "numpy.std", "numpy.dot", "matplotlib.pyplot.xlabel", "numpy.mean" ] ]
kuonangzhe/gluon-cv
[ "f7d74019210dd0f22cb4543f061339533301c487" ]
[ "gluoncv/model_zoo/yolo/yolo3.py" ]
[ "\"\"\"You Only Look Once Object Detection v3\"\"\"\n# pylint: disable=arguments-differ\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport os\nimport numpy as np\nimport mxnet as mx\nfrom mxnet import gluon\nfrom mxnet import autograd\nfrom mxnet.gluon import nn\nfrom .darknet import _conv2d, darknet53\nfrom .yolo_target import YOLOV3TargetMerger\nfrom ...loss import YOLOV3Loss\n\n__all__ = ['YOLOV3', 'get_yolov3',\n 'yolo3_darknet53_voc', 'yolo3_darknet53_coco', 'yolo3_darknet53_custom']\n\ndef _upsample(x, stride=2):\n \"\"\"Simple upsampling layer by stack pixel alongside horizontal and vertical directions.\n\n Parameters\n ----------\n x : mxnet.nd.NDArray or mxnet.symbol.Symbol\n The input array.\n stride : int, default is 2\n Upsampling stride\n\n \"\"\"\n return x.repeat(axis=-1, repeats=stride).repeat(axis=-2, repeats=stride)\n\n\nclass YOLOOutputV3(gluon.HybridBlock):\n \"\"\"YOLO output layer V3.\n\n Parameters\n ----------\n index : int\n Index of the yolo output layer, to avoid naming confliction only.\n num_class : int\n Number of foreground objects.\n anchors : iterable\n The anchor setting. Reference: https://arxiv.org/pdf/1804.02767.pdf.\n stride : int\n Stride of feature map.\n alloc_size : tuple of int, default is (128, 128)\n For advanced users. Define `alloc_size` to generate large enough anchor\n maps, which will later saved in parameters. During inference, we support arbitrary\n input image by cropping corresponding area of the anchor map. This allow us\n to export to symbol so we can run it in c++, Scalar, etc.\n\n \"\"\"\n def __init__(self, index, num_class, anchors, stride,\n alloc_size=(128, 128), **kwargs):\n super(YOLOOutputV3, self).__init__(**kwargs)\n anchors = np.array(anchors).astype('float32')\n self._classes = num_class\n self._num_pred = 1 + 4 + num_class # 1 objness + 4 box + num_class\n self._num_anchors = anchors.size // 2\n self._stride = stride\n with self.name_scope():\n all_pred = self._num_pred * self._num_anchors\n self.prediction = nn.Conv2D(all_pred, kernel_size=1, padding=0, strides=1)\n # anchors will be multiplied to predictions\n anchors = anchors.reshape(1, 1, -1, 2)\n self.anchors = self.params.get_constant('anchor_%d'%(index), anchors)\n # offsets will be added to predictions\n grid_x = np.arange(alloc_size[1])\n grid_y = np.arange(alloc_size[0])\n grid_x, grid_y = np.meshgrid(grid_x, grid_y)\n # stack to (n, n, 2)\n offsets = np.concatenate((grid_x[:, :, np.newaxis], grid_y[:, :, np.newaxis]), axis=-1)\n # expand dims to (1, 1, n, n, 2) so it's easier for broadcasting\n offsets = np.expand_dims(np.expand_dims(offsets, axis=0), axis=0)\n self.offsets = self.params.get_constant('offset_%d'%(index), offsets)\n\n def reset_class(self, classes):\n \"\"\"Reset class prediction.\n\n Parameters\n ----------\n classes : type\n Description of parameter `classes`.\n\n Returns\n -------\n type\n Description of returned object.\n\n \"\"\"\n self._clear_cached_op()\n self._classes = len(classes)\n self._num_pred = 1 + 4 + len(classes)\n all_pred = self._num_pred * self._num_anchors\n # TODO(zhreshold): reuse box preds, objectness\n self.prediction = nn.Conv2D(\n all_pred, kernel_size=1, padding=0, strides=1, prefix=self.prediction.prefix)\n\n\n def hybrid_forward(self, F, x, anchors, offsets):\n \"\"\"Hybrid Foward of YOLOV3Output layer.\n\n Parameters\n ----------\n F : mxnet.nd or mxnet.sym\n `F` is mxnet.sym if hybridized or mxnet.nd if not.\n x : mxnet.nd.NDArray\n Input feature map.\n anchors : 
mxnet.nd.NDArray\n Anchors loaded from self, no need to supply.\n offsets : mxnet.nd.NDArray\n Offsets loaded from self, no need to supply.\n\n Returns\n -------\n (tuple of) mxnet.nd.NDArray\n During training, return (bbox, raw_box_centers, raw_box_scales, objness,\n class_pred, anchors, offsets).\n During inference, return detections.\n\n \"\"\"\n # prediction flat to (batch, pred per pixel, height * width)\n pred = self.prediction(x).reshape((0, self._num_anchors * self._num_pred, -1))\n # transpose to (batch, height * width, num_anchor, num_pred)\n pred = pred.transpose(axes=(0, 2, 1)).reshape((0, -1, self._num_anchors, self._num_pred))\n # components\n raw_box_centers = pred.slice_axis(axis=-1, begin=0, end=2)\n raw_box_scales = pred.slice_axis(axis=-1, begin=2, end=4)\n objness = pred.slice_axis(axis=-1, begin=4, end=5)\n class_pred = pred.slice_axis(axis=-1, begin=5, end=None)\n\n # valid offsets, (1, 1, height, width, 2)\n offsets = F.slice_like(offsets, x * 0, axes=(2, 3))\n # reshape to (1, height*width, 1, 2)\n offsets = offsets.reshape((1, -1, 1, 2))\n\n box_centers = F.broadcast_add(F.sigmoid(raw_box_centers), offsets) * self._stride\n box_scales = F.broadcast_mul(F.exp(raw_box_scales), anchors)\n confidence = F.sigmoid(objness)\n class_score = F.broadcast_mul(F.sigmoid(class_pred), confidence)\n wh = box_scales / 2.0\n bbox = F.concat(box_centers - wh, box_centers + wh, dim=-1)\n\n if autograd.is_training():\n # during training, we don't need to convert whole bunch of info to detection results\n return (bbox.reshape((0, -1, 4)), raw_box_centers, raw_box_scales,\n objness, class_pred, anchors, offsets)\n\n # prediction per class\n bboxes = F.tile(bbox, reps=(self._classes, 1, 1, 1, 1))\n scores = F.transpose(class_score, axes=(3, 0, 1, 2)).expand_dims(axis=-1)\n ids = F.broadcast_add(scores * 0, F.arange(0, self._classes).reshape((0, 1, 1, 1, 1)))\n detections = F.concat(ids, scores, bboxes, dim=-1)\n # reshape to (B, xx, 6)\n detections = F.reshape(detections.transpose(axes=(1, 0, 2, 3, 4)), (0, -1, 6))\n return detections\n\n\nclass YOLODetectionBlockV3(gluon.HybridBlock):\n \"\"\"YOLO V3 Detection Block which does the following:\n\n - add a few conv layers\n - return the output\n - have a branch that do yolo detection.\n\n Parameters\n ----------\n channel : int\n Number of channels for 1x1 conv. 3x3 Conv will have 2*channel.\n num_sync_bn_devices : int, default is -1\n Number of devices for training. 
If `num_sync_bn_devices < 2`, SyncBatchNorm is disabled.\n\n \"\"\"\n def __init__(self, channel, num_sync_bn_devices=-1, **kwargs):\n super(YOLODetectionBlockV3, self).__init__(**kwargs)\n assert channel % 2 == 0, \"channel {} cannot be divided by 2\".format(channel)\n with self.name_scope():\n self.body = nn.HybridSequential(prefix='')\n for _ in range(2):\n # 1x1 reduce\n self.body.add(_conv2d(channel, 1, 0, 1, num_sync_bn_devices))\n # 3x3 expand\n self.body.add(_conv2d(channel * 2, 3, 1, 1, num_sync_bn_devices))\n self.body.add(_conv2d(channel, 1, 0, 1, num_sync_bn_devices))\n self.tip = _conv2d(channel * 2, 3, 1, 1, num_sync_bn_devices)\n\n # pylint: disable=unused-argument\n def hybrid_forward(self, F, x):\n route = self.body(x)\n tip = self.tip(route)\n return route, tip\n\n\nclass YOLOV3(gluon.HybridBlock):\n \"\"\"YOLO V3 detection network.\n Reference: https://arxiv.org/pdf/1804.02767.pdf.\n\n Parameters\n ----------\n stages : mxnet.gluon.HybridBlock\n Staged feature extraction blocks.\n For example, 3 stages and 3 YOLO output layers are used original paper.\n channels : iterable\n Number of conv channels for each appended stage.\n `len(channels)` should match `len(stages)`.\n num_class : int\n Number of foreground objects.\n anchors : iterable\n The anchor setting. `len(anchors)` should match `len(stages)`.\n strides : iterable\n Strides of feature map. `len(strides)` should match `len(stages)`.\n alloc_size : tuple of int, default is (128, 128)\n For advanced users. Define `alloc_size` to generate large enough anchor\n maps, which will later saved in parameters. During inference, we support arbitrary\n input image by cropping corresponding area of the anchor map. This allow us\n to export to symbol so we can run it in c++, Scalar, etc.\n nms_thresh : float, default is 0.45.\n Non-maximum suppression threshold. You can speficy < 0 or > 1 to disable NMS.\n nms_topk : int, default is 400\n Apply NMS to top k detection results, use -1 to disable so that every Detection\n result is used in NMS.\n post_nms : int, default is 100\n Only return top `post_nms` detection results, the rest is discarded. The number is\n based on COCO dataset which has maximum 100 objects per image. You can adjust this\n number if expecting more objects. You can use -1 to return all detections.\n pos_iou_thresh : float, default is 1.0\n IOU threshold for true anchors that match real objects.\n 'pos_iou_thresh < 1' is not implemented.\n ignore_iou_thresh : float\n Anchors that has IOU in `range(ignore_iou_thresh, pos_iou_thresh)` don't get\n penalized of objectness score.\n num_sync_bn_devices : int, default is -1\n Number of devices for training. 
If `num_sync_bn_devices < 2`, SyncBatchNorm is disabled.\n\n \"\"\"\n def __init__(self, stages, channels, anchors, strides, classes, alloc_size=(128, 128),\n nms_thresh=0.45, nms_topk=400, post_nms=100, pos_iou_thresh=1.0,\n ignore_iou_thresh=0.7, num_sync_bn_devices=-1, **kwargs):\n super(YOLOV3, self).__init__(**kwargs)\n self._classes = classes\n self.nms_thresh = nms_thresh\n self.nms_topk = nms_topk\n self.post_nms = post_nms\n self._pos_iou_thresh = pos_iou_thresh\n self._ignore_iou_thresh = ignore_iou_thresh\n if pos_iou_thresh >= 1:\n self._target_generator = YOLOV3TargetMerger(len(classes), ignore_iou_thresh)\n else:\n raise NotImplementedError(\n \"pos_iou_thresh({}) < 1.0 is not implemented!\".format(pos_iou_thresh))\n self._loss = YOLOV3Loss()\n with self.name_scope():\n self.stages = nn.HybridSequential()\n self.transitions = nn.HybridSequential()\n self.yolo_blocks = nn.HybridSequential()\n self.yolo_outputs = nn.HybridSequential()\n # note that anchors and strides should be used in reverse order\n for i, stage, channel, anchor, stride in zip(\n range(len(stages)), stages, channels, anchors[::-1], strides[::-1]):\n self.stages.add(stage)\n block = YOLODetectionBlockV3(channel, num_sync_bn_devices)\n self.yolo_blocks.add(block)\n output = YOLOOutputV3(i, len(classes), anchor, stride, alloc_size=alloc_size)\n self.yolo_outputs.add(output)\n if i > 0:\n self.transitions.add(_conv2d(channel, 1, 0, 1, num_sync_bn_devices))\n\n @property\n def num_class(self):\n \"\"\"Number of (non-background) categories.\n\n Returns\n -------\n int\n Number of (non-background) categories.\n\n \"\"\"\n return self._num_class\n\n @property\n def classes(self):\n \"\"\"Return names of (non-background) categories.\n\n Returns\n -------\n iterable of str\n Names of (non-background) categories.\n\n \"\"\"\n return self._classes\n\n def hybrid_forward(self, F, x, *args):\n \"\"\"YOLOV3 network hybrid forward.\n\n Parameters\n ----------\n F : mxnet.nd or mxnet.sym\n `F` is mxnet.sym if hybridized or mxnet.nd if not.\n x : mxnet.nd.NDArray\n Input data.\n *args : optional, mxnet.nd.NDArray\n During training, extra inputs are required:\n (gt_boxes, obj_t, centers_t, scales_t, weights_t, clas_t)\n These are generated by YOLOV3PrefetchTargetGenerator in dataloader transform function.\n\n Returns\n -------\n (tuple of) mxnet.nd.NDArray\n During inference, return detections in shape (B, N, 6)\n with format (cid, score, xmin, ymin, xmax, ymax)\n During training, return losses only: (obj_loss, center_loss, scale_loss, cls_loss).\n\n\n \"\"\"\n all_box_centers = []\n all_box_scales = []\n all_objectness = []\n all_class_pred = []\n all_anchors = []\n all_offsets = []\n all_feat_maps = []\n all_detections = []\n routes = []\n for stage, block, output in zip(self.stages, self.yolo_blocks, self.yolo_outputs):\n x = stage(x)\n routes.append(x)\n\n # the YOLO output layers are used in reverse order, i.e., from very deep layers to shallow\n for i, block, output in zip(range(len(routes)), self.yolo_blocks, self.yolo_outputs):\n x, tip = block(x)\n if autograd.is_training():\n dets, box_centers, box_scales, objness, class_pred, anchors, offsets = output(tip)\n all_box_centers.append(box_centers.reshape((0, -3, -1)))\n all_box_scales.append(box_scales.reshape((0, -3, -1)))\n all_objectness.append(objness.reshape((0, -3, -1)))\n all_class_pred.append(class_pred.reshape((0, -3, -1)))\n all_anchors.append(anchors)\n all_offsets.append(offsets)\n # here we use fake featmap to reduce memory consuption, only shape[2, 3] is 
used\n fake_featmap = F.zeros_like(tip.slice_axis(\n axis=0, begin=0, end=1).slice_axis(axis=1, begin=0, end=1))\n all_feat_maps.append(fake_featmap)\n else:\n dets = output(tip)\n all_detections.append(dets)\n if i >= len(routes) - 1:\n break\n # add transition layers\n x = self.transitions[i](x)\n # upsample feature map reverse to shallow layers\n upsample = _upsample(x, stride=2)\n route_now = routes[::-1][i + 1]\n x = F.concat(F.slice_like(upsample, route_now * 0, axes=(2, 3)), route_now, dim=1)\n\n if autograd.is_training():\n # during training, the network behaves differently since we don't need detection results\n if autograd.is_recording():\n # generate losses and return them directly\n box_preds = F.concat(*all_detections, dim=1)\n all_preds = [F.concat(*p, dim=1) for p in [\n all_objectness, all_box_centers, all_box_scales, all_class_pred]]\n all_targets = self._target_generator(box_preds, *args)\n return self._loss(*(all_preds + all_targets))\n\n # return raw predictions, this is only used in DataLoader transform function.\n return (F.concat(*all_detections, dim=1), all_anchors, all_offsets, all_feat_maps,\n F.concat(*all_box_centers, dim=1), F.concat(*all_box_scales, dim=1),\n F.concat(*all_objectness, dim=1), F.concat(*all_class_pred, dim=1))\n\n # concat all detection results from different stages\n result = F.concat(*all_detections, dim=1)\n # apply nms per class\n if self.nms_thresh > 0 and self.nms_thresh < 1:\n result = F.contrib.box_nms(\n result, overlap_thresh=self.nms_thresh, valid_thresh=0.01,\n topk=self.nms_topk, id_index=0, score_index=1, coord_start=2, force_suppress=False)\n if self.post_nms > 0:\n result = result.slice_axis(axis=1, begin=0, end=self.post_nms)\n ids = result.slice_axis(axis=-1, begin=0, end=1)\n scores = result.slice_axis(axis=-1, begin=1, end=2)\n bboxes = result.slice_axis(axis=-1, begin=2, end=None)\n return ids, scores, bboxes\n\n def set_nms(self, nms_thresh=0.45, nms_topk=400, post_nms=100):\n \"\"\"Set non-maximum suppression parameters.\n\n Parameters\n ----------\n nms_thresh : float, default is 0.45.\n Non-maximum suppression threshold. You can speficy < 0 or > 1 to disable NMS.\n nms_topk : int, default is 400\n Apply NMS to top k detection results, use -1 to disable so that every Detection\n result is used in NMS.\n post_nms : int, default is 100\n Only return top `post_nms` detection results, the rest is discarded. The number is\n based on COCO dataset which has maximum 100 objects per image. You can adjust this\n number if expecting more objects. You can use -1 to return all detections.\n\n Returns\n -------\n None\n\n \"\"\"\n self._clear_cached_op()\n self.nms_thresh = nms_thresh\n self.nms_topk = nms_topk\n self.post_nms = post_nms\n\n def reset_class(self, classes):\n \"\"\"Reset class categories and class predictors.\n\n Parameters\n ----------\n classes : iterable of str\n The new categories. 
['apple', 'orange'] for example.\n\n \"\"\"\n self._clear_cached_op()\n self._classes = classes\n if self._pos_iou_thresh >= 1:\n self._target_generator = YOLOV3TargetMerger(len(classes), self._ignore_iou_thresh)\n for outputs in self.yolo_outputs:\n outputs.reset_class(classes)\n\ndef get_yolov3(name, stages, filters, anchors, strides, classes,\n dataset, pretrained=False, ctx=mx.cpu(),\n root=os.path.join('~', '.mxnet', 'models'), **kwargs):\n \"\"\"Get YOLOV3 models.\n\n Parameters\n ----------\n name : str or None\n Model name, if `None` is used, you must specify `features` to be a `HybridBlock`.\n stages : iterable of str or `HybridBlock`\n List of network internal output names, in order to specify which layers are\n used for predicting bbox values.\n If `name` is `None`, `features` must be a `HybridBlock` which generate mutliple\n outputs for prediction.\n filters : iterable of float or None\n List of convolution layer channels which is going to be appended to the base\n network feature extractor. If `name` is `None`, this is ignored.\n sizes : iterable fo float\n Sizes of anchor boxes, this should be a list of floats, in incremental order.\n The length of `sizes` must be len(layers) + 1. For example, a two stage SSD\n model can have ``sizes = [30, 60, 90]``, and it converts to `[30, 60]` and\n `[60, 90]` for the two stages, respectively. For more details, please refer\n to original paper.\n ratios : iterable of list\n Aspect ratios of anchors in each output layer. Its length must be equals\n to the number of SSD output layers.\n steps : list of int\n Step size of anchor boxes in each output layer.\n classes : iterable of str\n Names of categories.\n dataset : str\n Name of dataset. This is used to identify model name because models trained on\n differnet datasets are going to be very different.\n pretrained : bool or str\n Boolean value controls whether to load the default pretrained weights for model.\n String value represents the hashtag for a certain version of pretrained weights.\n pretrained_base : bool or str, optional, default is True\n Load pretrained base network, the extra layers are randomized. Note that\n if pretrained is `Ture`, this has no effect.\n ctx : mxnet.Context\n Context such as mx.cpu(), mx.gpu(0).\n root : str\n Model weights storing path.\n\n Returns\n -------\n HybridBlock\n A YOLOV3 detection network.\n \"\"\"\n net = YOLOV3(stages, filters, anchors, strides, classes=classes, **kwargs)\n if pretrained:\n from ..model_store import get_model_file\n full_name = '_'.join(('yolo3', name, dataset))\n net.load_params(get_model_file(full_name, tag=pretrained, root=root), ctx=ctx)\n return net\n\ndef yolo3_darknet53_voc(pretrained_base=True, pretrained=False, num_sync_bn_devices=-1, **kwargs):\n \"\"\"YOLO3 multi-scale with darknet53 base network on VOC dataset.\n\n Parameters\n ----------\n pretrained_base : bool or str\n Boolean value controls whether to load the default pretrained weights for model.\n String value represents the hashtag for a certain version of pretrained weights.\n pretrained : bool or str\n Boolean value controls whether to load the default pretrained weights for model.\n String value represents the hashtag for a certain version of pretrained weights.\n num_sync_bn_devices : int\n Number of devices for training. 
If `num_sync_bn_devices < 2`, SyncBatchNorm is disabled.\n\n Returns\n -------\n mxnet.gluon.HybridBlock\n Fully hybrid yolo3 network.\n\n \"\"\"\n from ...data import VOCDetection\n pretrained_base = False if pretrained else pretrained_base\n base_net = darknet53(pretrained=pretrained_base, num_sync_bn_devices=num_sync_bn_devices)\n stages = [base_net.features[:15], base_net.features[15:24], base_net.features[24:]]\n anchors = [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]]\n strides = [8, 16, 32]\n classes = VOCDetection.CLASSES\n return get_yolov3(\n 'darknet53', stages, [512, 256, 128], anchors, strides, classes, 'voc',\n pretrained=pretrained, num_sync_bn_devices=num_sync_bn_devices, **kwargs)\n\ndef yolo3_darknet53_coco(pretrained_base=True, pretrained=False, num_sync_bn_devices=-1, **kwargs):\n \"\"\"YOLO3 multi-scale with darknet53 base network on COCO dataset.\n\n Parameters\n ----------\n pretrained_base : boolean\n Whether fetch and load pretrained weights for base network.\n pretrained : bool or str\n Boolean value controls whether to load the default pretrained weights for model.\n String value represents the hashtag for a certain version of pretrained weights.\n num_sync_bn_devices : int, default is -1\n Number of devices for training. If `num_sync_bn_devices < 2`, SyncBatchNorm is disabled.\n\n Returns\n -------\n mxnet.gluon.HybridBlock\n Fully hybrid yolo3 network.\n \"\"\"\n from ...data import COCODetection\n pretrained_base = False if pretrained else pretrained_base\n base_net = darknet53(pretrained=pretrained_base, num_sync_bn_devices=num_sync_bn_devices)\n stages = [base_net.features[:15], base_net.features[15:24], base_net.features[24:]]\n anchors = [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]]\n strides = [8, 16, 32]\n classes = COCODetection.CLASSES\n return get_yolov3(\n 'darknet53', stages, [512, 256, 128], anchors, strides, classes, 'coco',\n pretrained=pretrained, num_sync_bn_devices=num_sync_bn_devices, **kwargs)\n\ndef yolo3_darknet53_custom(classes, transfer=None, pretrained_base=True, pretrained=False,\n num_sync_bn_devices=-1, **kwargs):\n \"\"\"YOLO3 multi-scale with darknet53 base network on custom dataset.\n\n Parameters\n ----------\n classes : iterable of str\n Names of custom foreground classes. `len(classes)` is the number of foreground classes.\n transfer : str or None\n If not `None`, will try to reuse pre-trained weights from SSD networks trained on other\n datasets.\n pretrained_base : boolean\n Whether fetch and load pretrained weights for base network.\n num_sync_bn_devices : int, default is -1\n Number of devices for training. If `num_sync_bn_devices < 2`, SyncBatchNorm is disabled.\n\n Returns\n -------\n mxnet.gluon.HybridBlock\n Fully hybrid yolo3 network.\n \"\"\"\n if transfer is None:\n base_net = darknet53(pretrained=pretrained_base, num_sync_bn_devices=num_sync_bn_devices)\n stages = [base_net.features[:15], base_net.features[15:24], base_net.features[24:]]\n anchors = [\n [10, 13, 16, 30, 33, 23],\n [30, 61, 62, 45, 59, 119],\n [116, 90, 156, 198, 373, 326]]\n strides = [8, 16, 32]\n net = get_yolov3(\n 'darknet53', stages, [512, 256, 128], anchors, strides, classes, 'coco',\n pretrained=pretrained, num_sync_bn_devices=num_sync_bn_devices, **kwargs)\n else:\n from ...model_zoo import get_model\n net = get_model('yolo3_darknet53_' + str(transfer), pretrained=True, **kwargs)\n net.reset_class(classes)\n return net\n" ]
[ [ "numpy.arange", "numpy.expand_dims", "numpy.array", "numpy.concatenate", "numpy.meshgrid" ] ]
almarklein/pirt
[ "b43a57aad89ad2638a65e58079153567a49a43f2" ]
[ "pirt/gaussfun.py" ]
[ "\"\"\"\nThe gaussfun module implements functions for diffusion and Gaussian\nderivatives for data of any dimension.\n\nContents of this module:\n\n * gaussiankernel - Create a Gaussian kernel\n * gaussiankernel2 - Create a 2D Gaussian kernel\n * diffusionkernel - Create a discrete analog to the Gaussian kernel\n * gfilter - Filter data with Gaussian (derivative) kernels\n * diffuse - Filter data with true discrete diffusion kernels\n * gfilter2 - Filter data by specifying sigma in world coordinates\n * diffuse2 - Diffuse data by specifying sigma in world coordinates\n\n\"\"\"\n\n# SHORTED VERSION WITHOUT PYRAMID STUFF\n\nimport numpy as np\nimport scipy.ndimage\n\nfrom . import Aarray\n\n\n## Kernels\n\ndef _gaussiankernel(sigma, order, t):\n \"\"\" _gaussiankernel(sigma, order, t)\n Calculate a Gaussian kernel of the given sigma and with the given\n order, using the given t-values. \n \"\"\"\n \n # if sigma 0, kernel is a single 1.\n if sigma==0:\n return np.array([1.0])\n \n # precalculate some stuff\n sigma2 = sigma**2\n sqrt2 = np.sqrt(2)\n \n # Calculate the gaussian, it is unnormalized. We'll normalize at the end.\n basegauss = np.exp(- t**2 / (2*sigma2) )\n \n # Scale the t-vector, what we actually do is H( t/(sigma*sqrt2) ), \n # where H() is the Hermite polynomial. \n x = t / (sigma*sqrt2)\n \n # Depending on the order, calculate the Hermite polynomial (physicists \n # notation). We let Mathematica calculate these, and put the first 20 \n # orders in here. 20 orders should be sufficient for most tasks :)\n if order<0: \n raise Exception(\"The order should not be negative!\") \n elif order==0:\n part = 1\n elif order==1:\n part = 2*x\n elif order==2:\n part = -2 + 4*x**2\n elif order==3:\n part = -12*x + 8*x**3\n elif order==4:\n part = 12 - 48*x**2 + 16*x**4\n elif order==5:\n part = 120*x - 160*x**3 + 32*x**5\n elif order==6:\n part = -120 + 720*x**2 - 480*x**4 + 64*x**6\n elif order==7: \n part = -1680*x + 3360*x**3 - 1344*x**5 + 128*x**7\n elif order==8: \n part = 1680 - 13440*x**2 + 13440*x**4 - 3584*x**6 + 256*x**8\n elif order==9: \n part = 30240*x - 80640*x**3 + 48384*x**5 - 9216*x**7 + 512*x**9\n elif order==10: \n part = (-30240 + 302400*x**2 - 403200*x**4 + 161280*x**6 - 23040*x**8 \n + 1024*x**10)\n elif order==11: \n part = (-665280*x + 2217600*x**3 - 1774080*x**5 + 506880*x**7 \n - 56320*x**9 + 2048*x**11)\n elif order==12: \n part = (665280 - 7983360*x**2 + 13305600*x**4 - 7096320*x**6 \n + 1520640*x**8 - 135168*x**10 + 4096*x**12)\n elif order==13: \n part = (17297280*x - 69189120*x**3 + 69189120*x**5 - 26357760*x**7 \n + 4392960*x**9 - 319488*x**11 + 8192*x**13)\n elif order==14: \n part = (-17297280 + 242161920*x**2 - 484323840*x**4 + 322882560*x**6 \n - 92252160*x**8 + 12300288*x**10 - 745472*x**12 + 16384*x**14)\n elif order==15: \n part = (-518918400*x + 2421619200*x**3 - 2905943040*x**5 \n + 1383782400*x**7 - 307507200*x**9 + 33546240*x**11 \n - 1720320*x**13 + 32768*x**15)\n elif order==16: \n part = (518918400 - 8302694400*x**2 + 19372953600*x**4 \n - 15498362880*x**6 + 5535129600*x**8 \n - 984023040*x**10 + 89456640*x**12 - 3932160*x**14 \n + 65536*x**16) \n else:\n raise Exception(\"This order is not implemented!\")\n \n \n # Apply Hermite polynomial to gauss\n k = (-1)**order * part * basegauss\n \n ## Normalize\n \n # By calculating the normalization factor by integrating the gauss, rather\n # than using the expression 1/(sigma*sqrt(2pi)), we know that the KERNEL\n # volume is 1 when the order is 0.\n norm_default = 1 / basegauss.sum()\n # == 1 / ( 
sigma * sqrt(2*pi) )\n\n # Here's another normalization term that we need because we use the Hermite\n # polynomials.\n norm_hermite = 1/(sigma*sqrt2)**order\n\n # A note on Gaussian derivatives: as sigma increases, the resulting\n # image (when smoothed) will have smaller intensities. To correct for\n # this (if this is necessary) a diffusion normalization term can be\n # applied: sigma**2\n\n # Normalize and return\n return k * ( norm_default * norm_hermite )\n\n\n\ndef gaussiankernel(sigma, order=0, N=None, returnt=False, warn=True):\n \"\"\" gaussiankernel(sigma, order=0, N=None, returnt=False, warn=True)\n \n Creates a 1D gaussian derivative kernel with the given sigma\n and the given order. (An order of 0 is a \"regular\" Gaussian.)\n \n The returned kernel is a column vector, thus working in the first \n dimension (in images, this often is y). \n \n The returned kernel is odd by default. Using N one can specify the\n full kernel size (if not int, the ceil operator is applied). By \n specifying a negative value for N, the tail length (number of elements\n on both sides of the center element) can be specified.\n The total kernel size than becomes ceil(-N)*2+1. Though the method\n to supply it may be a bit obscure, this measure can be handy, since \n the tail length if often related to the sigma. If not given, the \n optimal N is determined automatically, depending on sigma and order. \n \n If the given scale is a small for the given order, a warning is\n produced (unless warn==True).\n \n ----- Used Literature:\n\n Koenderink, J. J. \n The structure of images. \n Biological Cybernetics 50, 5 (1984), 363-370.\n\n Lindeberg, T. \n Scale-space for discrete signals. \n IEEE Transactions on Pattern Analysis and Machine Intelligence 12, 3 (1990), 234-254.\n\n Ter Haar Romeny, B. M., Niessen, W. J., Wilting, J., and Florack, L. M. J. 
\n Differential structure of images: Accuracy of representation.\n In First IEEE International Conference on Image Processing, (Austin, TX) (1994).\n \"\"\"\n \n # Check inputs\n if not N:\n # Calculate ratio that is small, but large enough to prevent errors\n ratio = 3 + 0.25 * order - 2.5/((order-6)**2+(order-9)**2)\n # Calculate N\n N = int( np.ceil( ratio*sigma ) ) * 2 + 1\n \n elif N > 0:\n if not isinstance(N, int):\n N = int( np.ceil(N) )\n \n elif N < 0:\n N = -N\n if not isinstance(N, int):\n N = int( np.ceil(N) )\n N = N * 2 + 1\n \n # Check whether given sigma is large enough \n sigmaMin = 0.5 + order**(0.62) / 5\n if warn and sigma < sigmaMin:\n print('WARNING: The scale (sigma) is very small for the given order, '\n 'better use a larger scale!')\n \n # Create t vector which indicates the x-position\n t = np.arange(-N/2.0+0.5, N/2.0, 1.0, dtype=np.float64)\n \n # Get kernel\n k = _gaussiankernel(sigma, order, t)\n \n # Done\n if returnt:\n return k, t\n else:\n return k\n\n\n\ndef gaussiankernel2(sigma, ox, oy, N=None):\n \"\"\" gaussiankernel2(sigma, ox, oy, N=-3*sigma)\n Create a 2D Gaussian kernel.\n \"\"\"\n # Default N\n if N is None:\n N = -3*sigma\n \n # Calculate kernels\n k1 = gaussiankernel(sigma, ox, N)\n k2 = gaussiankernel(sigma, oy, N)\n \n # Matrix multiply\n k = np.matrix(k1).T * np.matrix(k2)\n return k.A \n\n\ndef diffusionkernel(sigma, N=4, returnt=False):\n \"\"\" diffusionkernel(sigma, N=4, returnt=False)\n \n A discrete analog to the continuous Gaussian kernel, \n as proposed by Toni Lindeberg.\n \n N is the tail length factor (relative to sigma).\n \n \"\"\"\n \n # Make sure sigma is float\n sigma = float(sigma)\n \n # Often refered to as the scale parameter, or t\n sigma2 = sigma*sigma \n \n # Where we start, from which we go backwards\n # This is also the tail length\n if N > 0:\n nstart = int(np.ceil(N*sigma)) + 1\n else:\n nstart = abs(N) + 1\n \n # Allocate kernel and times\n t = np.arange(-nstart, nstart+1, dtype='float64')\n k = np.zeros_like(t)\n \n # Make a start\n n = nstart # center (t[nstart]==0)\n k[n+nstart] = 0\n n = n-1\n k[n+nstart] = 0.01\n \n # Iterate!\n for n in range(nstart-1,0,-1): \n # Calculate previous\n k[(n-1)+nstart] = 2*n/sigma2 * k[n+nstart] + k[(n+1)+nstart]\n \n # The part at the left can be erroneous, so let's use the right part only\n k[:nstart] = np.flipud(k[-nstart:])\n \n # Remove the tail, which is zero\n k = k[1:-1]\n t = t[1:-1]\n \n # Normalize\n k = k / k.sum()\n \n # the function T that we look for is T = e^(-sigma2) * I(n,sigma2)\n # We found I(n,sigma2) and because we normalized it, the normalization term\n # e^(-sigma2) is no longer necesary.\n \n # Done\n if returnt:\n return k, t\n else:\n return k\n\n\n## Filters\n\ndef gfilter(L, sigma, order=0, mode='constant', warn=True):\n \"\"\" gfilter(L, sigma, order=0, mode='constant', warn=True)\n \n Gaussian filterering and Gaussian derivative filters.\n \n Parameters\n ----------\n L : np.ndarray\n The input data to filter\n sigma : scalar or list-of-scalars\n The smoothing parameter, can be given for each dimension\n order : int or list-of-ints\n The order of the derivative, can be given for each dimension\n mode : {'reflect','constant','nearest','mirror', 'wrap'}\n Determines how edge effects are handled. 
(see scipy.ndimage.convolve1d)\n warn : boolean\n Whether to show a warning message if the sigma is too small to \n represent the required derivative.\n \n Notes\n =====\n Makes use of the seperability property of the Gaussian by convolving\n 1D kernels in each dimension. \n \n \n Example\n =======\n # Calculate the second order derivative with respect to x (Lx) (if the\n # first dimension of the image is Y).\n result1 = gfilter( im, 2, [0,2] ) \n # Calculate the first order derivative with respect to y and z (Lyz).\n result2 = gfilter( volume, 3, [0,1,1] ) \n \n \"\"\"\n \n # store original\n Lo = L\n \n # make sigma ok\n try:\n sigma = [sig for sig in sigma]\n except TypeError:\n sigma = [sigma for i in range(L.ndim)]\n \n # same for order\n if order is None:\n order = 0\n try:\n order = [o for o in order]\n except TypeError:\n order = [order for i in range(L.ndim)]\n \n # test sigma\n if len(sigma) != L.ndim:\n tmp = \"the amount of sigmas given must match the dimensions of L!\"\n raise Exception(tmp) \n # test order\n if len(order) != L.ndim:\n tmp = \"the amount of sigmas given must match the dimensions of L!\"\n raise Exception(tmp)\n \n for d in range(L.ndim):\n # get kernel\n k = gaussiankernel(sigma[d], order[d], warn=warn)\n # convolve\n L = scipy.ndimage.convolve1d(L, k, d, mode=mode)\n \n \n # Make Aarray if we can\n if hasattr(Lo, 'sampling') and hasattr(Lo, 'origin'):\n L = Aarray(L, Lo.sampling, Lo.origin)\n \n # Done\n return L\n\n\ndef diffuse(L, sigma, mode='nearest'):\n \"\"\" diffuse(L, sigma)\n \n Diffusion using a discrete variant of the diffusion operator. \n \n Parameters\n ----------\n L : np.ndarray\n The input data to filter\n sigma : scalar or list-of-scalars\n The smoothing parameter, can be given for each dimension\n \n Details\n -------\n In the continous domain, the Gaussian is the only true diffusion\n operator. However, by using a sampled Gaussian kernel in the \n discrete domain, errors are introduced, particularly if for\n small sigma. \n \n This implementation uses a a discrete variant of the diffusion\n operator, which is based on modified Bessel functions. This results\n in a better approximation of the diffusion process, particularly\n when applying the diffusion recursively. There are also advantages\n for calculating derivatives, see below.\n \n Based on:\n Lindeberg, T. \"Discrete derivative approximations with scale-space\n properties: A basis for low-level feature extraction\", \n J. of Mathematical Imaging and Vision, 3(4), pp. 349--376, 1993.\n \n Calculating derivatives\n -----------------------\n Because this imeplementation applies diffusion using a discrete \n representation of the diffusion kernel, one can calculate true\n derivatives using small-support derivative operators. 
For 1D:\n * Lx = 0.5 * ( L[x+1] - L[x-1] )\n * Lxx = L[x+1] - 2*L[x] + L(x-1)\n \n \"\"\"\n \n # Store original\n Lo = L\n \n # Make sigma ok\n try:\n sigma = [sig for sig in sigma]\n except TypeError:\n sigma = [sigma for i in range(L.ndim)]\n \n # Test sigma\n if len(sigma) != L.ndim:\n tmp = \"the amount of sigmas given must match the dimensions of L!\"\n raise Exception(tmp) \n \n # Diffuse\n for d in range(L.ndim):\n # get kernel\n k = diffusionkernel(sigma[d])\n # convolve\n L = scipy.ndimage.convolve1d(L, k, d, mode=mode)\n \n # Make Aarray if we can\n if hasattr(Lo, 'sampling') and hasattr(Lo, 'origin'):\n L = Aarray(L, Lo.sampling, Lo.origin)\n \n # Done\n return L\n\n\ndef gfilter2(L, scale, order=0, mode='reflect', warn=True):\n \"\"\" gfilter2(L, scale, order=0, mode='reflect', warn=True)\n \n Apply Gaussian filtering by specifying a scale in world coordinates\n rather than a sigma. This function determines the sigmas to apply,\n based on the sampling of the elements.\n \n See gfilter for more information.\n \n (If L is not an Aarray, this function yields the same result as gfilter.)\n \n \"\"\"\n \n # Determine sigmas\n if hasattr(L, 'sampling'):\n sigmas = [float(scale)/s for s in L.sampling]\n else:\n sigmas = float(scale)\n \n # Filter\n return gfilter(L, sigmas, order, mode, warn)\n\n\ndef diffuse2(L, scale, mode='nearest'):\n \"\"\" diffuse2(L, scale, mode='nearest')\n \n Apply diffusion by specifying a scale in world coordinates\n rather than a sigma. This function determines the sigmas to apply,\n based on the sampling of the elements.\n \n See diffuse for more information.\n \n (If L is not an Aarray, this function yields the same result as diffuse.)\n \n \"\"\"\n \n # Determine sigmas\n if hasattr(L, 'sampling'):\n sigmas = [float(scale)/s for s in L.sampling]\n else:\n sigmas = float(scale)\n \n # Filter\n return diffuse(L, sigmas, mode)\n\n" ]
[ [ "numpy.zeros_like", "numpy.flipud", "numpy.ceil", "numpy.matrix", "numpy.exp", "numpy.arange", "numpy.sqrt", "numpy.array" ] ]
reichlab/jacques
[ "772e6f69de944f5ce19af16b24dfd76d023861f9" ]
[ "test/test_kernel_smooth_quantile_fn.py" ]
[ "import os\nos.environ['CUDA_VISIBLE_DEVICES'] = '-1'\n\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_probability as tfp\nimport unittest\n\nfrom jacques import kernels\n\n\nclass Test_Kernel_Smooth_Quantile_Fn(unittest.TestCase):\n def test_quantile_smooth_bw(self):\n tau = np.concatenate(\n [np.array([0.01, 0.025])] +\n [np.linspace(0.05, 0.95, 19)] +\n [np.array([0.975, 0.99])],\n axis = 0)\n theta_b_raw = np.ones(1)\n theta_b = 0.25 * np.exp(theta_b_raw) / (1.0 + np.exp(theta_b_raw))\n \n lower_tau = tau[:9]\n central_tau = tau[9:(-9)]\n upper_tau = tau[-9:]\n expected = np.concatenate(\n [lower_tau - lower_tau**2 / (4 * theta_b)] +\n [np.full_like(central_tau, theta_b)] +\n [(1 - upper_tau) - (1 - upper_tau)**2 / (4 * theta_b)],\n axis = 0\n )\n \n actual = kernels.quantile_smooth_bw(tf.constant(tau), tf.constant(theta_b_raw))\n \n # actual matches expected\n self.assertTrue(np.all(np.abs(actual.numpy() - expected) < 1e-12))\n \n \n def test_integrated_epanechnikov(self):\n # TODO\n raise NotImplementedError\n \n \n def test_kernel_quantile_fn(self):\n tau = np.concatenate(\n [np.array([0.01, 0.025])] +\n [np.linspace(0.05, 0.95, 19)] +\n [np.array([0.975, 0.99])],\n axis = 0)\n y = np.array(\n [[1.0, 5.0, 8.0],\n [5.0, 2.0, 3.0],\n [11.0, 20.0, 15.0]]\n )\n w = np.array(\n [[[0.1, 0.5, 0.4],\n [0.333, 0.333, 0.334]],\n [[0.3, 0.2, 0.5],\n [0.0, 0.0, 1.0]],\n [[0.4, 0.4, 0.2],\n [0.2, 0.2, 0.6]]]\n )\n y_sorted = np.array(\n [[1.0, 5.0, 8.0],\n [2.0, 3.0, 5.0],\n [11.0, 15.0, 20.0]]\n )\n w_sorted = np.array(\n [[[0.1, 0.5, 0.4],\n [0.333, 0.333, 0.334]],\n [[0.2, 0.5, 0.3],\n [0.0, 1.0, 0.0]],\n [[0.4, 0.2, 0.4],\n [0.2, 0.6, 0.2]]]\n )\n theta_b_raw = np.ones(1)\n bw = kernels.quantile_smooth_bw(tf.constant(tau), tf.constant(theta_b_raw))\n \n expected = np.zeros((3, 2, 23))\n # batch\n for b in range(3):\n # test set observation\n for j in range(2):\n # quantile level\n for k in range(23):\n tau_k = tau[k]\n bw_k = bw[k]\n cw = np.concatenate([np.array([0.0]), np.cumsum(w_sorted[b, j, :])], axis=0)\n # train set observation\n for i in range(3):\n U_im1 = kernels.integrated_epanechnikov(\n (tau_k - cw[i+1-1]) / bw_k\n )\n U_i = kernels.integrated_epanechnikov(\n (tau_k - cw[i+1]) / bw_k\n )\n expected[b, j, k] = expected[b, j, k] + \\\n (U_im1 - U_i) * y_sorted[b, i]\n \n actual = kernels.kernel_quantile_fn(\n tf.constant(y),\n tf.constant(w),\n tf.constant(tau),\n tf.constant(theta_b_raw))\n \n # actual matches expected\n self.assertTrue(np.all(np.abs(actual.numpy() - expected) < 1e-12))\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.ones", "numpy.full_like", "numpy.cumsum", "numpy.zeros", "numpy.exp", "numpy.array", "tensorflow.constant", "numpy.linspace" ] ]
JxTang-bioinformatics/Tangbio
[ "7e0f0ed45371504c65d2a7ed419aed934e26c583" ]
[ "CaMelia model/Feature extraction/unionfeature_for_train.py" ]
[ "# -*- coding: utf-8 -*-\r\nfrom __future__ import division\r\nfrom sys import argv\r\nimport pandas as pd\r\nimport numpy as np\r\nimport os,time\r\nimport warnings\r\nwarnings.filterwarnings('ignore')\r\n###########################################\r\ndef reduce_mem(df):\r\n starttime = time.time()\r\n numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']\r\n start_mem = df.memory_usage().sum() / 1024**2\r\n for col in df.columns:\r\n col_type = df[col].dtypes\r\n if col_type in numerics:\r\n c_min = df[col].min()\r\n c_max = df[col].max()\r\n if pd.isnull(c_min) or pd.isnull(c_max):\r\n continue\r\n if str(col_type)[:3] == 'int':\r\n if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:\r\n df[col] = df[col].astype(np.int8)\r\n elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:\r\n df[col] = df[col].astype(np.int16)\r\n elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:\r\n df[col] = df[col].astype(np.int32)\r\n elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:\r\n df[col] = df[col].astype(np.int64)\r\n else:\r\n if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:\r\n df[col] = df[col].astype(np.float16)\r\n elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:\r\n df[col] = df[col].astype(np.float32)\r\n else:\r\n df[col] = df[col].astype(np.float64)\r\n end_mem = df.memory_usage().sum() / 1024**2\r\n print('-- Mem. usage decreased to {:5.2f} Mb ({:.1f}% reduction),time spend:{:2.2f} min'.format(end_mem,\r\n 100*(start_mem-end_mem)/start_mem,\r\n (time.time()-starttime)/60))\r\n return df\r\n\r\n###########################################\r\nif __name__ == '__main__': \r\n \r\n #All data located in the same directory\r\n DataPath = r'%s' % argv[1]\r\n #Input data \r\n InputDataName = '%s' % argv[2] \r\n #neighboring range\r\n neighbor_region = int(argv[3])\r\n \r\n \r\n \r\n gse = DataPath\r\n ff = InputDataName\t \r\n region = neighbor_region \r\n \r\n \r\n name = ff.split('.')[0].split('_')[-1]\r\n \r\n \r\n path = r'%s/%s' % (gse,ff)\r\n data = pd.read_csv(path,header=0,sep='\\t')\r\n data = reduce_mem(data)\r\n if list(data)[0] != 'chrom':\r\n del data['%s' % list(data)[0]]\r\n \r\n cell_num = list(data)[2:( len(list(data))-1 )]\r\n \r\n \r\n file_dir = r'%s/Available_Train_dataset/region%d' % (gse,region)\r\n if not os.path.exists(file_dir):\r\n os.makedirs(file_dir) \r\n \r\n for i in range(len(cell_num)): \r\n #local_methFeature\r\n path = r'%s/region10_localmatched_morethan08/%s.txt' % (gse,cell_num[i])\r\n local_methFeature = pd.read_csv(path,header=0,sep='\\t')\r\n local_methFeature = reduce_mem(local_methFeature)\r\n local_methFeature = local_methFeature.rename(columns={'aver_meth':'local_methFeature'})\r\n local_methFeature = local_methFeature[['chrom','location','local_methFeature']]\r\n\r\n #neighbor_methFeature\r\n path = r'%s/neighbor_methFeature_%d/localRegion_%s/%s_neighbor_methFeature.txt' % (gse,region,region,cell_num[i])\r\n neighbor_methFeature = pd.read_csv(path,header=0,sep='\\t')\r\n neighbor_methFeature = reduce_mem(neighbor_methFeature)\r\n\r\n #merge-[neighbor,local]\r\n data_all = pd.merge(data[['chrom','location','%s' % cell_num[i] ]],local_methFeature)\r\n data_all = pd.merge(data_all,neighbor_methFeature,how='inner',on=['chrom','location'])\r\n \r\n data_all.to_csv(r'%s/%s.txt' % (file_dir,cell_num[i]),sep='\\t',header=True,index=False) \r\n del data_all\r\n" ]
[ [ "pandas.read_csv", "numpy.iinfo", "pandas.merge", "pandas.isnull", "numpy.finfo" ] ]
ganik/DeepSpeed
[ "788e1c40e83beacfc4901e7daa1e097d2efb82bb" ]
[ "tests/unit/test_partition.py" ]
[ "import pytest\n\nimport torch\nimport torch.distributed as dist\n\nfrom deepspeed.runtime.utils import partition_uniform\nfrom deepspeed.runtime.utils import partition_balanced\nfrom deepspeed.runtime.utils import prefix_sum_inc\nfrom deepspeed.runtime.utils import PartitionedTensor\n\nfrom .common import distributed_test\n\n\n@distributed_test(world_size=4)\ndef test_partitioned_tensor():\n world = dist.get_world_size()\n rank = dist.get_rank()\n\n group = dist.new_group(ranks=list(range(world)))\n\n rows = world * 4\n cols = 3\n\n full = torch.rand(rows, cols).cuda()\n dist.broadcast(full, src=0, group=group)\n part = PartitionedTensor(full, group=group)\n\n assert len(part.local_size()) == 1\n assert part.local_size()[0] * world == full.numel()\n\n reconstructed = part.full()\n assert torch.equal(full, reconstructed)\n\n\n@distributed_test(world_size=4)\ndef test_partitioned_tensor_meta():\n world = dist.get_world_size()\n rank = dist.get_rank()\n\n group = dist.new_group(ranks=list(range(world)))\n\n rows = world * 7\n cols = 3\n\n full = torch.rand(rows, cols).cuda()\n dist.broadcast(full, src=0, group=group)\n part = PartitionedTensor(full, group=group)\n\n my_meta = PartitionedTensor.from_meta(part.to_meta(), part.local_data, group)\n assert torch.equal(full, my_meta.full())\n\n\ndef assert_valid_partition(weights, parts, P):\n N = len(weights)\n assert len(parts) == P + 1\n assert parts[0] == 0\n assert parts[P] == N\n for idx in range(P):\n assert parts[idx] <= parts[idx + 1]\n\n\ndef get_partition_weights(weights, parts):\n \"\"\" Return the amount of weight in each partition. \"\"\"\n costs = [0] * (len(parts) - 1)\n P = len(parts) - 1\n for p in range(P):\n start = parts[p]\n stop = parts[p + 1]\n costs[p] = sum(weights[start:stop])\n return costs\n\n\ndef test_prefix_sum():\n x = [3, 4, 5]\n psum = prefix_sum_inc(x)\n assert psum == [3, 7, 12]\n\n\ndef test_valid_partition():\n N = 10\n P = 1\n weights = [1] * N\n parts = partition_balanced(weights, P)\n assert_valid_partition(weights, parts, P)\n\n\ndef test_short_partition_uniform():\n N = 2\n P = 4\n weights = [1] * N\n parts = partition_uniform(len(weights), P)\n assert_valid_partition(weights, parts, P)\n\n\ndef test_short_partition():\n N = 2\n P = 4\n weights = [1] * N\n parts = partition_balanced(weights, P)\n assert_valid_partition(weights, parts, P)\n\n\ndef test_easy_balance_uniform():\n weights = [1] * 8\n P = 4\n parts = partition_uniform(len(weights), P)\n assert_valid_partition(weights, parts, P)\n costs = get_partition_weights(weights, parts)\n assert all(c == 2 for c in costs)\n\n\ndef test_easy_balance_balanced():\n weights = [1] * 8\n P = 4\n parts = partition_balanced(weights, P)\n assert_valid_partition(weights, parts, P)\n costs = get_partition_weights(weights, parts)\n assert all(c == 2 for c in costs), costs\n\n\ndef test_int_balanced():\n weights = [0, 1, 2, 3, 3, 3]\n P = 4\n parts = partition_balanced(weights, P)\n assert parts == [0, 3, 4, 5, 6]\n\n assert_valid_partition(weights, parts, P)\n costs = get_partition_weights(weights, parts)\n assert all(c == 3 for c in costs)\n\n\ndef test_float_balanced():\n weights = [0., 1.1, 1.9, 3., 3., 3.]\n P = 4\n parts = partition_balanced(weights, P)\n assert_valid_partition(weights, parts, P)\n assert parts == [0, 3, 4, 5, 6]\n\n\[email protected](reason=\"Variance-minimizing partitioning returns different result.\")\ndef test_float_lastheavy():\n weights = [0., 1.1, 1.9, 3., 30.]\n P = 2\n parts = partition_balanced(weights, P)\n 
assert_valid_partition(weights, parts, P)\n assert parts == [0, 4, 5]\n\n\ndef test_float_midheavy():\n weights = [0., 1.1, 30, 3.]\n P = 3\n parts = partition_balanced(weights, P)\n assert_valid_partition(weights, parts, P)\n assert parts == [0, 2, 3, 4]\n\n\ndef test_balance_bert():\n # Parameters per layer for a transformer model with 24 transformers and hidden dim 1024\n weights = [\n 52559872,\n 12596224,\n 12596224,\n 12596224,\n 12596224,\n 12596224,\n 12596224,\n 12596224,\n 12596224,\n 12596224,\n 12596224,\n 12596224,\n 12596224,\n 12596224,\n 12596224,\n 12596224,\n 12596224,\n 12596224,\n 12596224,\n 12596224,\n 12596224,\n 12596224,\n 12596224,\n 12596224,\n 12596224,\n 0,\n 52559872\n ]\n P = 8\n parts = partition_balanced(weights, P)\n assert_valid_partition(weights, parts, P)\n" ]
[ [ "torch.distributed.get_rank", "torch.distributed.get_world_size", "torch.distributed.broadcast", "torch.rand", "torch.equal" ] ]
takumiw/nishika-cable-classification-1st-place
[ "6438c36fa607b79cd1b2dad195881dacc6a48e9d" ]
[ "src/utils_metrics.py" ]
[ "from typing import Dict, List, Optional\n\nimport numpy as np\nfrom sklearn.metrics import accuracy_score, f1_score, log_loss\n\n\ndef calc_metrics(\n y_true: np.ndarray, y_pred: np.ndarray, y_prob: Optional[np.ndarray] = None, metrics: List[str] = [\"loss\"]\n) -> Dict[str, float]:\n result = {}\n for metric in metrics:\n if metric == \"loss\":\n result[\"loss\"] = log_loss(y_true, y_prob)\n elif metric == \"accuracy\":\n result[\"accuracy\"] = accuracy_score(y_true, y_pred)\n elif metric == \"f1\":\n result[\"f1\"] = f1_score(y_true, y_pred, average=\"micro\")\n else:\n raise NameError(f\"metric {metric} is not defined\")\n return result\n" ]
[ [ "sklearn.metrics.f1_score", "sklearn.metrics.accuracy_score", "sklearn.metrics.log_loss" ] ]
DipeshV/olympic-hero
[ "73a82f36afed429b59895c68faffe838b90fc72b" ]
[ "code.py" ]
[ "# --------------\n#Importing header files\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n#Path of the file\r\npath\r\n\r\n#Code starts here\r\ndata = pd.read_csv(path)\r\n\r\ndata.rename(columns={'Total':'Total_Medals'}, inplace=True)\r\nprint(data.head())\n\n\n# --------------\n#Code starts here\n\ndata['Better_Event'] = np.where(data['Total_Summer'] > data['Total_Winter'], 'Summer', 'Winter')\ndata['Better_Event'] = np.where(data['Total_Summer'] == data['Total_Winter'], 'Both', data['Better_Event'])\n\nbetter_event = data['Better_Event'].value_counts().index.values[0]\n\n#print(data.head(5))\nprint(better_event)\n\nassert data['Better_Event'].value_counts()['Summer'] == 143, \"Should be 143\"\n\n\n# --------------\n#Code starts here\r\ntop_countries = data[['Country_Name','Total_Summer', 'Total_Winter', 'Total_Medals']]\r\n\r\nprint(top_countries.head(5))\r\n\r\ntop_countries=top_countries[:-1]\r\n\r\ndef top_ten(data,column):\r\n country_list = []\r\n country_list=list((data.nlargest(10,column)['Country_Name']))\r\n return country_list\r\n\r\n\r\ntop_10_summer=top_ten(top_countries,'Total_Summer')\r\nprint(\"Top 10 Summer:\\n\",top_10_summer, \"\\n\")\r\ntop_10_winter=top_ten(top_countries,'Total_Winter')\r\nprint(\"Top 10 Winter:\\n\",top_10_winter, \"\\n\")\r\ntop_10=top_ten(top_countries,'Total_Medals')\r\nprint(\"Top 10 :\\n\",top_10, \"\\n\")\r\n\r\ncommon=list(set(top_10_summer) & set(top_10_winter) & set(top_10))\r\nprint(common)\n\n\n# --------------\n#Code starts here\n\n#Create dataframe anmd plot for Summer Event\nsummer_df = data[data['Country_Name'].isin(top_10_summer)]\n\n#print(summer_df)\n\nplt.figure(figsize=(20,6))\nplt.bar(summer_df['Country_Name'], summer_df['Total_Summer'])\n\nplt.title('Top 10 Summer')\nplt.xlabel('Country Name')\nplt.ylabel('Total Medals')\n\n#Create the dataframe and plot for Winter Event\n\nwinter_df = data[data['Country_Name'].isin(top_10_winter)]\n\n#print(winter_df)\n\nplt.figure(figsize=(20,6))\nplt.bar(winter_df['Country_Name'], winter_df['Total_Winter'])\n\nplt.title('Top 10 Winter')\nplt.xlabel('Country Name')\nplt.ylabel('Total Medals')\n\n#Create the dataframe and plot for Winter Event\n\ntop_df = data[data['Country_Name'].isin(top_10)]\n\n#print(top_df)\n\nplt.figure(figsize=(20,6))\nplt.bar(top_df['Country_Name'], top_df['Total_Medals'])\n\nplt.title('Top 10')\nplt.xlabel('Country Name')\nplt.ylabel('Total Medals')\n\n\n# --------------\nsummer_df['Golden_Ratio'] = summer_df['Gold_Summer'] / summer_df['Total_Summer']\n\nsummer_max_ratio = max(summer_df['Golden_Ratio'])\n\nprint(summer_df['Golden_Ratio'].idxmax())\n\nsummer_country_gold = summer_df.loc[summer_df['Golden_Ratio'].idxmax(),'Country_Name']\n\nprint(\"Top Summer Coutnry: \", summer_country_gold, \" with a ratio of %.2f\" % summer_max_ratio)\n\n# For Winter List\n\nwinter_df['Golden_Ratio'] = winter_df['Gold_Winter'] / winter_df['Total_Winter']\n\nwinter_max_ratio = max(winter_df['Golden_Ratio'])\n\nwinter_country_gold = winter_df.loc[winter_df['Golden_Ratio'].idxmax(), 'Country_Name']\n\nprint(\"Top Winter Country: \", winter_country_gold, \" with a ratio of %.2f\" % winter_max_ratio)\n\n# For Over List\n\ntop_df['Golden_Ratio'] = top_df['Gold_Total'] / top_df['Total_Medals']\n\ntop_max_ratio = max(top_df['Golden_Ratio'])\n\ntop_country_gold = top_df.loc[top_df['Golden_Ratio'].idxmax(), 'Country_Name']\n\nprint(\"Top Country: \", top_country_gold, \" with a ratio of %.2f\" % top_max_ratio)\n\n\n# --------------\n#Code starts 
here\r\n\r\ndata_1 = data[:-1]\r\n\r\ndata_1['Total_Points'] = data_1['Gold_Total'] * 3 + data_1['Silver_Total'] * 2 + data_1['Bronze_Total'] * 1\r\n\r\nprint(data_1.head(10))\r\n\r\nmost_points = max(data_1['Total_Points'])\r\n\r\nbest_country = data_1.loc[data_1['Total_Points'].idxmax(),'Country_Name']\r\n\r\nprint(\"The maximum points achieved is: \", most_points, \" by \", best_country)\n\n\n# --------------\n#Code starts here\n\n#Subsetting the dataframe\nbest=data[data['Country_Name']==best_country]\nbest.reset_index(drop = True, inplace = True)\nbest=best[['Gold_Total','Silver_Total','Bronze_Total']]\n\n\n#Plotting bar plot\nbest.plot.bar(stacked=True)\n\n#Changing the x-axis label\nplt.xlabel('United States')\n\n#Changing the y-axis label\nplt.ylabel('Medals Tally')\n\n#Rotating the ticks of X-axis\nplt.xticks(rotation=45)\n\n#Updating the graph legend\nl=plt.legend()\nl.get_texts()[0].set_text('Gold_Total :' + str(best['Gold_Total'].values))\nl.get_texts()[1].set_text('Silver_Total :' + str(best['Silver_Total'].values))\nl.get_texts()[2].set_text('Bronze_Total :' + str(best['Bronze_Total'].values))\n\n\n\n#Code ends here\n\n\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.xticks", "pandas.read_csv", "matplotlib.pyplot.figure", "matplotlib.pyplot.title", "matplotlib.pyplot.ylabel", "numpy.where", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.bar" ] ]
Antolin1/DMG-Python
[ "ba3942e13006e1a32f3fe9f1b29615311f667274" ]
[ "dmg/realism/discriminativeModel.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 17 10:33:24 2021\n\n@author: Jose Antonio\n\"\"\"\n\n#of the paper Towards Char... using GNNs\n\n\nimport torch_geometric.nn as pyg_nn\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch_scatter.composite import scatter_softmax\n\nclass DiscriminativeModel(nn.Module):\n \n def __init__(self, dim_input, \n hidden_dim,dropout, \n vocab_nodes, \n vocab_edges):\n super(DiscriminativeModel, self).__init__()\n \n \n self.emb_nodes = nn.Embedding(len(vocab_nodes), dim_input)\n \n \n self.conv_1 = pyg_nn.RGCNConv(in_channels = dim_input, out_channels = hidden_dim, \n num_relations = len(vocab_edges))\n \n self.conv_2 = pyg_nn.RGCNConv(in_channels = hidden_dim, out_channels = hidden_dim, \n num_relations = len(vocab_edges))\n \n \n \n self.d_1 = nn.Dropout(dropout)\n \n self.lin = nn.Linear(hidden_dim, 1)\n \n self.attention_vector = nn.Linear(hidden_dim,1,bias=False)\n \n def forward(self,nodeTypes,edge_index, edge_attr, bs):\n \n \n nodeTypes = self.emb_nodes(nodeTypes)\n \n \n \n nodes_mess_1 = self.conv_1(nodeTypes, edge_index, edge_attr)\n nodes_mess_1 = self.d_1(F.relu(nodes_mess_1))\n \n nodes_mess_1 = F.relu(self.conv_2(nodes_mess_1, edge_index, edge_attr))\n \n \n attentions = scatter_softmax(torch.squeeze(self.attention_vector(nodes_mess_1)), bs)\n \n nodes_mess_1 = torch.unsqueeze(attentions,dim=1) * nodes_mess_1\n \n graph_emb = pyg_nn.global_add_pool(nodes_mess_1, bs)\n \n rtu = self.lin(graph_emb)\n \n return F.sigmoid(rtu)\n \n def getAttentions(self,nodeTypes,edge_index, edge_attr, bs):\n \n nodeTypes = self.emb_nodes(nodeTypes)\n nodes_mess_1 = self.conv_1(nodeTypes, edge_index, edge_attr)\n nodes_mess_1 = self.d_1(F.relu(nodes_mess_1))\n \n nodes_mess_1 = F.relu(self.conv_2(nodes_mess_1, edge_index, edge_attr))\n \n \n attentions = scatter_softmax(torch.squeeze(self.attention_vector(nodes_mess_1)), bs)\n \n return attentions\n" ]
[ [ "torch.unsqueeze", "torch.nn.functional.sigmoid", "torch.nn.Linear", "torch.nn.functional.relu", "torch.nn.Dropout" ] ]
mitiku1/Emopy-Multi-Input-
[ "b520eb3f3d121c9d456a52315f1fd78ef43f74fd" ]
[ "train/__main__.py" ]
[ "import argparse\nfrom train import start_training\nimport cv2\nfrom skimage import feature\nimport numpy as np\nimport dlib\nimport tensorflow as tf \nimport keras \n\ndef get_cmd_args():\n \"\"\" Parse user command line arguments\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-d\",\"--dataset_dir\",default=\"dataset\",type=str)\n parser.add_argument(\"-e\",\"--epoch\",default=10,type=int)\n parser.add_argument(\"-b\",\"--batch\",default=100,type=int)\n parser.add_argument(\"-s\",\"--step\",default=1000,type=int)\n parser.add_argument(\"-l\",\"--lr\",default=1e-4,type=float)\n parser.add_argument(\"-i\",\"--input_shape\",nargs=3,type=int,default=[48,48,1])\n parser.add_argument(\"-m\",\"--model_output\",type=str,default=\"model\")\n parser.add_argument(\"-f\",\"--features\",type=str,default=\"all\")\n\n\n args = parser.parse_args()\n return args\n\ndef main():\n \"\"\"Start of training program.\n \"\"\"\n np.random.seed(1)\n tf.set_random_seed(2)\n args = get_cmd_args()\n if args.input_shape[2]!=1:\n raise Exception(\"Currenly tested for only gray scale images. input_shape should be [height,width,1]\")\n start_training(args)\n \n\n\n\nif __name__ == '__main__':\n main()" ]
[ [ "tensorflow.set_random_seed", "numpy.random.seed" ] ]
meliao/fourier_neural_operator
[ "216915c6f1acd0651c7203bc8f16824efc495c5f" ]
[ "experiments/21_use_other_frequencies/train_models.py" ]
[ "\"\"\"\n@author: Zongyi Li\nThis file is the Fourier Neural Operator for 1D problem such as the (time-independent) Burgers equation discussed in Section 5.1 in the [paper](https://arxiv.org/pdf/2010.08895.pdf).\n\"\"\"\n\nimport logging\nimport os\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n# import torch.fft as fft\nfrom torch.nn.parameter import Parameter\nimport scipy.io as sio\n\nimport operator\nfrom functools import reduce\nfrom functools import partial\nfrom timeit import default_timer\n\ntorch.manual_seed(0)\nnp.random.seed(0)\n\n\nclass SpectralConv1dModes(nn.Module):\n def __init__(self, in_channels, out_channels, modes1):\n super(SpectralConv1dModes, self).__init__()\n\n \"\"\"\n 1D Fourier layer. It does FFT, linear transform, and Inverse FFT.\n \"\"\"\n\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.modes_low = modes1[0] #Number of Fourier modes to multiply, at most floor(N/2) + 1\n self.modes_high = modes1[0] #Number of Fourier modes to multiply, at most floor(N/2) + 1\n self.n_modes = self.modes_high - self.modes_low + 1\n # self.n_modes = len(modes1)\n self.scale = (1 / (in_channels*out_channels))\n self.weights1 = nn.Parameter(self.scale * torch.rand(in_channels, out_channels, self.n_modes, dtype=torch.cfloat))\n\n # Complex multiplication\n def compl_mul1d(self, input, weights):\n # (batch, in_channel, x ), (in_channel, out_channel, x) -> (batch, out_channel, x)\n return torch.einsum(\"bix,iox->box\", input, weights)\n\n def forward(self, x):\n batchsize = x.shape[0]\n #Compute Fourier coeffcients up to factor of e^(- something constant)\n x_ft = torch.fft.rfft(x)\n\n # Multiply relevant Fourier modes\n out_ft = torch.zeros(batchsize, self.out_channels, x.size(-1)//2 + 1, device=x.device, dtype=torch.cfloat)\n out_ft[:, :, self.modes_low:self.modes_high] = self.compl_mul1d(x_ft[:, :, self.modes_low:self.modes_high], self.weights1)\n\n #Return to physical space\n x = torch.fft.irfft(out_ft, n=x.size(-1))\n return x\n\n\nclass FNO1dComplexChooseModes(nn.Module):\n def __init__(self, modes, width):\n super(FNO1dComplexChooseModes, self).__init__()\n\n \"\"\"\n The overall network. It contains 4 layers of the Fourier layer.\n 1. Lift the input to the desire channel dimension by self.fc0 .\n 2. 4 layers of the integral operators u' = (W + K)(u).\n W defined by self.w; K defined by self.conv .\n 3. 
Project from the channel space to the output space by self.fc1 and self.fc2 .\n\n input: the solution of the initial condition and location (Re(a(x)), Im(a(x)), x)\n input shape: (batchsize, x=s, c=3)\n output: the solution of a later timestep\n output shape: (batchsize, x=s, c=2)\n \"\"\"\n\n self.modes1 = modes\n self.width = width\n self.fc0 = nn.Linear(3, self.width) # input channel is 3: (Re(a(x)), Im(a(x)), x)\n\n self.conv0 = SpectralConv1dModes(self.width, self.width, self.modes1)\n self.conv1 = SpectralConv1dModes(self.width, self.width, self.modes1)\n self.conv2 = SpectralConv1dModes(self.width, self.width, self.modes1)\n self.conv3 = SpectralConv1dModes(self.width, self.width, self.modes1)\n self.w0 = nn.Conv1d(self.width, self.width, 1)\n self.w1 = nn.Conv1d(self.width, self.width, 1)\n self.w2 = nn.Conv1d(self.width, self.width, 1)\n self.w3 = nn.Conv1d(self.width, self.width, 1)\n\n\n self.fc1 = nn.Linear(self.width, 128)\n self.fc2 = nn.Linear(128, 2)\n\n def forward(self, x):\n\n x = self.fc0(x)\n x = x.permute(0, 2, 1)\n\n x1 = self.conv0(x)\n x2 = self.w0(x)\n x = x1 + x2\n x = F.relu(x)\n\n x1 = self.conv1(x)\n x2 = self.w1(x)\n x = x1 + x2\n x = F.relu(x)\n\n x1 = self.conv2(x)\n x2 = self.w2(x)\n x = x1 + x2\n x = F.relu(x)\n\n x1 = self.conv3(x)\n x2 = self.w3(x)\n x = x1 + x2\n\n x = x.permute(0, 2, 1)\n x = self.fc1(x)\n x = F.relu(x)\n x = self.fc2(x)\n return torch.view_as_complex(x)\n\nclass OneStepDataSet(torch.utils.data.Dataset):\n def __init__(self, X, t_grid, x_grid):\n super(OneStepDataSet, self).__init__()\n assert X.shape[1] == t_grid.shape[-1]\n self.X = torch.tensor(X, dtype=torch.cfloat)\n self.t = torch.tensor(t_grid.flatten(), dtype=torch.float)\n self.x_grid = torch.tensor(x_grid, dtype=torch.float).view(-1, 1)\n self.n_tsteps = self.t.shape[0] - 1\n self.n_batches = self.X.shape[0]\n self.dataset_len = self.n_tsteps * self.n_batches\n\n def make_x_train(self, x_in):\n x_in = torch.view_as_real(x_in)\n y = torch.cat([x_in, self.x_grid], axis=1)\n return y\n\n def __getitem__(self, idx):\n idx_original = idx\n t_idx = int(idx % self.n_tsteps) + 1\n idx = int(idx // self.n_tsteps)\n batch_idx = int(idx % self.n_batches)\n x = self.make_x_train(self.X[batch_idx, t_idx - 1]) #.reshape(self.output_shape)\n y = self.X[batch_idx, t_idx] #.reshape(self.output_shape)\n return x, y\n\n def __len__(self):\n return self.dataset_len\n\n def __repr__(self):\n return \"OneStepDataSet with length {}, t_grid {}, n_batches {}\".format(self.dataset_len,\n self.t,\n self.n_batches)\n\ndef write_result_to_file(fp, missing_str='', **trial):\n \"\"\"Write a line to a tab-separated file saving the results of a single\n trial.\n\n Parameters\n ----------\n fp : str\n Output filepath\n missing_str : str\n (Optional) What to print in the case of a missing trial value\n **trial : dict\n One trial result. 
Keys will become the file header\n Returns\n -------\n None\n\n \"\"\"\n header_lst = list(trial.keys())\n header_lst.sort()\n if not os.path.isfile(fp):\n header_line = \"\\t\".join(header_lst) + \"\\n\"\n with open(fp, 'w') as f:\n f.write(header_line)\n trial_lst = [str(trial.get(i, missing_str)) for i in header_lst]\n trial_line = \"\\t\".join(trial_lst) + \"\\n\"\n with open(fp, 'a') as f:\n f.write(trial_line)\n\ndef MSE(x, y):\n errors = x - y\n return torch.mean(torch.square(errors.abs()))\n\ndef l2_normalized_error(pred, actual):\n errors = pred - actual\n error_norms = torch.linalg.norm(errors, dim=1, ord=2)\n actual_norms = torch.linalg.norm(actual, dim=1, ord=2)\n return torch.mean(torch.divide(error_norms, actual_norms))\n\ndef train_loop(model, optimizer, scheduler, start_epoch, end_epoch, device, train_data_loader, train_df, do_testing,\n test_every_n, test_data_loader, test_df, model_path, results_dd):\n \"\"\"This is the main training loop\n\n Parameters\n ----------\n model : torch.nn.Model\n Model to train.\n optimizer : torch.optimizer\n Optimization algorithm.\n scheduler : torch.lr_scheduler\n Learning rate scheduler.\n epochs : int\n Number of full passes over the training dataset.\n device : torch.device\n Determines whether a GPU is used.\n train_data_loader : torch.DataLoader\n Object which iterates over train dataset.\n train_df : str\n Filepath to save intermediate training results.\n do_testing : bool\n Whether to test the model throughout training.\n test_every_n : int\n How often to do said testing.\n test_data_loader : torch.DataLoader\n iterates over test dataset.\n test_df : str\n Filepath to save intermediate test results.\n model_path : str\n Filepath (formattable with epoch number) to save model.\n\n Returns\n -------\n model\n Trained model.\n \"\"\"\n\n train_dd = {}\n test_dd = {}\n logging.info(\"Beginning training for {} epochs\".format(end_epoch - start_epoch))\n\n model.train()\n t0_train = default_timer()\n for ep in range(start_epoch, end_epoch):\n # model.train()\n t1 = default_timer()\n train_mse = 0\n train_l2 = 0\n for x, y in train_data_loader:\n x, y = x.to(device), y.to(device)\n # print(\"X SHAPE: {}, Y SHAPE: {}\".format(x.shape, y.shape))\n\n optimizer.zero_grad()\n out = model(x)\n\n mse = MSE(out, y)\n mse.backward()\n # loss.backward()\n optimizer.step()\n\n train_mse += mse.item()\n\n scheduler.step()\n # model.eval()\n\n train_mse /= len(train_data_loader)\n\n t2 = default_timer()\n logging.info(\"Epoch: {}, time: {:.2f}, train_mse: {:.4f}\".format(ep, t2-t1, train_mse))\n train_dd['epoch'] = ep\n train_dd['MSE'] = train_mse\n train_dd['time'] = t2-t1\n write_result_to_file(train_df, **train_dd)\n\n ########################################################\n # Intermediate testing and saving\n ########################################################\n if ep % test_every_n == 0:\n test_mse = 0.\n test_l2_norm_error = 0.\n if do_testing:\n model.eval()\n with torch.no_grad():\n for x, y in test_data_loader:\n x, y = x.to(device), y.to(device)\n\n out = model(x)\n\n mse = MSE(out, y)\n test_mse += mse.item()\n\n l2_err = l2_normalized_error(out, y)\n test_l2_norm_error += l2_err.item()\n model.train()\n\n test_mse /= len(test_data_loader)\n test_l2_norm_error /= len(test_data_loader)\n\n test_dd['test_mse'] = test_mse\n test_dd['test_l2_normalized_error'] = test_l2_norm_error\n test_dd['epoch'] = ep\n\n write_result_to_file(test_df, **test_dd)\n logging.info(\"Test: Epoch: {}, test_mse: {:.4f}\".format(ep, test_mse))\n 
torch.save(model, model_path.format(ep))\n\n torch.save(model, model_path.format(end_epoch))\n if end_epoch - start_epoch > 0:\n results_dd['train_mse'] = train_mse\n results_dd['test_mse'] = test_mse\n return model\n\ndef setup_training(args, device, batch_size=1024, learning_rate=0.001, step_size=100, gamma=0.5):\n\n ################################################################\n # create results_dd\n ################################################################\n results_dd = {}\n #################################################################\n # read training data\n ################################################################\n\n d = sio.loadmat(args.data_fp)\n usol = d['output'][:,[0,args.time_idx]]\n t_grid = d['t'][:,[0,args.time_idx]]\n x_grid = d['x']\n logging.info(\"USOL SHAPE {}, T_GRID SHAPE: {}, X_GRID SHAPE: {}\".format(usol.shape,\n t_grid.shape,\n x_grid.shape))\n\n train_dataset = OneStepDataSet(usol, t_grid, x_grid)\n logging.info(\"Dataset: {}\".format(train_dataset))\n results_dd['ntrain'] = len(train_dataset)\n results_dd['prediction_time'] = args.time_idx\n\n train_data_loader = torch.utils.data.DataLoader(train_dataset,\n batch_size=batch_size,\n shuffle=True)\n\n ################################################################\n # read testing data\n ################################################################\n if not args.no_test:\n\n d_test = sio.loadmat(args.test_data_fp)\n usol_test = d_test['output'][:,[0,args.time_idx]]\n t_grid_test = d_test['t'][:,[0,args.time_idx]]\n x_grid_test = d_test['x']\n\n test_dataset = OneStepDataSet(usol_test, t_grid_test, x_grid_test)\n logging.info(\"Test Dataset: {}\".format(test_dataset))\n results_dd['ntest'] = len(test_dataset)\n\n test_data_loader = torch.utils.data.DataLoader(test_dataset,\n batch_size=batch_size,\n shuffle=True)\n\n ##################################################################\n # initialize model and optimizer\n ##################################################################\n args.freq_modes[0].sort()\n model_params = {'width': args.width, 'modes':args.freq_modes[0]}\n\n model = FNO1dComplexChooseModes(width=args.width, modes=args.freq_modes[0]).to(device)\n logging.info(\"Model freq modes: {}\".format(model.modes1))\n\n results_dd.update(model_params)\n\n optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n scheduler = torch.optim.lr_scheduler.StepLR(optimizer,\n step_size=step_size,\n gamma=gamma)\n results_dd['learning_rate'] = learning_rate\n\n ##################################################################\n # Call training loop\n ##################################################################\n logging.info(\"Starting FNO training\")\n model = train_loop(model=model,\n optimizer=optimizer,\n scheduler=scheduler,\n start_epoch=0,\n end_epoch=args.epochs,\n device=device,\n train_data_loader=train_data_loader,\n train_df=args.train_df,\n do_testing=(not args.no_test),\n test_every_n=100,\n test_data_loader=test_data_loader,\n test_df=args.test_df,\n model_path=args.model_fp,\n results_dd=results_dd)\n return model, results_dd\n\ndef main(args):\n # Figure out CUDA\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n logging.info(\"Running computation on device: {}\".format(device))\n\n ################################################################\n # Set up and do training\n ################################################################\n\n lr = (10 ** args.lr_exp)\n\n model, results_dd = setup_training(args, device, 
learning_rate=lr)\n\n if args.results_fp is not None:\n write_result_to_file(args.results_fp, **results_dd)\n logging.info(\"Wrote results to {}\".format(args.results_fp))\n else:\n logging.info(\"No results_fp specified, so here are the results\")\n logging.info(results_dd)\n\n logging.info(\"Finished\")\n\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_fp')\n parser.add_argument('--test_data_fp')\n parser.add_argument('--results_fp')\n parser.add_argument('--model_fp')\n parser.add_argument('--train_df')\n parser.add_argument('--test_df')\n parser.add_argument('--ntrain', type=int)\n parser.add_argument('--ntest', type=int)\n parser.add_argument('--time_idx', type=int, default=1)\n parser.add_argument('--lr_exp', type=int, default=-3)\n parser.add_argument('--epochs', type=int)\n parser.add_argument('--freq_modes', type=int, nargs=2, action='append')\n parser.add_argument('--width', type=int, default=64)\n parser.add_argument('--time_subsample', type=int, default=1)\n parser.add_argument('--no_test', default=False, action='store_true')\n\n args = parser.parse_args()\n fmt = \"%(asctime)s:FNO: %(levelname)s - %(message)s\"\n time_fmt = '%Y-%m-%d %H:%M:%S'\n logging.basicConfig(level=logging.INFO,\n format=fmt,\n datefmt=time_fmt)\n main(args)\n" ]
[ [ "torch.utils.data.DataLoader", "torch.linalg.norm", "torch.view_as_real", "torch.rand", "torch.no_grad", "numpy.random.seed", "torch.cuda.is_available", "torch.cat", "scipy.io.loadmat", "torch.divide", "torch.manual_seed", "torch.fft.rfft", "torch.tensor", "torch.nn.Conv1d", "torch.optim.lr_scheduler.StepLR", "torch.einsum", "torch.nn.Linear", "torch.view_as_complex", "torch.nn.functional.relu" ] ]
kingfener/espresso
[ "da8352a6e97c82e5d92c39972666a772e6bb508a" ]
[ "setup.py" ]
[ "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport os\nfrom setuptools import setup, find_packages, Extension\nimport sys\n\n\nif sys.version_info < (3, 6):\n sys.exit('Sorry, Python >= 3.6 is required for fairseq.')\n\n\nwith open('README.md') as f:\n readme = f.read()\n\n\nif sys.platform == 'darwin':\n extra_compile_args = ['-stdlib=libc++', '-O3']\nelse:\n extra_compile_args = ['-std=c++11', '-O3']\n\n\nclass NumpyExtension(Extension):\n \"\"\"Source: https://stackoverflow.com/a/54128391\"\"\"\n\n def __init__(self, *args, **kwargs):\n self.__include_dirs = []\n super().__init__(*args, **kwargs)\n\n @property\n def include_dirs(self):\n import numpy\n return self.__include_dirs + [numpy.get_include()]\n\n @include_dirs.setter\n def include_dirs(self, dirs):\n self.__include_dirs = dirs\n\n\nextensions = [\n Extension(\n 'fairseq.libbleu',\n sources=[\n 'fairseq/clib/libbleu/libbleu.cpp',\n 'fairseq/clib/libbleu/module.cpp',\n ],\n extra_compile_args=extra_compile_args,\n ),\n NumpyExtension(\n 'fairseq.data.data_utils_fast',\n sources=['fairseq/data/data_utils_fast.pyx'],\n language='c++',\n extra_compile_args=extra_compile_args,\n ),\n NumpyExtension(\n 'fairseq.data.token_block_utils_fast',\n sources=['fairseq/data/token_block_utils_fast.pyx'],\n language='c++',\n extra_compile_args=extra_compile_args,\n ),\n]\n\n\ncmdclass = {}\n\n\ntry:\n # torch is not available when generating docs\n from torch.utils import cpp_extension\n extensions.extend([\n cpp_extension.CppExtension(\n 'fairseq.libnat',\n sources=[\n 'fairseq/clib/libnat/edit_dist.cpp',\n ],\n )\n ])\n\n if 'CUDA_HOME' in os.environ:\n extensions.extend([\n cpp_extension.CppExtension(\n 'fairseq.libnat_cuda',\n sources=[\n 'fairseq/clib/libnat_cuda/edit_dist.cu',\n 'fairseq/clib/libnat_cuda/binding.cpp'\n ],\n )])\n cmdclass['build_ext'] = cpp_extension.BuildExtension\n\nexcept ImportError:\n pass\n\n\nif 'READTHEDOCS' in os.environ:\n # don't build extensions when generating docs\n extensions = []\n if 'build_ext' in cmdclass:\n del cmdclass['build_ext']\n\n # use CPU build of PyTorch\n dependency_links = [\n 'https://download.pytorch.org/whl/cpu/torch-1.3.0%2Bcpu-cp36-cp36m-linux_x86_64.whl'\n ]\nelse:\n dependency_links = []\n\n\nif 'clean' in sys.argv[1:]:\n # Source: https://bit.ly/2NLVsgE\n print(\"deleting Cython files...\")\n import subprocess\n subprocess.run(['rm -f fairseq/*.so fairseq/**/*.so fairseq/*.pyd fairseq/**/*.pyd'], shell=True)\n\n\nsetup(\n name='fairseq',\n version='0.9.0',\n description='Facebook AI Research Sequence-to-Sequence Toolkit',\n url='https://github.com/pytorch/fairseq',\n classifiers=[\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n ],\n long_description=readme,\n long_description_content_type='text/markdown',\n setup_requires=[\n 'cython',\n 'numpy',\n 'setuptools>=18.0',\n ],\n install_requires=[\n 'cffi',\n 'cython',\n 'kaldi_io',\n 'numpy',\n 'regex',\n 'sacrebleu',\n 'torch',\n 'tqdm',\n ],\n dependency_links=dependency_links,\n packages=find_packages(exclude=['scripts', 'tests']),\n ext_modules=extensions,\n test_suite='tests',\n entry_points={\n 'console_scripts': [\n 'fairseq-eval-lm = fairseq_cli.eval_lm:cli_main',\n 'fairseq-generate = fairseq_cli.generate:cli_main',\n 
'fairseq-interactive = fairseq_cli.interactive:cli_main',\n 'fairseq-preprocess = fairseq_cli.preprocess:cli_main',\n 'fairseq-score = fairseq_cli.score:cli_main',\n 'fairseq-train = fairseq_cli.train:cli_main',\n 'fairseq-validate = fairseq_cli.validate:cli_main',\n ],\n },\n cmdclass=cmdclass,\n zip_safe=False,\n)\n" ]
[ [ "numpy.get_include", "torch.utils.cpp_extension.CppExtension" ] ]
gourav108/coreml
[ "6bc2d494dff23cff923368e735992a4f4a47483c" ]
[ "object_detection/metrics/coco_evaluation.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Class for evaluating object detections with COCO metrics.\"\"\"\nimport numpy as np\nimport tensorflow as tf\n\nfrom core import standard_fields\nfrom metrics import coco_tools\nfrom utils import object_detection_evaluation\n\n\nclass CocoDetectionEvaluator(object_detection_evaluation.DetectionEvaluator):\n \"\"\"Class to evaluate COCO detection metrics.\"\"\"\n\n def __init__(self,\n categories,\n include_metrics_per_category=False,\n all_metrics_per_category=False):\n \"\"\"Constructor.\n\n Args:\n categories: A list of dicts, each of which has the following keys -\n 'id': (required) an integer id uniquely identifying this category.\n 'name': (required) string representing category name e.g., 'cat', 'dog'.\n include_metrics_per_category: If True, include metrics for each category.\n all_metrics_per_category: Whether to include all the summary metrics for\n each category in per_category_ap. Be careful with setting it to true if\n you have more than handful of categories, because it will pollute\n your mldash.\n \"\"\"\n super(CocoDetectionEvaluator, self).__init__(categories)\n # _image_ids is a dictionary that maps unique image ids to Booleans which\n # indicate whether a corresponding detection has been added.\n self._image_ids = {}\n self._groundtruth_list = []\n self._detection_boxes_list = []\n self._category_id_set = set([cat['id'] for cat in self._categories])\n self._annotation_id = 1\n self._metrics = None\n self._include_metrics_per_category = include_metrics_per_category\n self._all_metrics_per_category = all_metrics_per_category\n\n def clear(self):\n \"\"\"Clears the state to prepare for a fresh evaluation.\"\"\"\n self._image_ids.clear()\n self._groundtruth_list = []\n self._detection_boxes_list = []\n\n def add_single_ground_truth_image_info(self,\n image_id,\n groundtruth_dict):\n \"\"\"Adds groundtruth for a single image to be used for evaluation.\n\n If the image has already been added, a warning is logged, and groundtruth is\n ignored.\n\n Args:\n image_id: A unique string/integer identifier for the image.\n groundtruth_dict: A dictionary containing -\n InputDataFields.groundtruth_boxes: float32 numpy array of shape\n [num_boxes, 4] containing `num_boxes` groundtruth boxes of the format\n [ymin, xmin, ymax, xmax] in absolute image coordinates.\n InputDataFields.groundtruth_classes: integer numpy array of shape\n [num_boxes] containing 1-indexed groundtruth classes for the boxes.\n InputDataFields.groundtruth_is_crowd (optional): integer numpy array of\n shape [num_boxes] containing iscrowd flag for groundtruth boxes.\n \"\"\"\n if image_id in self._image_ids:\n tf.logging.warning('Ignoring ground truth with image id %s since it was '\n 'previously added', image_id)\n return\n\n groundtruth_is_crowd = groundtruth_dict.get(\n 
standard_fields.InputDataFields.groundtruth_is_crowd)\n # Drop groundtruth_is_crowd if empty tensor.\n if groundtruth_is_crowd is not None and not groundtruth_is_crowd.shape[0]:\n groundtruth_is_crowd = None\n\n self._groundtruth_list.extend(\n coco_tools.ExportSingleImageGroundtruthToCoco(\n image_id=image_id,\n next_annotation_id=self._annotation_id,\n category_id_set=self._category_id_set,\n groundtruth_boxes=groundtruth_dict[\n standard_fields.InputDataFields.groundtruth_boxes],\n groundtruth_classes=groundtruth_dict[\n standard_fields.InputDataFields.groundtruth_classes],\n groundtruth_is_crowd=groundtruth_is_crowd))\n self._annotation_id += groundtruth_dict[standard_fields.InputDataFields.\n groundtruth_boxes].shape[0]\n # Boolean to indicate whether a detection has been added for this image.\n self._image_ids[image_id] = False\n\n def add_single_detected_image_info(self,\n image_id,\n detections_dict):\n \"\"\"Adds detections for a single image to be used for evaluation.\n\n If a detection has already been added for this image id, a warning is\n logged, and the detection is skipped.\n\n Args:\n image_id: A unique string/integer identifier for the image.\n detections_dict: A dictionary containing -\n DetectionResultFields.detection_boxes: float32 numpy array of shape\n [num_boxes, 4] containing `num_boxes` detection boxes of the format\n [ymin, xmin, ymax, xmax] in absolute image coordinates.\n DetectionResultFields.detection_scores: float32 numpy array of shape\n [num_boxes] containing detection scores for the boxes.\n DetectionResultFields.detection_classes: integer numpy array of shape\n [num_boxes] containing 1-indexed detection classes for the boxes.\n\n Raises:\n ValueError: If groundtruth for the image_id is not available.\n \"\"\"\n if image_id not in self._image_ids:\n raise ValueError('Missing groundtruth for image id: {}'.format(image_id))\n\n if self._image_ids[image_id]:\n tf.logging.warning('Ignoring detection with image id %s since it was '\n 'previously added', image_id)\n return\n\n self._detection_boxes_list.extend(\n coco_tools.ExportSingleImageDetectionBoxesToCoco(\n image_id=image_id,\n category_id_set=self._category_id_set,\n detection_boxes=detections_dict[standard_fields.\n DetectionResultFields\n .detection_boxes],\n detection_scores=detections_dict[standard_fields.\n DetectionResultFields.\n detection_scores],\n detection_classes=detections_dict[standard_fields.\n DetectionResultFields.\n detection_classes]))\n self._image_ids[image_id] = True\n\n def evaluate(self):\n \"\"\"Evaluates the detection boxes and returns a dictionary of coco metrics.\n\n Returns:\n A dictionary holding -\n\n 1. 
summary_metrics:\n 'DetectionBoxes_Precision/mAP': mean average precision over classes\n averaged over IOU thresholds ranging from .5 to .95 with .05\n increments.\n 'DetectionBoxes_Precision/[email protected]': mean average precision at 50% IOU\n 'DetectionBoxes_Precision/[email protected]': mean average precision at 75% IOU\n 'DetectionBoxes_Precision/mAP (small)': mean average precision for small\n objects (area < 32^2 pixels).\n 'DetectionBoxes_Precision/mAP (medium)': mean average precision for\n medium sized objects (32^2 pixels < area < 96^2 pixels).\n 'DetectionBoxes_Precision/mAP (large)': mean average precision for large\n objects (96^2 pixels < area < 10000^2 pixels).\n 'DetectionBoxes_Recall/AR@1': average recall with 1 detection.\n 'DetectionBoxes_Recall/AR@10': average recall with 10 detections.\n 'DetectionBoxes_Recall/AR@100': average recall with 100 detections.\n 'DetectionBoxes_Recall/AR@100 (small)': average recall for small objects\n with 100.\n 'DetectionBoxes_Recall/AR@100 (medium)': average recall for medium objects\n with 100.\n 'DetectionBoxes_Recall/AR@100 (large)': average recall for large objects\n with 100 detections.\n\n 2. per_category_ap: if include_metrics_per_category is True, category\n specific results with keys of the form:\n 'Precision mAP ByCategory/category' (without the supercategory part if\n no supercategories exist). For backward compatibility\n 'PerformanceByCategory' is included in the output regardless of\n all_metrics_per_category.\n \"\"\"\n groundtruth_dict = {\n 'annotations': self._groundtruth_list,\n 'images': [{'id': image_id} for image_id in self._image_ids],\n 'categories': self._categories\n }\n coco_wrapped_groundtruth = coco_tools.COCOWrapper(groundtruth_dict)\n coco_wrapped_detections = coco_wrapped_groundtruth.LoadAnnotations(\n self._detection_boxes_list)\n box_evaluator = coco_tools.COCOEvalWrapper(\n coco_wrapped_groundtruth, coco_wrapped_detections, agnostic_mode=False)\n box_metrics, box_per_category_ap = box_evaluator.ComputeMetrics(\n include_metrics_per_category=self._include_metrics_per_category,\n all_metrics_per_category=self._all_metrics_per_category)\n box_metrics.update(box_per_category_ap)\n box_metrics = {'DetectionBoxes_'+ key: value\n for key, value in iter(box_metrics.items())}\n return box_metrics\n\n def get_estimator_eval_metric_ops(self, image_id, groundtruth_boxes,\n groundtruth_classes, detection_boxes,\n detection_scores, detection_classes):\n \"\"\"Returns a dictionary of eval metric ops to use with `tf.EstimatorSpec`.\n\n Note that once value_op is called, the detections and groundtruth added via\n update_op are cleared.\n\n Args:\n image_id: Unique string/integer identifier for the image.\n groundtruth_boxes: float32 tensor of shape [num_boxes, 4] containing\n `num_boxes` groundtruth boxes of the format\n [ymin, xmin, ymax, xmax] in absolute image coordinates.\n groundtruth_classes: int32 tensor of shape [num_boxes] containing\n 1-indexed groundtruth classes for the boxes.\n detection_boxes: float32 tensor of shape [num_boxes, 4] containing\n `num_boxes` detection boxes of the format [ymin, xmin, ymax, xmax]\n in absolute image coordinates.\n detection_scores: float32 tensor of shape [num_boxes] containing\n detection scores for the boxes.\n detection_classes: int32 tensor of shape [num_boxes] containing\n 1-indexed detection classes for the boxes.\n\n Returns:\n a dictionary of metric names to tuple of value_op and update_op that can\n be used as eval metric ops in tf.EstimatorSpec. 
Note that all update ops\n must be run together and similarly all value ops must be run together to\n guarantee correct behaviour.\n \"\"\"\n def update_op(\n image_id,\n groundtruth_boxes,\n groundtruth_classes,\n detection_boxes,\n detection_scores,\n detection_classes):\n self.add_single_ground_truth_image_info(\n image_id,\n {'groundtruth_boxes': groundtruth_boxes,\n 'groundtruth_classes': groundtruth_classes})\n self.add_single_detected_image_info(\n image_id,\n {'detection_boxes': detection_boxes,\n 'detection_scores': detection_scores,\n 'detection_classes': detection_classes})\n\n update_op = tf.py_func(update_op, [image_id,\n groundtruth_boxes,\n groundtruth_classes,\n detection_boxes,\n detection_scores,\n detection_classes], [])\n metric_names = ['DetectionBoxes_Precision/mAP',\n 'DetectionBoxes_Precision/[email protected]',\n 'DetectionBoxes_Precision/[email protected]',\n 'DetectionBoxes_Precision/mAP (large)',\n 'DetectionBoxes_Precision/mAP (medium)',\n 'DetectionBoxes_Precision/mAP (small)',\n 'DetectionBoxes_Recall/AR@1',\n 'DetectionBoxes_Recall/AR@10',\n 'DetectionBoxes_Recall/AR@100',\n 'DetectionBoxes_Recall/AR@100 (large)',\n 'DetectionBoxes_Recall/AR@100 (medium)',\n 'DetectionBoxes_Recall/AR@100 (small)']\n if self._include_metrics_per_category:\n for category_dict in self._categories:\n metric_names.append('DetectionBoxes_PerformanceByCategory/mAP/' +\n category_dict['name'])\n\n def first_value_func():\n self._metrics = self.evaluate()\n self.clear()\n return np.float32(self._metrics[metric_names[0]])\n\n def value_func_factory(metric_name):\n def value_func():\n return np.float32(self._metrics[metric_name])\n return value_func\n\n # Ensure that the metrics are only evaluated once.\n first_value_op = tf.py_func(first_value_func, [], tf.float32)\n eval_metric_ops = {metric_names[0]: (first_value_op, update_op)}\n with tf.control_dependencies([first_value_op]):\n for metric_name in metric_names[1:]:\n eval_metric_ops[metric_name] = (tf.py_func(\n value_func_factory(metric_name), [], np.float32), update_op)\n return eval_metric_ops\n\n\ndef _check_mask_type_and_value(array_name, masks):\n \"\"\"Checks whether mask dtype is uint8 and the values are either 0 or 1.\"\"\"\n if masks.dtype != np.uint8:\n raise ValueError('{} must be of type np.uint8. 
Found {}.'.format(\n array_name, masks.dtype))\n if np.any(np.logical_and(masks != 0, masks != 1)):\n raise ValueError('{} elements can only be either 0 or 1.'.format(\n array_name))\n\n\nclass CocoMaskEvaluator(object_detection_evaluation.DetectionEvaluator):\n \"\"\"Class to evaluate COCO detection metrics.\"\"\"\n\n def __init__(self, categories, include_metrics_per_category=False):\n \"\"\"Constructor.\n\n Args:\n categories: A list of dicts, each of which has the following keys -\n 'id': (required) an integer id uniquely identifying this category.\n 'name': (required) string representing category name e.g., 'cat', 'dog'.\n include_metrics_per_category: If True, include metrics for each category.\n \"\"\"\n super(CocoMaskEvaluator, self).__init__(categories)\n self._image_id_to_mask_shape_map = {}\n self._image_ids_with_detections = set([])\n self._groundtruth_list = []\n self._detection_masks_list = []\n self._category_id_set = set([cat['id'] for cat in self._categories])\n self._annotation_id = 1\n self._include_metrics_per_category = include_metrics_per_category\n\n def clear(self):\n \"\"\"Clears the state to prepare for a fresh evaluation.\"\"\"\n self._image_id_to_mask_shape_map.clear()\n self._image_ids_with_detections.clear()\n self._groundtruth_list = []\n self._detection_masks_list = []\n\n def add_single_ground_truth_image_info(self,\n image_id,\n groundtruth_dict):\n \"\"\"Adds groundtruth for a single image to be used for evaluation.\n\n If the image has already been added, a warning is logged, and groundtruth is\n ignored.\n\n Args:\n image_id: A unique string/integer identifier for the image.\n groundtruth_dict: A dictionary containing -\n InputDataFields.groundtruth_boxes: float32 numpy array of shape\n [num_boxes, 4] containing `num_boxes` groundtruth boxes of the format\n [ymin, xmin, ymax, xmax] in absolute image coordinates.\n InputDataFields.groundtruth_classes: integer numpy array of shape\n [num_boxes] containing 1-indexed groundtruth classes for the boxes.\n InputDataFields.groundtruth_instance_masks: uint8 numpy array of shape\n [num_boxes, image_height, image_width] containing groundtruth masks\n corresponding to the boxes. 
The elements of the array must be in\n {0, 1}.\n \"\"\"\n if image_id in self._image_id_to_mask_shape_map:\n tf.logging.warning('Ignoring ground truth with image id %s since it was '\n 'previously added', image_id)\n return\n\n groundtruth_instance_masks = groundtruth_dict[\n standard_fields.InputDataFields.groundtruth_instance_masks]\n _check_mask_type_and_value(standard_fields.InputDataFields.\n groundtruth_instance_masks,\n groundtruth_instance_masks)\n self._groundtruth_list.extend(\n coco_tools.\n ExportSingleImageGroundtruthToCoco(\n image_id=image_id,\n next_annotation_id=self._annotation_id,\n category_id_set=self._category_id_set,\n groundtruth_boxes=groundtruth_dict[standard_fields.InputDataFields.\n groundtruth_boxes],\n groundtruth_classes=groundtruth_dict[standard_fields.\n InputDataFields.\n groundtruth_classes],\n groundtruth_masks=groundtruth_instance_masks))\n self._annotation_id += groundtruth_dict[standard_fields.InputDataFields.\n groundtruth_boxes].shape[0]\n self._image_id_to_mask_shape_map[image_id] = groundtruth_dict[\n standard_fields.InputDataFields.groundtruth_instance_masks].shape\n\n def add_single_detected_image_info(self,\n image_id,\n detections_dict):\n \"\"\"Adds detections for a single image to be used for evaluation.\n\n If a detection has already been added for this image id, a warning is\n logged, and the detection is skipped.\n\n Args:\n image_id: A unique string/integer identifier for the image.\n detections_dict: A dictionary containing -\n DetectionResultFields.detection_scores: float32 numpy array of shape\n [num_boxes] containing detection scores for the boxes.\n DetectionResultFields.detection_classes: integer numpy array of shape\n [num_boxes] containing 1-indexed detection classes for the boxes.\n DetectionResultFields.detection_masks: optional uint8 numpy array of\n shape [num_boxes, image_height, image_width] containing instance\n masks corresponding to the boxes. 
The elements of the array must be\n in {0, 1}.\n\n Raises:\n ValueError: If groundtruth for the image_id is not available or if\n spatial shapes of groundtruth_instance_masks and detection_masks are\n incompatible.\n \"\"\"\n if image_id not in self._image_id_to_mask_shape_map:\n raise ValueError('Missing groundtruth for image id: {}'.format(image_id))\n\n if image_id in self._image_ids_with_detections:\n tf.logging.warning('Ignoring detection with image id %s since it was '\n 'previously added', image_id)\n return\n\n groundtruth_masks_shape = self._image_id_to_mask_shape_map[image_id]\n detection_masks = detections_dict[standard_fields.DetectionResultFields.\n detection_masks]\n if groundtruth_masks_shape[1:] != detection_masks.shape[1:]:\n raise ValueError('Spatial shape of groundtruth masks and detection masks '\n 'are incompatible: {} vs {}'.format(\n groundtruth_masks_shape,\n detection_masks.shape))\n _check_mask_type_and_value(standard_fields.DetectionResultFields.\n detection_masks,\n detection_masks)\n self._detection_masks_list.extend(\n coco_tools.ExportSingleImageDetectionMasksToCoco(\n image_id=image_id,\n category_id_set=self._category_id_set,\n detection_masks=detection_masks,\n detection_scores=detections_dict[standard_fields.\n DetectionResultFields.\n detection_scores],\n detection_classes=detections_dict[standard_fields.\n DetectionResultFields.\n detection_classes]))\n self._image_ids_with_detections.update([image_id])\n\n def evaluate(self):\n \"\"\"Evaluates the detection masks and returns a dictionary of coco metrics.\n\n Returns:\n A dictionary holding -\n\n 1. summary_metrics:\n 'DetectionMasks_Precision/mAP': mean average precision over classes\n averaged over IOU thresholds ranging from .5 to .95 with .05 increments.\n 'DetectionMasks_Precision/[email protected]': mean average precision at 50% IOU.\n 'DetectionMasks_Precision/[email protected]': mean average precision at 75% IOU.\n 'DetectionMasks_Precision/mAP (small)': mean average precision for small\n objects (area < 32^2 pixels).\n 'DetectionMasks_Precision/mAP (medium)': mean average precision for medium\n sized objects (32^2 pixels < area < 96^2 pixels).\n 'DetectionMasks_Precision/mAP (large)': mean average precision for large\n objects (96^2 pixels < area < 10000^2 pixels).\n 'DetectionMasks_Recall/AR@1': average recall with 1 detection.\n 'DetectionMasks_Recall/AR@10': average recall with 10 detections.\n 'DetectionMasks_Recall/AR@100': average recall with 100 detections.\n 'DetectionMasks_Recall/AR@100 (small)': average recall for small objects\n with 100 detections.\n 'DetectionMasks_Recall/AR@100 (medium)': average recall for medium objects\n with 100 detections.\n 'DetectionMasks_Recall/AR@100 (large)': average recall for large objects\n with 100 detections.\n\n 2. per_category_ap: if include_metrics_per_category is True, category\n specific results with keys of the form:\n 'Precision mAP ByCategory/category' (without the supercategory part if\n no supercategories exist). 
For backward compatibility\n 'PerformanceByCategory' is included in the output regardless of\n all_metrics_per_category.\n \"\"\"\n groundtruth_dict = {\n 'annotations': self._groundtruth_list,\n 'images': [{'id': image_id, 'height': shape[1], 'width': shape[2]}\n for image_id, shape in self._image_id_to_mask_shape_map.\n iteritems()],\n 'categories': self._categories\n }\n coco_wrapped_groundtruth = coco_tools.COCOWrapper(\n groundtruth_dict, detection_type='segmentation')\n coco_wrapped_detection_masks = coco_wrapped_groundtruth.LoadAnnotations(\n self._detection_masks_list)\n mask_evaluator = coco_tools.COCOEvalWrapper(\n coco_wrapped_groundtruth, coco_wrapped_detection_masks,\n agnostic_mode=False, iou_type='segm')\n mask_metrics, mask_per_category_ap = mask_evaluator.ComputeMetrics(\n include_metrics_per_category=self._include_metrics_per_category)\n mask_metrics.update(mask_per_category_ap)\n mask_metrics = {'DetectionMasks_'+ key: value\n for key, value in mask_metrics.iteritems()}\n return mask_metrics\n\n def get_estimator_eval_metric_ops(self, image_id, groundtruth_boxes,\n groundtruth_classes,\n groundtruth_instance_masks,\n detection_scores, detection_classes,\n detection_masks):\n \"\"\"Returns a dictionary of eval metric ops to use with `tf.EstimatorSpec`.\n\n Note that once value_op is called, the detections and groundtruth added via\n update_op are cleared.\n\n Args:\n image_id: Unique string/integer identifier for the image.\n groundtruth_boxes: float32 tensor of shape [num_boxes, 4] containing\n `num_boxes` groundtruth boxes of the format\n [ymin, xmin, ymax, xmax] in absolute image coordinates.\n groundtruth_classes: int32 tensor of shape [num_boxes] containing\n 1-indexed groundtruth classes for the boxes.\n groundtruth_instance_masks: uint8 tensor array of shape\n [num_boxes, image_height, image_width] containing groundtruth masks\n corresponding to the boxes. The elements of the array must be in {0, 1}.\n detection_scores: float32 tensor of shape [num_boxes] containing\n detection scores for the boxes.\n detection_classes: int32 tensor of shape [num_boxes] containing\n 1-indexed detection classes for the boxes.\n detection_masks: uint8 tensor array of shape\n [num_boxes, image_height, image_width] containing instance masks\n corresponding to the boxes. The elements of the array must be in {0, 1}.\n\n Returns:\n a dictionary of metric names to tuple of value_op and update_op that can\n be used as eval metric ops in tf.EstimatorSpec. 
Note that all update ops\n must be run together and similarly all value ops must be run together to\n guarantee correct behaviour.\n \"\"\"\n def update_op(\n image_id,\n groundtruth_boxes,\n groundtruth_classes,\n groundtruth_instance_masks,\n detection_scores,\n detection_classes,\n detection_masks):\n self.add_single_ground_truth_image_info(\n image_id,\n {'groundtruth_boxes': groundtruth_boxes,\n 'groundtruth_classes': groundtruth_classes,\n 'groundtruth_instance_masks': groundtruth_instance_masks})\n self.add_single_detected_image_info(\n image_id,\n {'detection_scores': detection_scores,\n 'detection_classes': detection_classes,\n 'detection_masks': detection_masks})\n\n update_op = tf.py_func(update_op, [image_id,\n groundtruth_boxes,\n groundtruth_classes,\n groundtruth_instance_masks,\n detection_scores,\n detection_classes,\n detection_masks], [])\n metric_names = ['DetectionMasks_Precision/mAP',\n 'DetectionMasks_Precision/[email protected]',\n 'DetectionMasks_Precision/[email protected]',\n 'DetectionMasks_Precision/mAP (large)',\n 'DetectionMasks_Precision/mAP (medium)',\n 'DetectionMasks_Precision/mAP (small)',\n 'DetectionMasks_Recall/AR@1',\n 'DetectionMasks_Recall/AR@10',\n 'DetectionMasks_Recall/AR@100',\n 'DetectionMasks_Recall/AR@100 (large)',\n 'DetectionMasks_Recall/AR@100 (medium)',\n 'DetectionMasks_Recall/AR@100 (small)']\n if self._include_metrics_per_category:\n for category_dict in self._categories:\n metric_names.append('DetectionMasks_PerformanceByCategory/mAP/' +\n category_dict['name'])\n\n def first_value_func():\n self._metrics = self.evaluate()\n self.clear()\n return np.float32(self._metrics[metric_names[0]])\n\n def value_func_factory(metric_name):\n def value_func():\n return np.float32(self._metrics[metric_name])\n return value_func\n\n # Ensure that the metrics are only evaluated once.\n first_value_op = tf.py_func(first_value_func, [], tf.float32)\n eval_metric_ops = {metric_names[0]: (first_value_op, update_op)}\n with tf.control_dependencies([first_value_op]):\n for metric_name in metric_names[1:]:\n eval_metric_ops[metric_name] = (tf.py_func(\n value_func_factory(metric_name), [], np.float32), update_op)\n return eval_metric_ops\n" ]
[ [ "numpy.logical_and", "numpy.float32", "tensorflow.logging.warning", "tensorflow.py_func", "tensorflow.control_dependencies" ] ]
cemac/UNRESP_AQSensorTools
[ "0c73eb48ccd1680da866dc344ab22bc40ef899fb" ]
[ "aqtools/getAQMeshData.py" ]
[ "#!/usr/bin/env python\n\"\"\"\nScript name: getAQMeshData.py\nAuthor: JO'N/ CEMAC (University of Leeds)\nDate: March 2018\nPurpose: Download data from an AQMesh pod using the API tool\nUsage: ./getAQMeshData.py <stationID> <startDate> <endDate> <variables> <outFreq>\n <stationID> - Unique ID of the AQMesh station from which you want to download data\n <startDate> - Start date/time (UTC) of data to download, in format YYYY-MM-DDTHH:MM:SS. Or type 'start' to get data from the earliest possible time.\n <endDate> - End date/time (UTC) of data to download, in format YYYY-MM-DDTHH:MM:SS. Or type 'end' to get data from the latest possible time.\n <variables> - List of variables to download, in single quotes separated by spaces, e.g. 'NO PM10 SO2'. Or specify 'ALL' to download all variables\n <outFreq> - \"Frequency of output files. Type 'all' for all data in one file, 'daily' for one calendar day per file, or 'monthly' for one calendar month per file\nOutput: One or multiple csv data files (depending on chosen output frequency) with naming convention: AQMeshData_[stationID]_[dateRange]_[variables].csv\n\"\"\"\n\nimport pandas as pd\nfrom pandas.io.json import json_normalize\nimport json\nimport requests\nfrom dateutil.parser import parse\nimport pytz\nimport datetime as dt\nimport os\n\n\ndef main(stationID, startDate, endDate, variables, outFreq):\n\n pyDir = os.path.dirname(os.path.realpath(__file__))\n\n # PARAMETERS\n allFreqs = ['all', 'daily', 'monthly']\n allVars = ['AIRPRES', 'HUM', 'NO', 'NO2', 'O3', 'PARTICULE_COUNT', 'PM1',\n 'PM10', 'PM2.5', 'PMTOTAL', 'SO2', 'TEMP', 'VOLTAGE']\n colOrder = ['TBTimestamp', 'TETimestamp', 'SensorLabel', 'SensorName',\n 'PreScaled', 'Slope', 'Offset', 'Scaled', 'UnitName', 'Status']\n # READ IN ACCOUNT INFO\n codesFile = os.path.join(pyDir, 'AQMeshCodes.txt')\n assert os.path.exists(\n codesFile), \"Can't find file AQMeshCodes.txt in same directory as python script\"\n f = open(codesFile, 'r')\n lines = f.readlines()\n f.close()\n assert len(\n lines) == 3, \"AQMeshCodes.txt should contain exactly 3 lines: A comment line, Account ID, Licence Key\"\n accountID = lines[1].strip()\n licenceKey = lines[2].strip()\n # CHECK VARIABLES\n if variables == 'ALL':\n vars = allVars\n varStr = 'AllVars'\n else:\n vars = [s for s in variables.split()]\n for v in vars:\n assert v in allVars, \"Variable name '\" + v + \\\n \"' not valid. Full list of available variables: \" + \\\n str(allVars)\n varStr = '-'.join(vars)\n # CHECK OUTPUT FREQUENCY\n assert outFreq in allFreqs, \"Output frequency '\" + outFreq + \\\n \"' not valid. List of available options: \" + str(allFreqs)\n # GET VALID TIME RANGE AND CHECK START/END DATES\n # API documentation here: https://api.airmonitors.net/3.5/documentation?key=D73341AM\n try:\n url = \"https://api.airmonitors.net/3.5/GET/\" + accountID + \\\n \"/\" + licenceKey + \"/stationdata/Period/\" + stationID\n rawText = requests.get(url=url)\n rawJson = json.loads(rawText.text)\n except:\n print(\"Couldn't access data. Are you online? 
Are the codes in AQMeshCodes.txt correct?\")\n raise\n validStart = parse(rawJson[0]['FirstTETimestamp'])\n validEnd = parse(rawJson[0]['LastTBTimestamp'])\n if startDate == 'start':\n start = validStart\n else:\n try:\n start = pytz.utc.localize(parse(startDate))\n except:\n print(\"Could not interpret start date - check the format\")\n raise\n if endDate == 'end':\n end = validEnd\n else:\n try:\n end = pytz.utc.localize(parse(endDate))\n except:\n print(\"Could not interpret end date - check the format\")\n raise\n assert (start - validStart).seconds >= 0, \"The start date/time must come after \" + str(validStart)\n assert (\n validEnd - end).seconds >= 0, \"The end date/time must come before \" + str(validEnd)\n assert (\n end - start).seconds >= 0, \"The start date/time must come before the end date/time\"\n #####\n\n # SPLIT TIME RANGE INTO DAYS FOR DOWNLOAD\n startDay = dt.datetime(start.year, start.month, start.day, tzinfo=pytz.UTC)\n nextDay = startDay + dt.timedelta(days=1)\n dateDays = [start]\n while nextDay < end:\n dateDays.append(nextDay)\n nextDay += dt.timedelta(days=1)\n dateDays.append(end)\n startDays = dateDays[0:-1]\n endDays = dateDays[1:]\n if(len(endDays) > 1):\n for d, day in enumerate(endDays[:-1]):\n endDays[d] -= dt.timedelta(seconds=1)\n startDaysStr = [t.strftime('%Y-%m-%dT%H:%M:%S') for t in startDays]\n endDaysStr = [t.strftime('%Y-%m-%dT%H:%M:%S') for t in endDays]\n #####\n\n # LOAD IN DATA AND WRITE TO CSV\n allData = pd.DataFrame(columns=colOrder)\n print('Script started on ' + dt.datetime.now().strftime('%c'))\n for i in range(len(startDays)):\n foundData = False\n print('Attempting to download data from ' +\n startDays[i].strftime('%Y-%m-%d'))\n url = \"https://api.airmonitors.net/3.5/GET/\" + accountID + \"/\" + licenceKey + \\\n \"/stationdata/\" + startDaysStr[i] + \\\n \"/\" + endDaysStr[i] + \"/\" + stationID\n if variables != 'ALL':\n url = url + \"/\" + varStr\n rawText = requests.get(url=url)\n if not rawText.text == 'NO DATA WAS FOUND FOR YOUR GIVEN PARAMETERS':\n foundData = True\n rawJson = json.loads(rawText.text)\n rawDF = json_normalize(rawJson, record_path=['Channels'], meta=[\n 'TBTimestamp', 'TETimestamp'])\n procDF = rawDF.drop(['Channel'], axis=1) # Drop channel column\n procDF = procDF[colOrder] # Reorder columns\n # flip row so oldest date first\n procDF = procDF.reindex(index=procDF.index[::-1])\n if outFreq == 'daily':\n if foundData:\n fname = 'AQMeshData_' + stationID + '_' + \\\n startDays[i].strftime('%Y-%m-%d') + '_' + varStr + '.csv'\n print('Writing data to file ' + fname)\n procDF.to_csv(os.path.join(pyDir, fname), index=False)\n elif foundData:\n allData = allData.append(procDF)\n if not foundData:\n print('No data found for this day')\n if outFreq == 'monthly' and (startDays[i].month != (startDays[i] + dt.timedelta(days=1)).month or i == len(startDays) - 1):\n if allData.shape[0] == 0:\n print('No data found for this day')\n else:\n fname = 'AQMeshData_' + stationID + '_' + \\\n startDays[i].strftime('%Y-%m') + '_' + varStr + '.csv'\n print('Writing data to file ' + fname)\n allData.to_csv(os.path.join(pyDir, fname), index=False)\n allData = pd.DataFrame(columns=colOrder)\n if outFreq == 'all':\n if allData.shape[0] == 0:\n print('No data found in entire specified period')\n else:\n fname = 'AQMeshData_' + stationID + '_' + \\\n start.strftime('%Y-%m-%dT%H-%M-%S') + '_to_' + \\\n end.strftime('%Y-%m-%dT%H-%M-%S') + '_' + varStr + '.csv'\n print('Writing data to file ' + fname)\n allData.to_csv(os.path.join(pyDir, 
fname), index=False)\n print('Script ended on ' + dt.datetime.now().strftime('%c'))\n\n\nif __name__ == '__main__':\n # READ IN COMMAND LINE ARGUMENTS\n import argparse\n parser = argparse.ArgumentParser(description=\"Script to download data from an AQMesh pod using the API tool\",\n epilog=\"Example of use: ./getAQMeshData.py 1733150 2018-01-01T00:00:00 2018-01-31T23:59:59 'SO2 NO2' daily\")\n parser.add_argument(\n \"stationID\", help=\"Unique ID of the AQMesh station from which you want to download data, e.g. 1733150 for El Panama\", type=str)\n parser.add_argument(\n \"startDate\", help=\"Start date/time (UTC) of data to download, in format YYYY-MM-DDTHH:MM:SS, e.g. 2018-01-01T00:00:00. Or type 'start' to get data from the earliest possible time.\", type=str)\n parser.add_argument(\n \"endDate\", help=\"End date/time (UTC) of data to download, in format YYYY-MM-DDTHH:MM:SS, e.g. 2018-01-31T23:59:59. Or type 'end' to get data up to the latest possible time.\", type=str)\n parser.add_argument(\"variables\", help=\"List of variables to download, in single quotes separated by spaces, e.g. 'NO PM10 SO2'. Or specify 'ALL'\\\n to download all variables. Full list of available variables: AIRPRES, HUM, NO, NO2, O3, PARTICULE_COUNT, PM1, PM10, PM2.5, PMTOTAL, SO2,\\\n TEMP, VOLTAGE\", type=str)\n parser.add_argument(\"outFreq\", help=\"Frequency of output files. Type 'all' to generate one output file containing all data,\\\n 'daily' to generate one output file per calendar day, or 'monthly' to generate one output file per calendar month\", type=str)\n args = parser.parse_args()\n # CALL MAIN ROUTINE\n main(args.stationID, args.startDate,\n args.endDate, args.variables, args.outFreq)\n" ]
[ [ "pandas.io.json.json_normalize", "pandas.DataFrame" ] ]
JONGHYEOK667/Udacity_SelfDrivingCar_P4
[ "6cee1afbd33d704fd594c40ce80c893024f6d022" ]
[ "model.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# ---\n\n\n\n\n# In[1]: Setting GPU w/ Tensorflow running\n\n\nimport tensorflow as tf\nprint(tf.__version__)\n\nimport keras\nprint(keras.__version__)\ngpus = tf.config.experimental.list_physical_devices('GPU')\nif gpus:\n try:\n # Currently, memory growth needs to be the same across GPUs\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n logical_gpus = tf.config.experimental.list_logical_devices('GPU')\n print(len(gpus), \"Physical GPUs,\", len(logical_gpus), \"Logical GPUs\")\n except RuntimeError as e:\n # Memory rowth must be set before GPUs have been initialized\n print(e)\n \n# config = tf.ConfigProto()\n# config.gpu_option.per_process_gpu_memory_fraction = 0.4\n# session = tf.Session(config=config)\n# session.close()\n\n# In[2]: Import modules\n\n\nimport csv\nimport cv2\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport numpy as np\nfrom tensorflow import keras\nfrom sklearn.utils import shuffle\nimport seaborn as sns\n\n\n\n# In[3]: Load Data (Camera image and steering angle value)\n\n\n\nlines = []\ncenter_img, left_img, right_img, steer_val = [],[],[],[]\n\npath = '0.driving_data/driving_log.csv' ## data path need\n\nwith open(path) as csvfile:\n reader = csv.reader(csvfile)\n for line in reader:\n if float(line[3]) != 0.0:\n center_img.append(line[0])\n left_img.append(line[1])\n right_img.append(line[2])\n steer_val.append(float(line[3]))\n \n else:\n prob = np.random.uniform()\n if prob <= 0.2:\n center_img.append(line[0])\n left_img.append(line[1])\n right_img.append(line[2])\n steer_val.append(float(line[3]))\n \n \n\n \n# In[4]: plot histogram for steering value on logging data (option)\n \n \nf = plt.hist(steer_val, bins = 40, edgecolor='black', linewidth = 1.2)\nplt.title('Collected data', fontsize = 10)\nplt.xlabel('Steering value (scaled)')\nplt.ylabel('counts')\n# plt.savefig('output_fig/1.collected_data.jpg')\n\nsteer_val = np.array(steer_val)\nsteer_val = np.around(steer_val,3)\n\n\n\n\n# In[5]: helper function for this project\n\n\n\ndef BGR2RGB(img):\n img_RGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n return img_RGB\n\n\ndef extract_array(img_path):\n images = []\n for line in img_path:\n source_path = line\n filename = source_path.split('\\\\')[-1]\n current_path = '0.driving_data/IMG/' + filename ## IMG path need!!\n image = cv2.imread(current_path)\n image = BGR2RGB(image)\n images.append(image)\n return images\n\n\n\ndef Flip(imgs):\n images = []\n for img in imgs:\n image = cv2.flip(img,1)\n images.append(image)\n \n return images\n\n\n\n\n# In[6]: Extract Image array data and callibration steering value \n# depends on each camera (left / center / right)\n\n\n## make left / center / right img, steer data\n\noffset = 0.1\n\nimages_left = extract_array(left_img)\nsteers_left = steer_val + offset\n\nimages_center = extract_array(center_img)\nsteers_center = steer_val\n\nimages_right = extract_array(right_img)\nsteers_right = steer_val - offset\n\n\n# In[7]: Flip image and steering value for augment input data\n\n\nimages_left_flip = Flip(images_left)\nsteers_left_flip = -steers_left\n\n\nimages_center_flip = Flip(images_center)\nsteers_center_flip = -steers_center\n\n\nimages_right_flip = Flip(images_right)\nsteers_right_flip = -steers_right\n\n\n# In[8]: select images and steering value for figure (option)\n\n\nindex = np.random.randint(len(steer_val)+1)\n\nimage_left = images_left[index]\nimage_center = images_center[index]\nimage_right = 
images_right[index]\n\nsteer_left = steers_left[index]\nsteer_center = steers_center[index]\nsteer_right = steers_right[index]\n\n\nimage_left_flip = images_left_flip[index]\nimage_center_flip = images_center_flip[index]\nimage_right_flip = images_right_flip[index]\n\nsteer_left_flip = steers_left_flip[index]\nsteer_center_flip = steers_center_flip[index]\nsteer_right_flip = steers_right_flip[index]\n\n\n\n# In[9]: plot sample input image and steering value (3 camera and fliped)\n# on logging data (option)\n\n\nf, ((ax1,ax2,ax3),(ax4,ax5,ax6)) = plt.subplots(2, 3, figsize=(15, 10))\nax1.imshow(image_left)\nax1.set_title('left, '+'steer('+str(np.around(steer_left,3))+')', fontsize=20)\nax2.imshow(image_center)\nax2.set_title('center, '+'steer('+str(np.around(steer_center,3))+')', fontsize=20)\nax3.imshow(image_right)\nax3.set_title('right, '+'steer('+str(np.around(steer_right,3))+')', fontsize=20)\nax4.imshow(image_left_flip)\nax4.set_title('left_flip, '+'steer('+str(np.around(steer_left_flip,3))+')', fontsize=20)\nax5.imshow(image_center_flip)\nax5.set_title('center_flip, '+'steer('+str(np.around(steer_center_flip,3))+')', fontsize=20)\nax6.imshow(image_right_flip)\nax6.set_title('right_flip, '+'steer('+str(np.around(steer_right_flip,3))+')', fontsize=20)\n\nf.tight_layout()\nf.subplots_adjust(left=0.1, right=0.9, top=0.9, bottom=0.0)\n# f.savefig('output_fig/2.Augmented_Image(BiasedRecover).jpg')\n\n\n\n# In[10]: Make input data (image array) and label (steering value)\n\n\nimages = images_left + images_center + images_right + images_left_flip + images_center_flip + images_right_flip\nimages = np.array(images)\n\nsteers = np.append(steers_left,steers_center)\nsteers = np.append(steers,steers_right)\nsteers = np.append(steers,steers_left_flip)\nsteers = np.append(steers,steers_center_flip)\nsteers = np.append(steers,steers_right_flip)\n\n# images = images_center + images_center_flip \n# images = np.array(images)\n\n# steers = np.append(steers_center,steers_center_flip)\n\n\n# In[11]: plot histogram for all steering value (option)\n\n\nf = plt.hist(steers, bins = 40, edgecolor='black', linewidth = 1.2)\nplt.title('Augmented data', fontsize = 10)\nplt.xlabel('Steering value (scaled)')\nplt.ylabel('counts')\n# plt.savefig('output_fig/3.Augmented_data.jpg')\n\n\n# In[12]: shffle data\n\n\nindex = np.random.choice(steers.shape[0], int(steers.shape[0]/1), replace = False)\nx_suffle = images[index]\ny_suffle = steers[index]\n\n\n# In[13]: split train / valid data\n\n\nx_train = x_suffle[0:int(7*steers.shape[0]/10)]\ny_train = y_suffle[0:int(7*steers.shape[0]/10)]\n\nx_val = x_suffle[int(7*steers.shape[0]/10):]\ny_val = y_suffle[int(7*steers.shape[0]/10):]\n\n\n\n\n# In[14]: define generator\n\n\ndef generator(feature, label, batch_size = 32):\n num_sample = feature.shape[0]\n while 1 :\n for offset in range(0, num_sample, batch_size):\n x_train = feature[offset:offset+batch_size]\n y_train = label[offset:offset+batch_size]\n\n yield (x_train, y_train) \n \n\n\n# In[15]: setting generator parameter and input\n\n\nbatch_size = 256\ntrain_generator = generator(x_train, y_train, batch_size = batch_size)\nval_generator = generator(x_val, y_val, batch_size = batch_size)\n\n\n# In[16]:stack CNN model\n\n\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Flatten, Dense, Lambda, Cropping2D\n\ndrop_rate = 0.4\n\nmodel = keras.models.Sequential([\n keras.layers.Cropping2D(cropping=((70,25),(0,0)),input_shape = (160, 320, 3)),\n keras.layers.Lambda(lambda x : x/255.0 - 
0.5),\n keras.layers.Conv2D(filters = 24,kernel_size = (5,5), strides = (2,2), padding = 'same'),\n keras.layers.Activation('relu'),\n keras.layers.Conv2D(filters = 36,kernel_size = (5,5), strides = (2,2), padding = 'same'),\n keras.layers.Activation('relu'),\n keras.layers.Conv2D(filters = 48,kernel_size = (5,5), strides = (2,2), padding = 'same'),\n keras.layers.Activation('relu'),\n keras.layers.Conv2D(filters = 64,kernel_size = (3,3), strides = (1,1), padding = 'same'),\n keras.layers.Activation('relu'),\n keras.layers.Dropout(rate=0.2),\n keras.layers.Conv2D(filters = 64,kernel_size = (3,3), strides = (1,1), padding = 'same'),\n keras.layers.Activation('relu'),\n keras.layers.Flatten(),\n keras.layers.Dense(100, activation = 'relu'),\n keras.layers.Dropout(rate=0.4),\n keras.layers.Dense(50, activation = 'relu'),\n keras.layers.Dropout(rate=0.4),\n keras.layers.Dense(10, activation = 'relu'),\n keras.layers.Dense(1)\n])\n\n\nmodel.summary()\nmodel.compile(optimizer = keras.optimizers.Adam(),\n loss = 'mse', metrics = ['mae'])\n\n\n# In[17]:\n\n\nfrom math import ceil\n\nsteps_per_epoch = ceil(x_train.shape[0]/batch_size)\nvalidation_steps = ceil(x_val.shape[0]/batch_size)\n\n\n\n# In[18]:Fit model\n\n\nhistory = model.fit_generator(train_generator, \n steps_per_epoch = steps_per_epoch,\n validation_data = val_generator,\n validation_steps = validation_steps,\n epochs = 100,\n callbacks = [keras.callbacks.EarlyStopping(patience=5,monitor='val_loss',mode = 'min',verbose = 1 )],\n verbose = 1)\n\n\n# In[19]:plot history for training CNN network (option)\n\n\nhistory.history\nf, (ax1,ax2) = plt.subplots(1, 2, figsize=(15, 6))\nax1.plot(history.history['loss'], '-b', label = 'loss')\nax1.plot(history.history['val_loss'], '--b', label = 'val_loss')\nax2.plot(history.history['mae'], '-r', label = 'mae')\nax2.plot(history.history['val_mae'], '--r', label = 'val_mae')\nax1.set_title('loss (mse)', fontsize=20)\nax1.set_xlabel('Epoch')\nax2.set_title('mae', fontsize=20)\nax2.set_xlabel('Epoch')\nf.tight_layout()\nf.subplots_adjust(left=0.1, right=0.9, top=0.9, bottom=0.1)\n# f.savefig('output_fig/4.Train_History.jpg')\n\n\n# In[20]:save trained model\n\n\nmodel.save('model_temp.h5')\n\n\n" ]
[ [ "tensorflow.keras.layers.Flatten", "tensorflow.keras.optimizers.Adam", "tensorflow.keras.layers.Cropping2D", "tensorflow.config.experimental.list_logical_devices", "tensorflow.keras.layers.Lambda", "matplotlib.pyplot.ylabel", "tensorflow.keras.callbacks.EarlyStopping", "tensorflow.keras.layers.Conv2D", "tensorflow.config.experimental.list_physical_devices", "numpy.append", "tensorflow.keras.layers.Activation", "matplotlib.pyplot.title", "tensorflow.keras.layers.Dense", "matplotlib.pyplot.hist", "numpy.around", "numpy.random.uniform", "tensorflow.config.experimental.set_memory_growth", "tensorflow.keras.layers.Dropout", "matplotlib.pyplot.subplots", "numpy.array", "matplotlib.pyplot.xlabel" ] ]
mikedeltalima/python-qinfer
[ "8170c84a0be1723f8c6b09e0d3c7a40a886f1fe3" ]
[ "src/qinfer/utils.py" ]
[ "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n##\n# utils.py : some auxiliary functions\n##\n# © 2017, Chris Ferrie ([email protected]) and\n# Christopher Granade ([email protected]).\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n##\n\n## FEATURES ###################################################################\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\n## IMPORTS ####################################################################\n\nfrom builtins import range\n\nimport warnings\n\nimport numpy as np\nimport numpy.linalg as la\n\nfrom scipy.linalg import eigh\n\nfrom scipy.stats import logistic, binom\nfrom scipy.special import gammaln, gamma, expit, logit\nfrom scipy.linalg import sqrtm\n\nfrom numpy.testing import assert_almost_equal\n\nfrom qinfer._due import due, Doi\nfrom qinfer._exceptions import ApproximationWarning\n\n## FUNCTIONS ##################################################################\n\ndef get_qutip_module(required_version='3.2'):\n \"\"\"\n Attempts to return the qutip module, but\n silently returns ``None`` if it can't be\n imported, or doesn't have version at\n least ``required_version``.\n\n :param str required_version: Valid input to\n ``distutils.version.LooseVersion``.\n :return: The qutip module or ``None``.\n :rtype: ``module`` or ``NoneType``\n \"\"\"\n try:\n import qutip as qt\n from distutils.version import LooseVersion\n _qt_version = LooseVersion(qt.version.version)\n if _qt_version < LooseVersion(required_version):\n return None\n except ImportError:\n return None\n\n return qt\n\ndef check_qutip_version(required_version='3.2'):\n \"\"\"\n Returns ``true`` iff the imported qutip\n version exists and has ``LooseVersion``\n of at least ``required_version``.\n\n :param str required_version: Valid input to\n ``distutils.version.LooseVersion``.\n :rtype: ``bool``\n \"\"\"\n try:\n qt = get_qutip_module(required_version)\n return qt is not None\n except:\n # In any other case (including something other\n # than ImportError) we say it's not good 
enough\n return False\n\n\ndef binomial_pdf(N,n,p):\n r\"\"\"\n Returns the PDF of the binomial distribution\n :math:`\\operatorname{Bin}(N, p)` evaluated at :math:`n`.\n \"\"\"\n return binom(N, p).pmf(n)\n\ndef multinomial_pdf(n,p):\n r\"\"\"\n Returns the PDF of the multinomial distribution\n :math:`\\operatorname{Multinomial}(N, n, p)=\n \\frac{N!}{n_1!\\cdots n_k!}p_1^{n_1}\\cdots p_k^{n_k}`\n\n :param np.ndarray n : Array of outcome integers\n of shape ``(sides, ...)`` where sides is the number of\n sides on the dice and summing over this index indicates\n the number of rolls for the given experiment.\n :param np.ndarray p : Array of (assumed) probabilities\n of shape ``(sides, ...)`` or ``(sides-1,...)``\n with the rest of the dimensions the same as ``n``.\n If ``sides-1``, the last probability is chosen so that the\n probabilities of all sides sums to 1. If ``sides``\n is the last index, these probabilities are assumed\n to sum to 1.\n\n Note that the numbers of experiments don't need to be given because\n they are implicit in the sum over the 0 index of ``n``.\n \"\"\"\n\n # work in log space to avoid overflow\n log_N_fac = gammaln(np.sum(n, axis=0) + 1)[np.newaxis,...]\n log_n_fac_sum = np.sum(gammaln(n + 1), axis=0)\n\n # since working in log space, we need special\n # consideration at p=0. deal with p=0, n>0 later.\n def nlogp(n,p):\n result = np.zeros(p.shape)\n mask = p!=0\n result[mask] = n[mask] * np.log(p[mask])\n return result\n\n if p.shape[0] == n.shape[0] - 1:\n ep = np.empty(n.shape)\n ep[:p.shape[0],...] = p\n ep[-1,...] = 1-np.sum(p,axis=0)\n else:\n ep = p\n log_p_sum = np.sum(nlogp(n, ep), axis=0)\n\n probs = np.exp(log_N_fac - log_n_fac_sum + log_p_sum)\n\n # if n_k>0 but p_k=0, the whole probability must be 0\n mask = np.sum(np.logical_and(n!=0, ep==0), axis=0) == 0\n probs = mask * probs\n\n return probs[0,...]\n\ndef sample_multinomial(N, p, size=None):\n r\"\"\"\n Draws fixed number of samples N from different\n multinomial distributions (with the same number dice sides).\n\n :param int N: How many samples to draw from each distribution.\n :param np.ndarray p: Probabilities specifying each distribution.\n Sum along axis 0 should be 1.\n :param size: Output shape. ``int`` or tuple of\n ``int``s. 
If the given shape is,\n e.g., ``(m, n, k)``, then m * n * k samples are drawn\n for each distribution.\n Default is None, in which case a single value\n is returned for each distribution.\n\n :rtype: np.ndarray\n :return: Array of shape ``(p.shape, size)`` or p.shape if\n size is ``None``.\n \"\"\"\n # ensure s is array\n s = np.array([1]) if size is None else np.array([size]).flatten()\n\n def take_samples(ps):\n # we have to flatten to make apply_along_axis work.\n return np.random.multinomial(N, ps, np.prod(s)).flatten()\n\n # should have shape (prod(size)*ps.shape[0], ps.shape[1:])\n samples = np.apply_along_axis(take_samples, 0, p)\n # should have shape (size, p.shape)\n samples = samples.reshape(np.concatenate([s, p.shape]))\n # should have shape (p.shape, size)\n samples = samples.transpose(np.concatenate(\n [np.arange(s.ndim, p.ndim+s.ndim), np.arange(s.ndim)]\n ))\n\n if size is None:\n # get rid of trailing singleton dimension.\n samples = samples[...,0]\n\n return samples\n\n\ndef outer_product(vec):\n r\"\"\"\n Returns the outer product of a vector :math:`v`\n with itself, :math:`v v^\\T`.\n \"\"\"\n return (\n np.dot(vec[:, np.newaxis], vec[np.newaxis, :])\n if len(vec.shape) == 1 else\n np.dot(vec, vec.T)\n )\n\ndef particle_meanfn(weights, locations, fn=None):\n r\"\"\"\n Returns the mean of a function :math:`f` over model\n parameters.\n\n :param numpy.ndarray weights: Weights of each particle.\n :param numpy.ndarray locations: Locations of each\n particle.\n :param callable fn: Function of model parameters to\n take the mean of. If `None`, the identity function\n is assumed.\n \"\"\"\n warnings.warn('particle_meanfn is deprecated, please use distributions.ParticleDistribution',\n DeprecationWarning)\n fn_vals = fn(locations) if fn is not None else locations\n return np.sum(weights * fn_vals.transpose([1, 0]),\n axis=1)\n\n\ndef particle_covariance_mtx(weights,locations):\n \"\"\"\n Returns an estimate of the covariance of a distribution\n represented by a given set of SMC particle.\n\n :param weights: An array containing the weights of each\n particle.\n :param location: An array containing the locations of\n each particle.\n :rtype: :class:`numpy.ndarray`, shape\n ``(n_modelparams, n_modelparams)``.\n :returns: An array containing the estimated covariance matrix.\n \"\"\"\n # TODO: add shapes to docstring.\n warnings.warn('particle_covariance_mtx is deprecated, please use distributions.ParticleDistribution',\n DeprecationWarning)\n\n # Find the mean model vector, shape (n_modelparams, ).\n mu = particle_meanfn(weights, locations)\n\n # Transpose the particle locations to have shape\n # (n_modelparams, n_particles).\n xs = locations.transpose([1, 0])\n # Give a shorter name to the particle weights, shape (n_particles, ).\n ws = weights\n\n cov = (\n # This sum is a reduction over the particle index, chosen to be\n # axis=2. Thus, the sum represents an expectation value over the\n # outer product $x . x^T$.\n #\n # All three factors have the particle index as the rightmost\n # index, axis=2. Using the Einstein summation convention (ESC),\n # we can reduce over the particle index easily while leaving\n # the model parameter index to vary between the two factors\n # of xs.\n #\n # This corresponds to evaluating A_{m,n} = w_{i} x_{m,i} x_{n,i}\n # using the ESC, where A_{m,n} is the temporary array created.\n np.einsum('i,mi,ni', ws, xs, xs)\n # We finish by subracting from the above expectation value\n # the outer product $mu . 
mu^T$.\n - np.dot(mu[..., np.newaxis], mu[np.newaxis, ...])\n )\n\n # The SMC approximation is not guaranteed to produce a\n # positive-semidefinite covariance matrix. If a negative eigenvalue\n # is produced, we should warn the caller of this.\n assert np.all(np.isfinite(cov))\n if not np.all(la.eig(cov)[0] >= 0):\n warnings.warn('Numerical error in covariance estimation causing positive semidefinite violation.', ApproximationWarning)\n\n return cov\n\n\ndef ellipsoid_volume(A=None, invA=None):\n \"\"\"\n Returns the volume of an ellipsoid given either its\n matrix or the inverse of its matrix.\n \"\"\"\n\n if invA is None and A is None:\n raise ValueError(\"Must pass either inverse(A) or A.\")\n\n if invA is None and A is not None:\n invA = la.inv(A)\n\n # Find the unit sphere volume.\n # http://en.wikipedia.org/wiki/Unit_sphere#General_area_and_volume_formulas\n n = invA.shape[0]\n Vn = (np.pi ** (n/2)) / gamma(1 + (n/2))\n\n return Vn * la.det(sqrtm(invA))\n\[email protected](\n Doi(\"10.1016/j.dam.2007.02.013\"),\n description=\"Khachiyan algorithm\",\n tags=[\"implementation\"]\n)\ndef mvee(points, tol=0.001):\n \"\"\"\n Returns the minimum-volume enclosing ellipse (MVEE)\n of a set of points, using the Khachiyan algorithm.\n \"\"\"\n\n # This function is a port of the matlab function by\n # Nima Moshtagh found here:\n # https://www.mathworks.com/matlabcentral/fileexchange/9542-minimum-volume-enclosing-ellipsoid\n # with accompanying writup here:\n # https://www.researchgate.net/profile/Nima_Moshtagh/publication/254980367_MINIMUM_VOLUME_ENCLOSING_ELLIPSOIDS/links/54aab5260cf25c4c472f487a.pdf\n\n N, d = points.shape\n\n Q = np.zeros([N,d+1])\n Q[:,0:d] = points[0:N,0:d]\n Q[:,d] = np.ones([1,N])\n\n Q = np.transpose(Q)\n points = np.transpose(points)\n count = 1\n err = 1\n u = (1/N) * np.ones(shape = (N,))\n\n while err > tol:\n\n X = np.dot(np.dot(Q, np.diag(u)), np.transpose(Q))\n M = np.diag( np.dot(np.dot(np.transpose(Q), la.inv(X)),Q))\n jdx = np.argmax(M)\n step_size = (M[jdx] - d - 1)/((d+1)*(M[jdx] - 1))\n new_u = (1 - step_size)*u\n new_u[jdx] = new_u[jdx] + step_size\n count = count + 1\n err = la.norm(new_u - u)\n u = new_u\n\n U = np.diag(u)\n c = np.dot(points,u)\n A = (1/d) * la.inv(np.dot(np.dot(points,U), np.transpose(points)) - np.outer(c,c) )\n return A, np.transpose(c)\n\ndef in_ellipsoid(x, A, c):\n \"\"\"\n Determines which of the points ``x`` are in the\n closed ellipsoid with shape matrix ``A`` centered at ``c``.\n For a single point ``x``, this is computed as\n\n .. 
math::\n (c-x)^T\\cdot A^{-1}\\cdot (c-x) \\leq 1\n\n :param np.ndarray x: Shape ``(n_points, dim)`` or ``n_points``.\n :param np.ndarray A: Shape ``(dim, dim)``, positive definite\n :param np.ndarray c: Shape ``(dim)``\n :return: `bool` or array of bools of length ``n_points``\n \"\"\"\n if x.ndim == 1:\n y = c - x\n return np.einsum('j,jl,l', y, np.linalg.inv(A), y) <= 1\n else:\n y = c[np.newaxis,:] - x\n return np.einsum('ij,jl,il->i', y, np.linalg.inv(A), y) <= 1\n\ndef uniquify(seq):\n \"\"\"\n Returns the unique elements of a sequence ``seq``.\n \"\"\"\n #from http://stackoverflow.com/a/480227/1205799\n seen = set()\n seen_add = seen.add\n return [ x for x in seq if x not in seen and not seen_add(x)]\n\ndef assert_sigfigs_equal(x, y, sigfigs=3):\n \"\"\"\n Tests if all elements in x and y\n agree up to a certain number of\n significant figures.\n\n :param np.ndarray x: Array of numbers.\n :param np.ndarray y: Array of numbers you want to\n be equal to ``x``.\n :param int sigfigs: How many significant\n figures you demand that they share.\n Default is 3.\n \"\"\"\n # determine which power of 10 best describes x\n xpow = np.floor(np.log10(x))\n # now rescale 1 \\leq x < 9\n x = x * 10**(- xpow)\n # scale y by the same amount\n y = y * 10**(- xpow)\n\n # now test if abs(x-y) < 0.5 * 10**(-sigfigs)\n assert_almost_equal(x, y, sigfigs)\n\ndef format_uncertainty(value, uncertianty, scinotn_break=4):\n \"\"\"\n Given a value and its uncertianty, format as a LaTeX string\n for pretty-printing.\n\n :param int scinotn_break: How many decimal points to print\n before breaking into scientific notation.\n \"\"\"\n if uncertianty == 0:\n # Return the exact number, without the ± annotation as a fixed point\n # number, since all digits matter.\n # FIXME: this assumes a precision of 6; need to select that dynamically.\n return \"{0:f}\".format(value)\n else:\n # Return a string of the form \"0.00 \\pm 0.01\".\n mag_unc = int(np.log10(np.abs(uncertianty)))\n # Zero should be printed as a single digit; that is, as wide as str \"1\".\n mag_val = int(np.log10(np.abs(value))) if value != 0 else 0\n n_digits = max(mag_val - mag_unc, 0)\n\n\n if abs(mag_val) < abs(mag_unc) and abs(mag_unc) > scinotn_break:\n # We're formatting something close to zero, so recale uncertianty\n # accordingly.\n scale = 10**mag_unc\n return r\"({{0:0.{0}f}} \\pm {{1:0.{0}f}}) \\times 10^{{2}}\".format(\n n_digits\n ).format(\n value / scale,\n uncertianty / scale,\n mag_unc\n )\n if abs(mag_val) <= scinotn_break:\n return r\"{{0:0.{n_digits}f}} \\pm {{1:0.{n_digits}f}}\".format(n_digits=n_digits).format(value, uncertianty)\n else:\n scale = 10**mag_val\n return r\"({{0:0.{0}f}} \\pm {{1:0.{0}f}}) \\times 10^{{2}}\".format(\n n_digits\n ).format(\n value / scale,\n uncertianty / scale,\n mag_val\n )\n\ndef compactspace(scale, n):\n r\"\"\"\n Returns points :math:`x` spaced in the open interval\n :math:`(-\\infty, \\infty)` by linearly spacing in the compactified\n coordinate :math:`s(x) = e^{-\\alpha x} / (1 + e^{-\\alpha x})^2`,\n where :math:`\\alpha` is a scale factor.\n \"\"\"\n logit = logistic(scale=scale).ppf\n compact_xs = np.linspace(0, 1, n + 2)[1:-1]\n return logit(compact_xs)\n\ndef to_simplex(y):\n r\"\"\"\n Interprets the last index of ``y`` as stick breaking fractions \n in logit space and returns a non-negative array of \n the same shape where the last dimension always sums to unity.\n \n A unit simplex is a list of non-negative numbers :math:`(x_1,...,x_K)`\n that sum to one, :math:`\\sum_{k=1}^K x_k=1`, for 
example, the \n probabilities of an K-sided die.\n It is sometimes desireable to parameterize this object with variables \n that are unconstrained and \"decorrelated\".\n To this end, we imagine :math:`\\vec{x}` as a partition of the unit \n stick :math:`[0,1]` with :math:`K-1` break points between \n :math:`K` successive intervals of respective lengths :math:`(x_1,...,x_K)`.\n Instead of storing the interval lengths, we start from the left-most break \n point and iteratively store the breaking fractions, :math:`z_k`, \n of the remaining stick.\n This gives the formula \n :math:`z_k=x_k / (1-\\sum_{k'=1}^{k-1}x_k)` with the convention \n :math:`x_0:=0`, \n which has an inverse formula :math:`x_k = z_k(1-z_{k-1})\\cdots(1-z_1)`.\n Note that :math:`z_K=1` since the last stick is not broken; this is the \n result of the redundant information imposed by :math:`\\sum_{k=1}^K x_k=1`.\n To unbound the parameters :math:`z_k` into the real line, \n we pass through the logit function, \n :math:`\\operatorname{logit}(p)=\\log\\frac{p}{1-p}`, \n to end up with the parameterization \n :math:`y_k=\\operatorname{logit}(z_k)+\\log(K-k)`, with the convention \n :math:`y_K=0`.\n The shift by :math:`\\log(K-k)` is largely asthetic and causes the \n uniform simplex :math:`\\vec{x}=(1/K,1/K,...,1/K)` to be mapped to \n :math:`\\vec{x}=(0,0,...,0)`.\n\n Inverse to :func:`from_simplex`.\n\n :param np.ndarray: Array of logit space stick breaking \n fractions along the last index.\n\n :rtype: ``np.ndarray``\n \"\"\"\n n = y.shape[-1]\n # z are the stick breaking fractions in [0,1]\n z = expit(y - np.log(n - np.arange(1, n+1)))\n x = np.empty(y.shape)\n x[..., 0] = z[..., 0]\n x[..., 1:] = z[..., 1:] * (1 - z[..., :-1]).cumprod(axis=-1)\n return x\n\ndef from_simplex(x):\n r\"\"\"\n Inteprets the last index of x as unit simplices and returns a\n real array of the sampe shape in logit space.\n\n Inverse to :func:`to_simplex` ; see that function for more details.\n\n :param np.ndarray: Array of unit simplices along the last index.\n \n :rtype: ``np.ndarray``\n \"\"\"\n n = x.shape[-1]\n # z are the stick breaking fractions in [0,1]\n # the last one is always 1, so don't worry about it\n z = np.empty(shape=x.shape)\n z[..., 0] = x[..., 0]\n z[..., 1:-1] = x[..., 1:-1] / (1 - x[..., :-2].cumsum(axis=-1))\n\n # now z are the logit-transformed breaking fractions\n z[..., :-1] = logit(z[..., :-1]) - logit(1 / (n - np.arange(n-1, dtype=np.float)))\n # set this to 0 manually to avoid subtracting inf-inf\n z[..., -1] = 0\n return z\n\ndef pretty_time(secs, force_h=False, force_m=False):\n if secs > 86400:\n return \"{d} days, \".format(d=int(secs//86400)) + pretty_time(secs % 86400, force_h=True)\n elif force_h or secs > 3600:\n return \"{h}:\".format(h=int(secs//3600)) + pretty_time(secs % 3600, force_m=True)\n elif force_m or secs > 60:\n return (\n \"{m:0>2}:{s:0>2}\" if force_m else \"{m}:{s:0>2}\"\n ).format(m=int(secs//60), s=int(secs%60))\n else:\n return \"{0:0.2f} seconds\".format(secs)\n\ndef safe_shape(arr, idx=0, default=1):\n shape = np.shape(arr)\n return shape[idx] if idx < len(shape) else default\n\ndef join_struct_arrays(arrays):\n \"\"\"\n Takes a list of possibly structured arrays, concatenates their\n dtypes, and returns one big array with that dtype. 
Does the\n inverse of ``separate_struct_array``.\n\n :param list arrays: List of ``np.ndarray``s\n \"\"\"\n # taken from http://stackoverflow.com/questions/5355744/numpy-joining-structured-arrays\n sizes = np.array([a.itemsize for a in arrays])\n offsets = np.r_[0, sizes.cumsum()]\n shape = arrays[0].shape\n joint = np.empty(shape + (offsets[-1],), dtype=np.uint8)\n for a, size, offset in zip(arrays, sizes, offsets):\n joint[...,offset:offset+size] = np.atleast_1d(a).view(np.uint8).reshape(shape + (size,))\n dtype = sum((a.dtype.descr for a in arrays), [])\n return joint.ravel().view(dtype)\n\ndef separate_struct_array(array, dtypes):\n \"\"\"\n Takes an array with a structured dtype, and separates it out into\n a list of arrays with dtypes coming from the input ``dtypes``.\n Does the inverse of ``join_struct_arrays``.\n\n :param np.ndarray array: Structured array.\n :param dtypes: List of ``np.dtype``, or just a ``np.dtype`` and the number of\n them is figured out automatically by counting bytes.\n \"\"\"\n try:\n offsets = np.cumsum([np.dtype(dtype).itemsize for dtype in dtypes])\n except TypeError:\n dtype_size = np.dtype(dtypes).itemsize\n num_fields = int(array.nbytes / (array.size * dtype_size))\n offsets = np.cumsum([dtype_size] * num_fields)\n dtypes = [dtypes] * num_fields\n offsets = np.concatenate([[0], offsets]).astype(int)\n uint_array = array.view(np.uint8).reshape(array.shape + (-1,))\n return [\n uint_array[..., offsets[idx]:offsets[idx+1]].flatten().view(dtype)\n for idx, dtype in enumerate(dtypes)\n ]\n\ndef sqrtm_psd(A, est_error=True, check_finite=True):\n \"\"\"\n Returns the matrix square root of a positive semidefinite matrix,\n truncating negative eigenvalues.\n \"\"\"\n w, v = eigh(A, check_finite=check_finite)\n mask = w <= 0\n w[mask] = 0\n np.sqrt(w, out=w)\n A_sqrt = (v * w).dot(v.conj().T)\n\n if est_error:\n return A_sqrt, np.linalg.norm(np.dot(A_sqrt, A_sqrt) - A, 'fro')\n else:\n return A_sqrt\n\ndef decorate_init(init_decorator):\n \"\"\"\n Given a class definition and a decorator that acts on methods,\n applies that decorator to the class' __init__ method.\n Useful for decorating __init__ while still allowing __init__ to be\n inherited.\n \"\"\"\n\n def class_decorator(cls):\n cls.__init__ = init_decorator(cls.__init__)\n return cls\n\n return class_decorator\n\n#==============================================================================\n#Test Code\nif __name__ == \"__main__\":\n\n from mpl_toolkits.mplot3d import Axes3D\n from mpl_toolkits.mplot3d.art3d import Poly3DCollection\n import matplotlib.pyplot as plt\n from scipy.spatial import Delaunay\n\n #some random points\n points = np.array([[ 0.53135758, -0.25818091, -0.32382715],\n [ 0.58368177, -0.3286576, -0.23854156,],\n [ 0.18741533, 0.03066228, -0.94294771],\n [ 0.65685862, -0.09220681, -0.60347573],\n [ 0.63137604, -0.22978685, -0.27479238],\n [ 0.59683195, -0.15111101, -0.40536606],\n [ 0.68646128, 0.0046802, -0.68407367],\n [ 0.62311759, 0.0101013, -0.75863324]])\n\n # compute mvee\n A, centroid = mvee(points)\n print(A)\n\n # point it and some other stuff\n U, D, V = la.svd(A)\n\n rx, ry, rz = [1/np.sqrt(d) for d in D]\n u, v = np.mgrid[0:2*np.pi:20j,-np.pi/2:np.pi/2:10j]\n\n x=rx*np.cos(u)*np.cos(v)\n y=ry*np.sin(u)*np.cos(v)\n z=rz*np.sin(v)\n\n for idx in range(x.shape[0]):\n for idy in range(y.shape[1]):\n x[idx,idy],y[idx,idy],z[idx,idy] = np.dot(np.transpose(V),np.array([x[idx,idy],y[idx,idy],z[idx,idy]])) + centroid\n\n\n fig = plt.figure()\n ax = fig.add_subplot(111, 
projection='3d')\n ax.scatter(points[:,0],points[:,1],points[:,2])\n ax.plot_surface(x, y, z, cstride = 1, rstride = 1, alpha = 0.1)\n plt.show()\n\ndef binom_est_p(n, N, hedge=float(0)):\n r\"\"\"\n Given a number of successes :math:`n` and a number of trials :math:`N`,\n estimates the binomial distribution parameter :math:`p` using the\n hedged maximum likelihood estimator of [FB12]_.\n\n :param n: Number of successes.\n :type n: `numpy.ndarray` or `int`\n :param int N: Number of trials.\n :param float hedge: Hedging parameter :math:`\\beta`.\n :rtype: `float` or `numpy.ndarray`.\n :return: The estimated binomial distribution parameter :math:`p` for each\n value of :math:`n`.\n \"\"\"\n return (n + hedge) / (N + 2 * hedge)\n\ndef binom_est_error(p, N, hedge = float(0)):\n r\"\"\"\n \"\"\"\n\n # asymptotic np.sqrt(p * (1 - p) / N)\n return np.sqrt(p*(1-p)/(N+2*hedge+1))\n" ]
[ [ "numpy.ones", "numpy.sum", "numpy.diag", "scipy.special.logit", "numpy.dtype", "scipy.special.gamma", "numpy.log", "numpy.apply_along_axis", "numpy.isfinite", "numpy.testing.assert_almost_equal", "numpy.transpose", "scipy.stats.binom", "matplotlib.pyplot.figure", "numpy.logical_and", "numpy.abs", "numpy.cos", "scipy.linalg.eigh", "numpy.log10", "numpy.linspace", "numpy.linalg.eig", "scipy.special.gammaln", "numpy.zeros", "numpy.dot", "numpy.argmax", "numpy.arange", "numpy.prod", "numpy.einsum", "numpy.array", "numpy.linalg.norm", "numpy.cumsum", "numpy.empty", "scipy.stats.logistic", "numpy.linalg.inv", "numpy.exp", "numpy.linalg.svd", "numpy.atleast_1d", "matplotlib.pyplot.show", "numpy.shape", "numpy.sqrt", "numpy.sin", "numpy.concatenate", "numpy.outer", "scipy.linalg.sqrtm" ] ]
TheJacksonLaboratory/JAX_Microbiome_Workshop
[ "800dffdec753b7b1c2a08b0e0ef29d85e54eb5cc" ]
[ "scripts/create-table.py" ]
[ "import pandas as pd\nimport sys\nimport os\nimport re\nimport shutil\nimport subprocess\n\ninputs=sys.argv[1]\noutput=sys.argv[2]\n#names=[\"Username\", \"IP\", \"Terminal 1\", \"Terminal 2\", \"RStudio\", \"Jupyter\", \"Download Files\"]\ndf = pd.read_csv(inputs, sep=\",\", header=None, names=[\"Username\", \"IP\"])\ndf['Terminal 1'] = df[\"IP\"].map(lambda beta_value: \"<a href='{}/terminal/' target='_blank'>terminal 1</a>\".format(beta_value))\ndf['Terminal 2'] = df[\"IP\"].map(lambda beta_value: \"<a href='{}/terminal2/' target='_blank'>terminal 2</a>\".format(beta_value))\ndf['RStudio'] = df[\"IP\"].map(lambda beta_value: \"<a href='{}/rstudio' target='_blank'>rstudio</a>\".format(beta_value))\ndf['Download Files'] = df[\"IP\"].map(lambda beta_value: \"<a href='{}' target='_blank'>download files</a>\".format(beta_value))\n\nprint(df)\nprint(df.columns)\ndel df['IP']\ndf.to_csv(\"csv-intermediate-file-csv\", index=False, header=[\"User\", \"Terminal 1\", \"Terminal 2\", \"RStudio\", \"Download Files\"])\n\ndef csvtomd(output):\n return subprocess.Popen(\n 'csvtomd csv-intermediate-file-csv > {}; rm csv-intermediate-file-csv'.format(output),\n stdout=subprocess.PIPE, shell=True)\ncsvtomd(output)\n" ]
[ [ "pandas.read_csv" ] ]
samuelfneumann/rllab
[ "ccd80547380b25344b1b091e730beb646d12d192" ]
[ "rllab/optimizers/conjugate_gradient_optimizer.py" ]
[ "from rllab.misc import ext\nfrom rllab.misc import krylov\nfrom rllab.misc import logger\nfrom rllab.core.serializable import Serializable\nimport theano.tensor as TT\nimport theano\nimport itertools\nimport numpy as np\nfrom rllab.misc.ext import sliced_fun\nfrom ast import Num\n\n\nclass PerlmutterHvp(Serializable):\n\n def __init__(self, num_slices=1):\n Serializable.quick_init(self, locals())\n self.target = None\n self.reg_coeff = None\n self.opt_fun = None\n self._num_slices = num_slices\n\n def update_opt(self, f, target, inputs, reg_coeff):\n self.target = target\n self.reg_coeff = reg_coeff\n params = target.get_params(trainable=True)\n\n constraint_grads = theano.grad(\n f, wrt=params, disconnected_inputs='warn')\n xs = tuple([ext.new_tensor_like(\"%s x\" % p.name, p) for p in params])\n\n def Hx_plain():\n Hx_plain_splits = TT.grad(\n TT.sum([TT.sum(g * x)\n for g, x in zip(constraint_grads, xs)]),\n wrt=params,\n disconnected_inputs='warn'\n )\n return TT.concatenate([TT.flatten(s) for s in Hx_plain_splits])\n\n self.opt_fun = ext.lazydict(\n f_Hx_plain=lambda: ext.compile_function(\n inputs=inputs + xs,\n outputs=Hx_plain(),\n log_name=\"f_Hx_plain\",\n ),\n )\n\n def build_eval(self, inputs):\n def eval(x):\n xs = tuple(self.target.flat_to_params(x, trainable=True))\n ret = sliced_fun(self.opt_fun[\"f_Hx_plain\"], self._num_slices)(\n inputs, xs) + self.reg_coeff * x\n return ret\n\n return eval\n\n\nclass FiniteDifferenceHvp(Serializable):\n\n def __init__(self, base_eps=1e-8, symmetric=True, grad_clip=None, num_slices=1):\n Serializable.quick_init(self, locals())\n self.base_eps = base_eps\n self.symmetric = symmetric\n self.grad_clip = grad_clip\n self._num_slices = num_slices\n\n def update_opt(self, f, target, inputs, reg_coeff):\n self.target = target\n self.reg_coeff = reg_coeff\n\n params = target.get_params(trainable=True)\n\n constraint_grads = theano.grad(\n f, wrt=params, disconnected_inputs='warn')\n flat_grad = ext.flatten_tensor_variables(constraint_grads)\n\n def f_Hx_plain(*args):\n inputs_ = args[:len(inputs)]\n xs = args[len(inputs):]\n flat_xs = np.concatenate([np.reshape(x, (-1,)) for x in xs])\n param_val = self.target.get_param_values(trainable=True)\n eps = np.cast['float32'](\n self.base_eps / (np.linalg.norm(param_val) + 1e-8))\n self.target.set_param_values(\n param_val + eps * flat_xs, trainable=True)\n flat_grad_dvplus = self.opt_fun[\"f_grad\"](*inputs_)\n if self.symmetric:\n self.target.set_param_values(\n param_val - eps * flat_xs, trainable=True)\n flat_grad_dvminus = self.opt_fun[\"f_grad\"](*inputs_)\n hx = (flat_grad_dvplus - flat_grad_dvminus) / (2 * eps)\n self.target.set_param_values(param_val, trainable=True)\n else:\n self.target.set_param_values(param_val, trainable=True)\n flat_grad = self.opt_fun[\"f_grad\"](*inputs_)\n hx = (flat_grad_dvplus - flat_grad) / eps\n return hx\n\n self.opt_fun = ext.lazydict(\n f_grad=lambda: ext.compile_function(\n inputs=inputs,\n outputs=flat_grad,\n log_name=\"f_grad\",\n ),\n f_Hx_plain=lambda: f_Hx_plain,\n )\n\n def build_eval(self, inputs):\n def eval(x):\n xs = tuple(self.target.flat_to_params(x, trainable=True))\n ret = sliced_fun(self.opt_fun[\"f_Hx_plain\"], self._num_slices)(\n inputs, xs) + self.reg_coeff * x\n return ret\n\n return eval\n\n\nclass ConjugateGradientOptimizer(Serializable):\n \"\"\"\n Performs constrained optimization via line search. 
The search direction is computed using a conjugate gradient\n algorithm, which gives x = A^{-1}g, where A is a second order approximation of the constraint and g is the gradient\n of the loss function.\n \"\"\"\n\n def __init__(\n self,\n cg_iters=10,\n reg_coeff=1e-5,\n subsample_factor=1.,\n backtrack_ratio=0.8,\n max_backtracks=15,\n accept_violation=False,\n hvp_approach=None,\n num_slices=1):\n \"\"\"\n\n :param cg_iters: The number of CG iterations used to calculate A^-1 g\n :param reg_coeff: A small value so that A -> A + reg*I\n :param subsample_factor: Subsampling factor to reduce samples when using \"conjugate gradient. Since the\n computation time for the descent direction dominates, this can greatly reduce the overall computation time.\n :param accept_violation: whether to accept the descent step if it violates the line search condition after\n exhausting all backtracking budgets\n :return:\n \"\"\"\n Serializable.quick_init(self, locals())\n self._cg_iters = cg_iters\n self._reg_coeff = reg_coeff\n self._subsample_factor = subsample_factor\n self._backtrack_ratio = backtrack_ratio\n self._max_backtracks = max_backtracks\n self._num_slices = num_slices\n\n self._opt_fun = None\n self._target = None\n self._max_constraint_val = None\n self._constraint_name = None\n self._accept_violation = accept_violation\n if hvp_approach is None:\n hvp_approach = PerlmutterHvp(num_slices)\n self._hvp_approach = hvp_approach\n\n def update_opt(self, loss, target, leq_constraint, inputs, extra_inputs=None, constraint_name=\"constraint\", *args,\n **kwargs):\n \"\"\"\n :param loss: Symbolic expression for the loss function.\n :param target: A parameterized object to optimize over. It should implement methods of the\n :class:`rllab.core.paramerized.Parameterized` class.\n :param leq_constraint: A constraint provided as a tuple (f, epsilon), of the form f(*inputs) <= epsilon.\n :param inputs: A list of symbolic variables as inputs, which could be subsampled if needed. 
It is assumed\n that the first dimension of these inputs should correspond to the number of data points\n :param extra_inputs: A list of symbolic variables as extra inputs which should not be subsampled\n :return: No return value.\n \"\"\"\n\n inputs = tuple(inputs)\n if extra_inputs is None:\n extra_inputs = tuple()\n else:\n extra_inputs = tuple(extra_inputs)\n\n constraint_term, constraint_value = leq_constraint\n\n params = target.get_params(trainable=True)\n grads = theano.grad(loss, wrt=params, disconnected_inputs='warn')\n flat_grad = ext.flatten_tensor_variables(grads)\n\n self._hvp_approach.update_opt(f=constraint_term, target=target, inputs=inputs + extra_inputs,\n reg_coeff=self._reg_coeff)\n\n self._target = target\n self._max_constraint_val = constraint_value\n self._constraint_name = constraint_name\n\n self._opt_fun = ext.lazydict(\n f_loss=lambda: ext.compile_function(\n inputs=inputs + extra_inputs,\n outputs=loss,\n log_name=\"f_loss\",\n ),\n f_grad=lambda: ext.compile_function(\n inputs=inputs + extra_inputs,\n outputs=flat_grad,\n log_name=\"f_grad\",\n ),\n f_constraint=lambda: ext.compile_function(\n inputs=inputs + extra_inputs,\n outputs=constraint_term,\n log_name=\"constraint\",\n ),\n f_loss_constraint=lambda: ext.compile_function(\n inputs=inputs + extra_inputs,\n outputs=[loss, constraint_term],\n log_name=\"f_loss_constraint\",\n ),\n )\n\n def loss(self, inputs, extra_inputs=None):\n inputs = tuple(inputs)\n if extra_inputs is None:\n extra_inputs = tuple()\n return sliced_fun(self._opt_fun[\"f_loss\"], self._num_slices)(inputs, extra_inputs)\n\n def constraint_val(self, inputs, extra_inputs=None):\n inputs = tuple(inputs)\n if extra_inputs is None:\n extra_inputs = tuple()\n return sliced_fun(self._opt_fun[\"f_constraint\"], self._num_slices)(inputs, extra_inputs)\n\n def optimize(self, inputs, extra_inputs=None, subsample_grouped_inputs=None):\n\n inputs = tuple(inputs)\n if extra_inputs is None:\n extra_inputs = tuple()\n\n if self._subsample_factor < 1:\n if subsample_grouped_inputs is None:\n subsample_grouped_inputs = [inputs]\n subsample_inputs = tuple()\n for inputs_grouped in subsample_grouped_inputs:\n n_samples = len(inputs_grouped[0])\n inds = np.random.choice(\n n_samples, int(n_samples * self._subsample_factor), replace=False)\n subsample_inputs += tuple([x[inds] for x in inputs_grouped])\n else:\n subsample_inputs = inputs\n\n logger.log(\"computing loss before\")\n loss_before = sliced_fun(self._opt_fun[\"f_loss\"], self._num_slices)(\n inputs, extra_inputs)\n logger.log(\"performing update\")\n logger.log(\"computing descent direction\")\n\n flat_g = sliced_fun(self._opt_fun[\"f_grad\"], self._num_slices)(\n inputs, extra_inputs)\n\n Hx = self._hvp_approach.build_eval(subsample_inputs + extra_inputs)\n\n descent_direction = krylov.cg(Hx, flat_g, cg_iters=self._cg_iters)\n\n initial_step_size = np.sqrt(\n 2.0 * self._max_constraint_val *\n (1. 
/ (descent_direction.dot(Hx(descent_direction)) + 1e-8))\n )\n if np.isnan(initial_step_size):\n initial_step_size = 1.\n flat_descent_step = initial_step_size * descent_direction\n\n logger.log(\"descent direction computed\")\n\n prev_param = np.copy(self._target.get_param_values(trainable=True))\n n_iter = 0\n for n_iter, ratio in enumerate(self._backtrack_ratio ** np.arange(self._max_backtracks)):\n cur_step = ratio * flat_descent_step\n cur_param = prev_param - cur_step\n self._target.set_param_values(cur_param, trainable=True)\n loss, constraint_val = sliced_fun(\n self._opt_fun[\"f_loss_constraint\"], self._num_slices)(inputs, extra_inputs)\n if loss < loss_before and constraint_val <= self._max_constraint_val:\n break\n if (np.isnan(loss) or np.isnan(constraint_val) or loss >= loss_before or constraint_val >=\n self._max_constraint_val) and not self._accept_violation:\n logger.log(\"Line search condition violated. Rejecting the step!\")\n if np.isnan(loss):\n logger.log(\"Violated because loss is NaN\")\n if np.isnan(constraint_val):\n logger.log(\"Violated because constraint %s is NaN\" %\n self._constraint_name)\n if loss >= loss_before:\n logger.log(\"Violated because loss not improving\")\n if constraint_val >= self._max_constraint_val:\n logger.log(\n \"Violated because constraint %s is violated\" % self._constraint_name)\n self._target.set_param_values(prev_param, trainable=True)\n logger.log(\"backtrack iters: %d\" % n_iter)\n logger.log(\"computing loss after\")\n logger.log(\"optimization finished\")\n" ]
[ [ "numpy.arange", "numpy.reshape", "numpy.linalg.norm", "numpy.isnan" ] ]
cjm-sfw/multi-parsing
[ "439e8624c0183fdb7d70973fa91911b8f2087834" ]
[ "mmdet/datasets/densepose.py" ]
[ "import numpy as np\nfrom pycocotools.coco import COCO\n\nfrom .custom import CustomDataset\nfrom .registry import DATASETS\n\n\[email protected]_module\nclass DensePose(CustomDataset):\n\n CLASSES = ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',\n 'train', 'truck', 'boat', 'traffic_light', 'fire_hydrant',\n 'stop_sign', 'parking_meter', 'bench', 'bird', 'cat', 'dog',\n 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',\n 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',\n 'skis', 'snowboard', 'sports_ball', 'kite', 'baseball_bat',\n 'baseball_glove', 'skateboard', 'surfboard', 'tennis_racket',\n 'bottle', 'wine_glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',\n 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',\n 'hot_dog', 'pizza', 'donut', 'cake', 'chair', 'couch',\n 'potted_plant', 'bed', 'dining_table', 'toilet', 'tv', 'laptop',\n 'mouse', 'remote', 'keyboard', 'cell_phone', 'microwave',\n 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock',\n 'vase', 'scissors', 'teddy_bear', 'hair_drier', 'toothbrush')\n \n PARSING_CLASSES = ('Torso', 'Right Hand', 'Left Hand', 'Left Foot', 'Right Foot', \n 'Upper Leg Right', 'Upper Leg Left', 'Lower Leg Right', 'Lower Leg Left', \n 'Upper Arm Left', 'Upper Arm Right', 'Lower Arm Left', 'Lower Arm Right', 'Head')\n\n def load_annotations(self, ann_file):\n self.coco = COCO(ann_file)\n self.cat_ids = self.coco.getCatIds('person')\n self.cat2label = {\n cat_id: i + 1\n for i, cat_id in enumerate(self.cat_ids)\n }\n self.img_ids = self.coco.getImgIds(catIds=self.cat_ids)\n \n img_infos = []\n \n for i in self.img_ids:\n info = self.coco.loadImgs([i])[0]\n info['filename'] = info['file_name'].replace('COCO_train2014_', '')\n img_infos.append(info)\n return img_infos\n\n def get_ann_info(self, idx):\n img_id = self.img_infos[idx]['id']\n ann_ids = self.coco.getAnnIds(imgIds=[img_id])\n ann_info = self.coco.loadAnns(ann_ids)\n return self._parse_ann_info(self.img_infos[idx], ann_info)\n\n def _filter_imgs(self, min_size=32):\n \"\"\"Filter images too small or without ground truths.\"\"\"\n valid_inds = []\n ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())\n for i, img_info in enumerate(self.img_infos):\n if self.filter_empty_gt and self.img_ids[i] not in ids_with_ann:\n continue\n if min(img_info['width'], img_info['height']) >= min_size:\n valid_inds.append(i)\n return valid_inds\n \n\n def _parse_ann_info(self, img_info, ann_info):\n \"\"\"Parse bbox and mask annotation.\n\n Args:\n ann_info (list[dict]): Annotation info of an image.\n with_mask (bool): Whether to parse mask annotations.\n\n Returns:\n dict: A dict containing the following keys: bboxes, bboxes_ignore,\n labels, masks, seg_map. 
\"masks\" are raw annotations and not\n decoded into binary masks.\n \"\"\"\n \n gt_bboxes = []\n gt_labels = []\n gt_bboxes_ignore = []\n gt_masks_ann = []\n gt_parsing = []\n\n for i, ann in enumerate(ann_info):\n if ann.get('ignore', False):\n continue\n x1, y1, w, h = ann['bbox']\n if ann['area'] <= 0 or w < 1 or h < 1:\n continue\n bbox = [x1, y1, x1 + w - 1, y1 + h - 1]\n if ann.get('iscrowd', False):\n gt_bboxes_ignore.append(bbox)\n else:\n gt_bboxes.append(bbox)\n gt_labels.append(self.cat2label[ann['category_id']])\n gt_masks_ann.append(ann['segmentation'])\n if ann.get('dp_masks'):\n gt_parsing.append(ann['dp_masks'])\n else:\n gt_parsing.append([])\n\n if gt_bboxes:\n gt_bboxes = np.array(gt_bboxes, dtype=np.float32)\n gt_labels = np.array(gt_labels, dtype=np.int64)\n else:\n gt_bboxes = np.zeros((0, 4), dtype=np.float32)\n gt_labels = np.array([], dtype=np.int64)\n\n if gt_bboxes_ignore:\n gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)\n else:\n gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)\n\n seg_map = img_info['filename'].replace('jpg', 'png')\n \n ann = dict(\n bboxes=gt_bboxes,\n labels=gt_labels,\n bboxes_ignore=gt_bboxes_ignore,\n masks=gt_masks_ann,\n parsing = gt_parsing,\n seg_map=seg_map)\n\n return ann\n" ]
[ [ "numpy.array", "numpy.zeros" ] ]
gh-determined-ai/determined
[ "9a1ab33a3a356b69681b3351629fef4ab98ddb56" ]
[ "model_hub/examples/huggingface/text-classification/glue_trial.py" ]
[ "\"\"\"\nThis example is largely based on the GLUE text-classification example in the huggingface\ntransformers library. The license for the transformer's library is reproduced below.\n\n==================================================================================================\n\nCopyright 2020 The HuggingFace Team. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport functools\nimport logging\nfrom typing import Dict, Union\n\nimport attrdict\nimport datasets\nimport numpy as np\nimport transformers\n\nimport determined.pytorch as det_torch\nimport model_hub.huggingface as hf\nimport model_hub.utils as utils\n\ntask_to_keys = {\n \"cola\": (\"sentence\", None),\n \"mnli\": (\"premise\", \"hypothesis\"),\n \"mrpc\": (\"sentence1\", \"sentence2\"),\n \"qnli\": (\"question\", \"sentence\"),\n \"qqp\": (\"question1\", \"question2\"),\n \"rte\": (\"sentence1\", \"sentence2\"),\n \"sst2\": (\"sentence\", None),\n \"stsb\": (\"sentence1\", \"sentence2\"),\n \"wnli\": (\"sentence1\", \"sentence2\"),\n}\n\n\nclass GLUETrial(hf.BaseTransformerTrial):\n def __init__(self, context: det_torch.PyTorchTrialContext) -> None:\n self.logger = logging.getLogger(__name__)\n self.hparams = attrdict.AttrDict(context.get_hparams())\n self.data_config = attrdict.AttrDict(context.get_data_config())\n self.context = context\n\n # Load dataset and get metadata.\n # This needs to be done before we initialize the HF config, tokenizer, and model\n # because we need to know num_labels before doing so.\n\n # For CSV/JSON files, this example will use as labels the column called `label` and as pair\n # of sentences the sentences in columns called `sentence1` and `sentence2` if such column\n # exists or the first two columns not named label if at least two columns are provided.\n #\n # If the CSVs/JSONs contain only one non-label column, the example will do single sentence\n # classification on this single column.\n\n # See more about loading any type of standard or custom dataset at\n # https://huggingface.co/docs/datasets/loading_datasets.html.\n\n self.raw_datasets = hf.default_load_dataset(self.data_config)\n\n if self.hparams.finetuning_task is not None:\n is_regression = self.hparams.finetuning_task == \"stsb\"\n if not is_regression:\n label_list = self.raw_datasets[\"train\"].features[\"label\"].names\n num_labels = len(label_list)\n else:\n num_labels = 1\n else:\n # Trying to have good defaults here, don't hesitate to tweak to your needs.\n is_regression = self.raw_datasets[\"train\"].features[\"label\"].dtype in [\n \"float32\",\n \"float64\",\n ]\n if is_regression:\n num_labels = 1\n else:\n # A useful fast method is datasets.Dataset.unique from\n # https://huggingface.co/docs/datasets/package_reference/main_classes.html\n label_list = self.raw_datasets[\"train\"].unique(\"label\")\n label_list.sort() # Let's sort it for determinism\n num_labels = len(label_list)\n self.is_regression = is_regression\n self.hparams.num_labels = num_labels\n if not self.is_regression:\n self.label_list = 
label_list\n\n super(GLUETrial, self).__init__(context)\n self.logger.info(self.config)\n\n # We need to create the tokenized dataset after init because we need to model and\n # tokenizer to be available.\n self.tokenized_datasets = self.build_datasets()\n train_length = len(self.tokenized_datasets[\"train\"])\n self.logger.info(\"training records: {}\".format(train_length))\n if (\n \"records_per_epoch\" in self.exp_config\n and train_length != self.exp_config[\"records_per_epoch\"]\n ):\n self.logger.warning(\n \"number of train records {} does not match records_per_epoch of {}\".format(\n train_length, self.exp_config[\"records_per_epoch\"]\n )\n )\n\n # Create metric reducer\n metric = datasets.load_metric(\"glue\", self.hparams.finetuning_task)\n\n # You can define your custom compute_metrics function. It takes an `EvalPrediction` object\n # (a namedtuple with a predictions and label_ids field) and has to return a dictionary\n # mapping string to float.\n def compute_metrics(pred_labels) -> Dict:\n preds, labels = zip(*pred_labels)\n preds = utils.expand_like(preds)\n labels = utils.expand_like(labels)\n preds = np.squeeze(preds) if is_regression else np.argmax(preds, axis=1)\n if self.hparams.finetuning_task is not None:\n result = metric.compute(predictions=preds, references=labels)\n if len(result) > 1:\n result[\"combined_score\"] = np.mean(list(result.values())).item()\n return result\n elif is_regression:\n return {\"mse\": ((preds - labels) ** 2).mean().item()}\n else:\n return {\"accuracy\": (preds == labels).astype(np.float32).mean().item()}\n\n self.reducer = context.wrap_reducer(compute_metrics, for_training=False)\n\n def build_datasets(self) -> Union[datasets.Dataset, datasets.DatasetDict]:\n # Preprocessing the datasets\n if self.hparams.finetuning_task is not None:\n sentence1_key, sentence2_key = task_to_keys[self.hparams.finetuning_task]\n else:\n # We try to have some nice defaults but don't hesitate to tweak to your use case.\n non_label_column_names = [\n name for name in self.raw_datasets[\"train\"].column_names if name != \"label\"\n ]\n if \"sentence1\" in non_label_column_names and \"sentence2\" in non_label_column_names:\n sentence1_key, sentence2_key = \"sentence1\", \"sentence2\"\n else:\n if len(non_label_column_names) >= 2:\n sentence1_key, sentence2_key = non_label_column_names[:2]\n else:\n sentence1_key, sentence2_key = non_label_column_names[0], None\n\n # Padding strategy\n if self.data_config.pad_to_max_length:\n padding = \"max_length\"\n else:\n # We will pad later, dynamically at batch creation to the max_seq_length in each batch.\n padding = False\n\n # Some models have set the order of the labels to use, so let's make sure we do use it.\n label_to_id = None\n if (\n self.model.config.label2id\n != transformers.PretrainedConfig(num_labels=self.hparams.num_labels).label2id\n and self.hparams.finetuning_task is not None\n and not self.is_regression\n ):\n # Some have all caps in their config, some don't.\n label_name_to_id = {k.lower(): v for k, v in self.model.config.label2id.items()}\n if sorted(label_name_to_id.keys()) == sorted(self.label_list):\n label_to_id = {\n i: label_name_to_id[self.label_list[i]] for i in range(self.hparams.num_labels)\n }\n else:\n self.logger.warning(\n \"Your model seems to have been trained with labels, but they don't match the \"\n f\"dataset: model labels: {sorted(label_name_to_id.keys())}, \"\n f\"dataset labels: {sorted(self.label_list)}.\"\n \"\\nIgnoring the model labels as a result.\",\n )\n elif 
self.hparams.finetuning_task is None and not self.is_regression:\n label_to_id = {v: i for i, v in enumerate(self.label_list)}\n\n if self.data_config.max_seq_length > self.tokenizer.model_max_length:\n self.logger.warning(\n f\"The max_seq_length passed ({self.data_config.max_seq_length}) is larger than \"\n f\"the maximum length for the model ({self.tokenizer.model_max_length}). Using \"\n f\"max_seq_length={self.tokenizer.model_max_length}.\"\n )\n max_seq_length = min(self.data_config.max_seq_length, self.tokenizer.model_max_length)\n\n # We cannot use self.tokenizer as a non-local variable in the preprocess_function if we\n # want map to be able to cache the output of the tokenizer. Hence, the preprocess_function\n # takes a tokenizer explicitly as an input and we create a closure using functools.partial.\n def preprocess_function(tokenizer, padding, max_seq_length, examples):\n # Tokenize the texts\n args = (\n (examples[sentence1_key],)\n if sentence2_key is None\n else (examples[sentence1_key], examples[sentence2_key])\n )\n result = tokenizer(*args, padding=padding, max_length=max_seq_length, truncation=True)\n\n # Map labels to IDs (not necessary for GLUE tasks)\n if label_to_id is not None and \"label\" in examples:\n result[\"label\"] = [label_to_id[label] for label in examples[\"label\"]]\n return result\n\n tokenized_datasets = self.raw_datasets.map(\n functools.partial(preprocess_function, self.tokenizer, padding, max_seq_length),\n batched=True,\n load_from_cache_file=not self.data_config.overwrite_cache,\n )\n for _, data in tokenized_datasets.items():\n hf.remove_unused_columns(self.model, data)\n\n # Data collator will default to DataCollatorWithPadding, so we change it if we already\n # did the padding.\n if self.data_config.pad_to_max_length:\n self.collator = transformers.default_data_collator\n elif self.hparams.use_apex_amp:\n collator = transformers.DataCollatorWithPadding(self.tokenizer, pad_to_multiple_of=8)\n self.collator = lambda x: collator(x).data\n else:\n self.collator = None\n return tokenized_datasets\n\n def build_training_data_loader(self) -> det_torch.DataLoader:\n return det_torch.DataLoader(\n self.tokenized_datasets[\"train\"],\n batch_size=self.context.get_per_slot_batch_size(),\n collate_fn=self.collator,\n )\n\n def build_validation_data_loader(self) -> det_torch.DataLoader:\n eval_dataset = self.tokenized_datasets[\n \"validation_matched\" if self.hparams.finetuning_task == \"mnli\" else \"validation\"\n ]\n return det_torch.DataLoader(\n eval_dataset,\n batch_size=self.context.get_per_slot_batch_size(),\n collate_fn=self.collator,\n )\n\n def evaluate_batch(self, batch: det_torch.TorchData, batch_idx: int) -> Dict:\n outputs = self.model(**batch)\n tmp_eval_loss, logits = outputs[:2]\n preds = logits.detach().cpu().numpy()\n out_label_ids = batch[\"labels\"].detach().cpu().numpy()\n self.reducer.update((preds, out_label_ids))\n # We will return just the metrics outputed by the reducer.\n return {}\n" ]
[ [ "numpy.argmax", "numpy.squeeze" ] ]
wuyifan2233/Tencent_WWF
[ "2b248a810295f95cb0483837cb8cb8797c144821" ]
[ "Final_data_building/video_based_split/split_vid_top14_p123.py" ]
[ "# -*- coding: UTF-8 -*-\nfrom math import e, inf\nimport os\nfrom numpy.lib.type_check import _imag_dispatcher\nimport pandas as pd\nimport shutil\nimport numpy as np\nimport cv2\nimport random\nfrom tqdm import tqdm\nimport pyfastcopy\nimport json,sklearn\nfrom sklearn.model_selection import train_test_split\n\n\n\ndef main():\n #test\n data_stat_path='D:\\WWF_Det\\WWF_Det\\Raw_data_stat/top14-p1-p2-p3-merged/top14-p123-combine.csv'\n critiria_path='D:/WWF_Det/WWF_Det/Raw_data_stat/top14-all/split_critiria.csv'\n save_base='D:/WWF_Det/WWF_Det/Final_data_stat/top14-p123/'\n if not os.path.exists(save_base): os.makedirs(save_base)\n save_path=save_base+'video_split.csv'\n df=pd.read_csv(data_stat_path)\n df['label']=None\n id_list=df.index.values.tolist()\n df_cri=pd.read_csv(critiria_path)\n cate_list=np.unique(df['cate'].values)\n np.random.seed(2021)\n \n for cate in cate_list:\n \n infra_index=df.loc[(df['cate'] == cate)&(df['modality'] == 'Infra')].sample(frac=1).index.values.tolist()\n rgb_index=df.loc[(df['cate'] == cate)&(df['modality'] == 'RGB')].sample(frac=1).index.values.tolist()\n infra_test_num=int(df_cri.loc[df_cri['cate']==cate]['infra_test'])\n rgb_test_num=int(df_cri.loc[df_cri['cate']==cate]['rgb_test'])\n infra_test_index=infra_index[:infra_test_num]\n rgb_test_index=rgb_index[:rgb_test_num]\n\n test_index_all=list(infra_test_index+rgb_test_index)\n \n train_index_all=[i for i in infra_index+rgb_index if i not in test_index_all]\n #print(len(test_index_all))\n for ID in test_index_all:\n \n df.loc[ID,'label']='test'\n ori_dir=df.loc[ID,'video_path']\n cate=df.loc[ID,'cate']\n target_base=os.path.join('D:/WWF_Det/WWF_Data/Final_Data/valset-vid-v1/',cate)\n target_dir=os.path.join(target_base,ori_dir.split('/')[-1])\n if not os.path.exists(target_base): os.makedirs(target_base)\n shutil.copyfile(ori_dir,target_dir)\n for ID in train_index_all:\n df.loc[ID,'label']='train'\n #print(df)\n \n #break\n # df.to_csv(save_path,index=False)\n\nif __name__ == \"__main__\":\n \n main()" ]
[ [ "pandas.read_csv", "numpy.random.seed", "numpy.unique" ] ]
trsvchn/captum
[ "38b57082d22854013c0a0b80a51c0b85269afdaf" ]
[ "tests/attr/neuron/test_neuron_deeplift.py" ]
[ "#!/usr/bin/env python3\n\nfrom __future__ import print_function\n\nfrom typing import Tuple, Union\n\nimport torch\nfrom captum._utils.typing import TensorOrTupleOfTensorsGeneric\nfrom captum.attr._core.neuron.neuron_deep_lift import NeuronDeepLift, NeuronDeepLiftShap\nfrom tests.attr.layer.test_layer_deeplift import (\n _create_inps_and_base_for_deeplift_neuron_layer_testing,\n _create_inps_and_base_for_deepliftshap_neuron_layer_testing,\n)\nfrom tests.helpers.basic import BaseTest, assertTensorAlmostEqual\nfrom tests.helpers.basic_models import (\n BasicModel_ConvNet,\n BasicModel_ConvNet_MaxPool3d,\n LinearMaxPoolLinearModel,\n ReLULinearModel,\n)\nfrom torch import Tensor\n\n\nclass Test(BaseTest):\n def test_relu_neuron_deeplift(self) -> None:\n model = ReLULinearModel(inplace=True)\n\n x1 = torch.tensor([[-10.0, 1.0, -5.0]], requires_grad=True)\n x2 = torch.tensor([[3.0, 3.0, 1.0]], requires_grad=True)\n\n inputs = (x1, x2)\n\n neuron_dl = NeuronDeepLift(model, model.relu)\n attributions = neuron_dl.attribute(inputs, 0, attribute_to_neuron_input=False)\n assertTensorAlmostEqual(self, attributions[0], [[0.0, 0.0, 0.0]])\n assertTensorAlmostEqual(self, attributions[1], [[0.0, 0.0, 0.0]])\n\n def test_deeplift_compare_with_and_without_inplace(self) -> None:\n model1 = ReLULinearModel(inplace=True)\n model2 = ReLULinearModel()\n x1 = torch.tensor([[-10.0, 1.0, -5.0]], requires_grad=True)\n x2 = torch.tensor([[3.0, 3.0, 1.0]], requires_grad=True)\n inputs = (x1, x2)\n neuron_dl1 = NeuronDeepLift(model1, model1.relu)\n attributions1 = neuron_dl1.attribute(inputs, 0, attribute_to_neuron_input=False)\n\n neuron_dl2 = NeuronDeepLift(model2, model2.relu)\n attributions2 = neuron_dl2.attribute(inputs, 0, attribute_to_neuron_input=False)\n\n assertTensorAlmostEqual(self, attributions1[0], attributions2[0])\n assertTensorAlmostEqual(self, attributions1[1], attributions2[1])\n\n def test_linear_neuron_deeplift(self) -> None:\n model = ReLULinearModel()\n inputs, baselines = _create_inps_and_base_for_deeplift_neuron_layer_testing()\n\n neuron_dl = NeuronDeepLift(model, model.l3)\n attributions = neuron_dl.attribute(\n inputs, 0, baselines, attribute_to_neuron_input=True\n )\n assertTensorAlmostEqual(self, attributions[0], [[-0.0, 0.0, -0.0]])\n assertTensorAlmostEqual(self, attributions[1], [[0.0, 0.0, 0.0]])\n\n attributions = neuron_dl.attribute(\n inputs, 0, baselines, attribute_to_neuron_input=False\n )\n self.assertTrue(neuron_dl.multiplies_by_inputs)\n assertTensorAlmostEqual(self, attributions[0], [[-0.0, 0.0, -0.0]])\n assertTensorAlmostEqual(self, attributions[1], [[6.0, 9.0, 0.0]])\n\n def test_linear_neuron_deeplift_wo_inp_marginal_effects(self) -> None:\n model = ReLULinearModel()\n inputs, baselines = _create_inps_and_base_for_deeplift_neuron_layer_testing()\n\n neuron_dl = NeuronDeepLift(model, model.l3, multiply_by_inputs=False)\n attributions = neuron_dl.attribute(\n inputs, 0, baselines, attribute_to_neuron_input=False\n )\n assertTensorAlmostEqual(self, attributions[0], [[-0.0, 0.0, -0.0]])\n assertTensorAlmostEqual(self, attributions[1], [[2.0, 3.0, 0.0]])\n\n def test_relu_deeplift_with_custom_attr_func(self) -> None:\n model = ReLULinearModel()\n inputs, baselines = _create_inps_and_base_for_deeplift_neuron_layer_testing()\n neuron_dl = NeuronDeepLift(model, model.l3)\n expected = ([0.0, 0.0, 0.0], [0.0, 0.0, 0.0])\n self._relu_custom_attr_func_assert(neuron_dl, inputs, baselines, expected)\n\n def test_relu_neuron_deeplift_shap(self) -> None:\n model = ReLULinearModel()\n 
(\n inputs,\n baselines,\n ) = _create_inps_and_base_for_deepliftshap_neuron_layer_testing()\n\n neuron_dl = NeuronDeepLiftShap(model, model.relu)\n\n attributions = neuron_dl.attribute(\n inputs, 0, baselines, attribute_to_neuron_input=False\n )\n assertTensorAlmostEqual(self, attributions[0], [[0.0, 0.0, 0.0]])\n assertTensorAlmostEqual(self, attributions[1], [[0.0, 0.0, 0.0]])\n\n def test_linear_neuron_deeplift_shap(self) -> None:\n model = ReLULinearModel()\n (\n inputs,\n baselines,\n ) = _create_inps_and_base_for_deepliftshap_neuron_layer_testing()\n\n neuron_dl = NeuronDeepLiftShap(model, model.l3)\n attributions = neuron_dl.attribute(\n inputs, 0, baselines, attribute_to_neuron_input=True\n )\n assertTensorAlmostEqual(self, attributions[0], [[-0.0, 0.0, -0.0]])\n assertTensorAlmostEqual(self, attributions[1], [[0.0, 0.0, 0.0]])\n\n attributions = neuron_dl.attribute(\n inputs, 0, baselines, attribute_to_neuron_input=False\n )\n\n self.assertTrue(neuron_dl.multiplies_by_inputs)\n assertTensorAlmostEqual(self, attributions[0], [[-0.0, 0.0, -0.0]])\n assertTensorAlmostEqual(self, attributions[1], [[6.0, 9.0, 0.0]])\n\n def test_linear_neuron_deeplift_shap_wo_inp_marginal_effects(self) -> None:\n model = ReLULinearModel()\n (\n inputs,\n baselines,\n ) = _create_inps_and_base_for_deepliftshap_neuron_layer_testing()\n\n neuron_dl = NeuronDeepLiftShap(model, model.l3, multiply_by_inputs=False)\n attributions = neuron_dl.attribute(\n inputs, 0, baselines, attribute_to_neuron_input=False\n )\n\n assertTensorAlmostEqual(self, attributions[0], [[-0.0, 0.0, -0.0]])\n assertTensorAlmostEqual(self, attributions[1], [[2.0, 3.0, 0.0]])\n\n attributions = neuron_dl.attribute(\n inputs, lambda x: x[:, 0], baselines, attribute_to_neuron_input=False\n )\n\n assertTensorAlmostEqual(self, attributions[0], [[-0.0, 0.0, -0.0]])\n assertTensorAlmostEqual(self, attributions[1], [[2.0, 3.0, 0.0]])\n\n def test_relu_deepliftshap_with_custom_attr_func(self) -> None:\n model = ReLULinearModel()\n (\n inputs,\n baselines,\n ) = _create_inps_and_base_for_deepliftshap_neuron_layer_testing()\n neuron_dl = NeuronDeepLiftShap(model, model.l3)\n expected = (torch.zeros(3, 3), torch.zeros(3, 3))\n self._relu_custom_attr_func_assert(neuron_dl, inputs, baselines, expected)\n\n def _relu_custom_attr_func_assert(\n self,\n attr_method: Union[NeuronDeepLift, NeuronDeepLiftShap],\n inputs: TensorOrTupleOfTensorsGeneric,\n baselines,\n expected,\n ) -> None:\n def custom_attr_func(\n multipliers: Tuple[Tensor, ...],\n inputs: Tuple[Tensor, ...],\n baselines: Union[None, Tuple[Union[Tensor, int, float], ...]] = None,\n ) -> Tuple[Tensor, ...]:\n return tuple(multiplier * 0.0 for multiplier in multipliers)\n\n attr = attr_method.attribute(\n inputs, 0, baselines, custom_attribution_func=custom_attr_func\n )\n assertTensorAlmostEqual(self, attr[0], expected[0], 0.0)\n assertTensorAlmostEqual(self, attr[1], expected[1], 0.0)\n\n def test_lin_maxpool_lin_classification(self) -> None:\n inputs = torch.ones(2, 4)\n baselines = torch.tensor([[1, 2, 3, 9], [4, 8, 6, 7]]).float()\n\n model = LinearMaxPoolLinearModel()\n ndl = NeuronDeepLift(model, model.pool1)\n attr = ndl.attribute(inputs, neuron_selector=(0), baselines=baselines)\n\n ndl2 = NeuronDeepLift(model, model.lin2)\n attr2 = ndl2.attribute(\n inputs,\n neuron_selector=(0),\n baselines=baselines,\n attribute_to_neuron_input=True,\n )\n assertTensorAlmostEqual(self, attr, attr2)\n\n def test_convnet_maxpool2d_classification(self) -> None:\n inputs = 100 * torch.randn(2, 1, 
10, 10)\n model = BasicModel_ConvNet()\n\n ndl = NeuronDeepLift(model, model.pool1)\n attr = ndl.attribute(inputs, neuron_selector=(0, 0, 0))\n\n ndl2 = NeuronDeepLift(model, model.conv2)\n attr2 = ndl2.attribute(\n inputs, neuron_selector=(0, 0, 0), attribute_to_neuron_input=True\n )\n\n assertTensorAlmostEqual(self, attr.sum(), attr2.sum())\n\n def test_convnet_maxpool3d_classification(self) -> None:\n inputs = 100 * torch.randn(2, 1, 10, 10, 10)\n model = BasicModel_ConvNet_MaxPool3d()\n\n ndl = NeuronDeepLift(model, model.pool1)\n attr = ndl.attribute(inputs, neuron_selector=(0, 0, 0, 0))\n\n ndl2 = NeuronDeepLift(model, model.conv2)\n attr2 = ndl2.attribute(\n inputs, neuron_selector=(0, 0, 0, 0), attribute_to_neuron_input=True\n )\n\n assertTensorAlmostEqual(self, attr.sum(), attr2.sum())\n" ]
[ [ "torch.zeros", "torch.ones", "torch.tensor", "torch.randn" ] ]
minnieteng/smoke_project
[ "cc3c8f16f7759fe29e46d3cec32a3ed6ca86bd5f" ]
[ "smoke/noaa/plot_grid.py" ]
[ "import matplotlib.pyplot as plt\nimport numpy as np\n\n\ngrid2D = np.load(r\"C:\\temp\\10km_grids\\20180808-23.npy\")\n\nfig, ax = plt.subplots(figsize=(16.2, 16))\nim = ax.imshow(grid2D)\nax.set_xlabel(\"Cols\")\nax.set_ylabel(\"Rows\")\nplt.colorbar(im)\n\nplt.savefig('grid2D.png')\n" ]
[ [ "numpy.load", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.subplots", "matplotlib.pyplot.savefig" ] ]
sherry-1001/dgl-ke
[ "2d2542a21f9725f764e9b927ed257c575f374f47" ]
[ "python/dglke/dataloader/sampler.py" ]
[ "# -*- coding: utf-8 -*-\n#\n# sampler.py\n#\n# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport math\nimport numpy as np\nimport scipy as sp\nimport dgl.backend as F\nimport dgl\nimport os\nimport sys\nimport pickle\nimport time\n\nfrom dgl.base import NID, EID\n\ndef SoftRelationPartition(edges, n, threshold=0.05):\n \"\"\"This partitions a list of edges to n partitions according to their\n relation types. For any relation with number of edges larger than the\n threshold, its edges will be evenly distributed into all partitions.\n For any relation with number of edges smaller than the threshold, its\n edges will be put into one single partition.\n\n Algo:\n For r in relations:\n if r.size() > threadold\n Evenly divide edges of r into n parts and put into each relation.\n else\n Find partition with fewest edges, and put edges of r into \n this partition.\n\n Parameters\n ----------\n edges : (heads, rels, tails) triple\n Edge list to partition\n n : int\n Number of partitions\n threshold : float\n The threshold of whether a relation is LARGE or SMALL\n Default: 5%\n\n Returns\n -------\n List of np.array\n Edges of each partition\n List of np.array\n Edge types of each partition\n bool\n Whether there exists some relations belongs to multiple partitions\n \"\"\"\n heads, rels, tails = edges\n print('relation partition {} edges into {} parts'.format(len(heads), n))\n uniq, cnts = np.unique(rels, return_counts=True)\n idx = np.flip(np.argsort(cnts))\n cnts = cnts[idx]\n uniq = uniq[idx]\n assert cnts[0] > cnts[-1]\n edge_cnts = np.zeros(shape=(n,), dtype=np.int64)\n rel_cnts = np.zeros(shape=(n,), dtype=np.int64)\n rel_dict = {}\n rel_parts = []\n cross_rel_part = []\n for _ in range(n):\n rel_parts.append([])\n\n large_threshold = int(len(rels) * threshold)\n capacity_per_partition = int(len(rels) / n)\n # ensure any relation larger than the partition capacity will be split\n large_threshold = capacity_per_partition if capacity_per_partition < large_threshold \\\n else large_threshold\n num_cross_part = 0\n for i in range(len(cnts)):\n cnt = cnts[i]\n r = uniq[i]\n r_parts = []\n if cnt > large_threshold:\n avg_part_cnt = (cnt // n) + 1\n num_cross_part += 1\n for j in range(n):\n part_cnt = avg_part_cnt if cnt > avg_part_cnt else cnt\n r_parts.append([j, part_cnt])\n rel_parts[j].append(r)\n edge_cnts[j] += part_cnt\n rel_cnts[j] += 1\n cnt -= part_cnt\n cross_rel_part.append(r)\n else:\n idx = np.argmin(edge_cnts)\n r_parts.append([idx, cnt])\n rel_parts[idx].append(r)\n edge_cnts[idx] += cnt\n rel_cnts[idx] += 1\n rel_dict[r] = r_parts\n\n for i, edge_cnt in enumerate(edge_cnts):\n print('part {} has {} edges and {} relations'.format(i, edge_cnt, rel_cnts[i]))\n print('{}/{} duplicated relation across partitions'.format(num_cross_part, len(cnts)))\n\n parts = []\n for i in range(n):\n parts.append([])\n rel_parts[i] = np.array(rel_parts[i])\n\n for i, r in enumerate(rels):\n r_part = rel_dict[r][0]\n 
part_idx = r_part[0]\n cnt = r_part[1]\n parts[part_idx].append(i)\n cnt -= 1\n if cnt == 0:\n rel_dict[r].pop(0)\n else:\n rel_dict[r][0][1] = cnt\n\n for i, part in enumerate(parts):\n parts[i] = np.array(part, dtype=np.int64)\n shuffle_idx = np.concatenate(parts)\n heads[:] = heads[shuffle_idx]\n rels[:] = rels[shuffle_idx]\n tails[:] = tails[shuffle_idx]\n\n off = 0\n for i, part in enumerate(parts):\n parts[i] = np.arange(off, off + len(part))\n off += len(part)\n cross_rel_part = np.array(cross_rel_part)\n\n return parts, rel_parts, num_cross_part > 0, cross_rel_part\n\ndef BalancedRelationPartition(edges, n):\n \"\"\"This partitions a list of edges based on relations to make sure\n each partition has roughly the same number of edges and relations.\n Algo:\n For r in relations:\n Find partition with fewest edges\n if r.size() > num_of empty_slot\n put edges of r into this partition to fill the partition,\n find next partition with fewest edges to put r in.\n else\n put edges of r into this partition.\n\n Parameters\n ----------\n edges : (heads, rels, tails) triple\n Edge list to partition\n n : int\n number of partitions\n\n Returns\n -------\n List of np.array\n Edges of each partition\n List of np.array\n Edge types of each partition\n bool\n Whether there exists some relations belongs to multiple partitions\n \"\"\"\n heads, rels, tails = edges\n print('relation partition {} edges into {} parts'.format(len(heads), n))\n uniq, cnts = np.unique(rels, return_counts=True)\n idx = np.flip(np.argsort(cnts))\n cnts = cnts[idx]\n uniq = uniq[idx]\n assert cnts[0] > cnts[-1]\n edge_cnts = np.zeros(shape=(n,), dtype=np.int64)\n rel_cnts = np.zeros(shape=(n,), dtype=np.int64)\n rel_dict = {}\n rel_parts = []\n for _ in range(n):\n rel_parts.append([])\n\n max_edges = (len(rels) // n) + 1\n num_cross_part = 0\n for i in range(len(cnts)):\n cnt = cnts[i]\n r = uniq[i]\n r_parts = []\n\n while cnt > 0:\n idx = np.argmin(edge_cnts)\n if edge_cnts[idx] + cnt <= max_edges:\n r_parts.append([idx, cnt])\n rel_parts[idx].append(r)\n edge_cnts[idx] += cnt\n rel_cnts[idx] += 1\n cnt = 0\n else:\n cur_cnt = max_edges - edge_cnts[idx]\n r_parts.append([idx, cur_cnt])\n rel_parts[idx].append(r)\n edge_cnts[idx] += cur_cnt\n rel_cnts[idx] += 1\n num_cross_part += 1\n cnt -= cur_cnt\n rel_dict[r] = r_parts\n\n for i, edge_cnt in enumerate(edge_cnts):\n print('part {} has {} edges and {} relations'.format(i, edge_cnt, rel_cnts[i]))\n print('{}/{} duplicated relation across partitions'.format(num_cross_part, len(cnts)))\n\n parts = []\n for i in range(n):\n parts.append([])\n rel_parts[i] = np.array(rel_parts[i])\n\n for i, r in enumerate(rels):\n r_part = rel_dict[r][0]\n part_idx = r_part[0]\n cnt = r_part[1]\n parts[part_idx].append(i)\n cnt -= 1\n if cnt == 0:\n rel_dict[r].pop(0)\n else:\n rel_dict[r][0][1] = cnt\n\n for i, part in enumerate(parts):\n parts[i] = np.array(part, dtype=np.int64)\n shuffle_idx = np.concatenate(parts)\n heads[:] = heads[shuffle_idx]\n rels[:] = rels[shuffle_idx]\n tails[:] = tails[shuffle_idx]\n\n off = 0\n for i, part in enumerate(parts):\n parts[i] = np.arange(off, off + len(part))\n off += len(part)\n\n return parts, rel_parts, num_cross_part > 0\n\ndef RandomPartition(edges, n):\n \"\"\"This partitions a list of edges randomly across n partitions\n\n Parameters\n ----------\n edges : (heads, rels, tails) triple\n Edge list to partition\n n : int\n number of partitions\n\n Returns\n -------\n List of np.array\n Edges of each partition\n \"\"\"\n heads, rels, tails = 
edges\n print('random partition {} edges into {} parts'.format(len(heads), n))\n idx = np.random.permutation(len(heads))\n heads[:] = heads[idx]\n rels[:] = rels[idx]\n tails[:] = tails[idx]\n\n part_size = int(math.ceil(len(idx) / n))\n parts = []\n for i in range(n):\n start = part_size * i\n end = min(part_size * (i + 1), len(idx))\n parts.append(idx[start:end])\n print('part {} has {} edges'.format(i, len(parts[-1])))\n return parts\n\ndef ConstructGraph(edges, n_entities, args):\n \"\"\"Construct Graph for training\n\n Parameters\n ----------\n edges : (heads, rels, tails) triple\n Edge list\n n_entities : int\n number of entities\n args :\n Global configs.\n \"\"\"\n src, etype_id, dst = edges\n coo = sp.sparse.coo_matrix((np.ones(len(src)), (src, dst)), shape=[n_entities, n_entities])\n g = dgl.DGLGraph(coo, readonly=True, multigraph=True, sort_csr=True)\n g.edata['tid'] = F.tensor(etype_id, F.int64)\n return g\n\nclass TrainDataset(object):\n \"\"\"Dataset for training\n\n Parameters\n ----------\n dataset : KGDataset\n Original dataset.\n args :\n Global configs.\n ranks:\n Number of partitions.\n \"\"\"\n def __init__(self, dataset, args, ranks=64):\n triples = dataset.train\n num_train = len(triples[0])\n print('|Train|:', num_train)\n\n if ranks > 1 and args.rel_part:\n self.edge_parts, self.rel_parts, self.cross_part, self.cross_rels = \\\n SoftRelationPartition(triples, ranks)\n elif ranks > 1:\n self.edge_parts = RandomPartition(triples, ranks)\n self.cross_part = True\n else:\n self.edge_parts = [np.arange(num_train)]\n self.rel_parts = [np.arange(dataset.n_relations)]\n self.cross_part = False\n\n self.g = ConstructGraph(triples, dataset.n_entities, args)\n\n def create_sampler(self, batch_size, neg_sample_size=2, neg_chunk_size=None, mode='head', num_workers=32,\n shuffle=True, exclude_positive=False, rank=0):\n \"\"\"Create sampler for training\n\n Parameters\n ----------\n batch_size : int\n Batch size of each mini batch.\n neg_sample_size : int\n How many negative edges sampled for each node.\n neg_chunk_size : int\n How many edges in one chunk. 
We split one batch into chunks.\n mode : str\n Sampling mode.\n number_workers: int\n Number of workers used in parallel for this sampler\n shuffle : bool\n If True, shuffle the seed edges.\n If False, do not shuffle the seed edges.\n Default: False\n exclude_positive : bool\n If True, exlucde true positive edges in sampled negative edges\n If False, return all sampled negative edges even there are positive edges\n Default: False\n rank : int\n Which partition to sample.\n\n Returns\n -------\n dgl.contrib.sampling.EdgeSampler\n Edge sampler\n \"\"\"\n EdgeSampler = getattr(dgl.contrib.sampling, 'EdgeSampler')\n assert batch_size % neg_sample_size == 0, 'batch_size should be divisible by B'\n return EdgeSampler(self.g,\n seed_edges=F.tensor(self.edge_parts[rank]),\n batch_size=batch_size,\n neg_sample_size=int(neg_sample_size/neg_chunk_size),\n chunk_size=neg_chunk_size,\n negative_mode=mode,\n num_workers=num_workers,\n shuffle=shuffle,\n exclude_positive=exclude_positive,\n return_false_neg=False)\n\n\nclass ChunkNegEdgeSubgraph(dgl.DGLGraph):\n \"\"\"Wrapper for negative graph\n\n Parameters\n ----------\n neg_g : DGLGraph\n Graph holding negative edges.\n num_chunks : int\n Number of chunks in sampled graph.\n chunk_size : int\n Info of chunk_size.\n neg_sample_size : int\n Info of neg_sample_size.\n neg_head : bool\n If True, negative_mode is 'head'\n If False, negative_mode is 'tail'\n \"\"\"\n def __init__(self, subg, num_chunks, chunk_size,\n neg_sample_size, neg_head):\n super(ChunkNegEdgeSubgraph, self).__init__(graph_data=subg.sgi.graph,\n readonly=True,\n parent=subg._parent)\n self.ndata[NID] = subg.sgi.induced_nodes.tousertensor()\n self.edata[EID] = subg.sgi.induced_edges.tousertensor()\n self.subg = subg\n self.num_chunks = num_chunks\n self.chunk_size = chunk_size\n self.neg_sample_size = neg_sample_size\n self.neg_head = neg_head\n\n @property\n def head_nid(self):\n return self.subg.head_nid\n\n @property\n def tail_nid(self):\n return self.subg.tail_nid\n\n\ndef create_neg_subgraph(pos_g, neg_g, chunk_size, neg_sample_size, is_chunked,\n neg_head, num_nodes):\n \"\"\"KG models need to know the number of chunks, the chunk size and negative sample size\n of a negative subgraph to perform the computation more efficiently.\n This function tries to infer all of these information of the negative subgraph\n and create a wrapper class that contains all of the information.\n\n Parameters\n ----------\n pos_g : DGLGraph\n Graph holding positive edges.\n neg_g : DGLGraph\n Graph holding negative edges.\n chunk_size : int\n Chunk size of negative subgrap.\n neg_sample_size : int\n Negative sample size of negative subgrap.\n is_chunked : bool\n If True, the sampled batch is chunked.\n neg_head : bool\n If True, negative_mode is 'head'\n If False, negative_mode is 'tail'\n num_nodes: int\n Total number of nodes in the whole graph.\n\n Returns\n -------\n ChunkNegEdgeSubgraph\n Negative graph wrapper\n \"\"\"\n assert neg_g.number_of_edges() % pos_g.number_of_edges() == 0\n # We use all nodes to create negative edges. 
Regardless of the sampling algorithm,\n # we can always view the subgraph with one chunk.\n if (neg_head and len(neg_g.head_nid) == num_nodes) \\\n or (not neg_head and len(neg_g.tail_nid) == num_nodes):\n num_chunks = 1\n chunk_size = pos_g.number_of_edges()\n elif is_chunked:\n # This is probably for evaluation.\n if pos_g.number_of_edges() < chunk_size \\\n and neg_g.number_of_edges() % neg_sample_size == 0:\n num_chunks = 1\n chunk_size = pos_g.number_of_edges()\n # This is probably the last batch in the training. Let's ignore it.\n elif pos_g.number_of_edges() % chunk_size > 0:\n return None\n else:\n num_chunks = int(pos_g.number_of_edges() / chunk_size)\n assert num_chunks * chunk_size == pos_g.number_of_edges()\n else:\n num_chunks = pos_g.number_of_edges()\n chunk_size = 1\n return ChunkNegEdgeSubgraph(neg_g, num_chunks, chunk_size,\n neg_sample_size, neg_head)\n\nclass EvalSampler(object):\n \"\"\"Sampler for validation and testing\n\n Parameters\n ----------\n g : DGLGraph\n Graph containing KG graph\n edges : tensor\n Seed edges\n batch_size : int\n Batch size of each mini batch.\n neg_sample_size : int\n How many negative edges sampled for each node.\n neg_chunk_size : int\n How many edges in one chunk. We split one batch into chunks.\n mode : str\n Sampling mode.\n number_workers: int\n Number of workers used in parallel for this sampler\n filter_false_neg : bool\n If True, exlucde true positive edges in sampled negative edges\n If False, return all sampled negative edges even there are positive edges\n Default: True\n \"\"\"\n def __init__(self, g, edges, batch_size, neg_sample_size, neg_chunk_size, mode, num_workers=32,\n filter_false_neg=True):\n EdgeSampler = getattr(dgl.contrib.sampling, 'EdgeSampler')\n self.sampler = EdgeSampler(g,\n batch_size=batch_size,\n seed_edges=edges,\n neg_sample_size=neg_sample_size,\n chunk_size=neg_chunk_size,\n negative_mode=mode,\n num_workers=num_workers,\n shuffle=False,\n exclude_positive=False,\n relations=g.edata['tid'],\n return_false_neg=filter_false_neg)\n self.sampler_iter = iter(self.sampler)\n self.mode = mode\n self.neg_head = 'head' in mode\n self.g = g\n self.filter_false_neg = filter_false_neg\n self.neg_chunk_size = neg_chunk_size\n self.neg_sample_size = neg_sample_size\n\n def __iter__(self):\n return self\n\n def __next__(self):\n \"\"\"Get next batch\n\n Returns\n -------\n DGLGraph\n Sampled positive graph\n ChunkNegEdgeSubgraph\n Negative graph wrapper\n \"\"\"\n while True:\n pos_g, neg_g = next(self.sampler_iter)\n if self.filter_false_neg:\n neg_positive = neg_g.edata['false_neg']\n neg_g = create_neg_subgraph(pos_g, neg_g, \n self.neg_chunk_size, \n self.neg_sample_size, \n 'chunk' in self.mode, \n self.neg_head, \n self.g.number_of_nodes())\n if neg_g is not None:\n break\n\n pos_g.ndata['id'] = pos_g.parent_nid\n neg_g.ndata['id'] = neg_g.parent_nid\n pos_g.edata['id'] = pos_g._parent.edata['tid'][pos_g.parent_eid]\n if self.filter_false_neg:\n neg_g.edata['bias'] = F.astype(-neg_positive, F.float32)\n return pos_g, neg_g\n\n def reset(self):\n \"\"\"Reset the sampler\n \"\"\"\n self.sampler_iter = iter(self.sampler)\n return self\n\nclass EvalDataset(object):\n \"\"\"Dataset for validation or testing\n\n Parameters\n ----------\n dataset : KGDataset\n Original dataset.\n args :\n Global configs.\n \"\"\"\n def __init__(self, dataset, args):\n src = [dataset.train[0]]\n etype_id = [dataset.train[1]]\n dst = [dataset.train[2]]\n self.num_train = len(dataset.train[0])\n if dataset.valid is not None:\n 
src.append(dataset.valid[0])\n etype_id.append(dataset.valid[1])\n dst.append(dataset.valid[2])\n self.num_valid = len(dataset.valid[0])\n else:\n self.num_valid = 0\n if dataset.test is not None:\n src.append(dataset.test[0])\n etype_id.append(dataset.test[1])\n dst.append(dataset.test[2])\n self.num_test = len(dataset.test[0])\n else:\n self.num_test = 0\n assert len(src) > 1, \"we need to have at least validation set or test set.\"\n src = np.concatenate(src)\n etype_id = np.concatenate(etype_id)\n dst = np.concatenate(dst)\n\n coo = sp.sparse.coo_matrix((np.ones(len(src)), (src, dst)),\n shape=[dataset.n_entities, dataset.n_entities])\n g = dgl.DGLGraph(coo, readonly=True, multigraph=True, sort_csr=True)\n g.edata['tid'] = F.tensor(etype_id, F.int64)\n self.g = g\n\n if args.eval_percent < 1:\n self.valid = np.random.randint(0, self.num_valid,\n size=(int(self.num_valid * args.eval_percent),)) + self.num_train\n else:\n self.valid = np.arange(self.num_train, self.num_train + self.num_valid)\n print('|valid|:', len(self.valid))\n\n if args.eval_percent < 1:\n self.test = np.random.randint(0, self.num_test,\n size=(int(self.num_test * args.eval_percent,)))\n self.test += self.num_train + self.num_valid\n else:\n self.test = np.arange(self.num_train + self.num_valid, self.g.number_of_edges())\n print('|test|:', len(self.test))\n\n def get_edges(self, eval_type):\n \"\"\" Get all edges in this dataset\n\n Parameters\n ----------\n eval_type : str\n Sampling type, 'valid' for validation and 'test' for testing\n\n Returns\n -------\n np.array\n Edges\n \"\"\"\n if eval_type == 'valid':\n return self.valid\n elif eval_type == 'test':\n return self.test\n else:\n raise Exception('get invalid type: ' + eval_type)\n\n def create_sampler(self, eval_type, batch_size, neg_sample_size, neg_chunk_size,\n filter_false_neg, mode='head', num_workers=32, rank=0, ranks=1):\n \"\"\"Create sampler for validation or testing\n\n Parameters\n ----------\n eval_type : str\n Sampling type, 'valid' for validation and 'test' for testing\n batch_size : int\n Batch size of each mini batch.\n neg_sample_size : int\n How many negative edges sampled for each node.\n neg_chunk_size : int\n How many edges in one chunk. We split one batch into chunks.\n filter_false_neg : bool\n If True, exlucde true positive edges in sampled negative edges\n If False, return all sampled negative edges even there are positive edges\n mode : str\n Sampling mode.\n number_workers: int\n Number of workers used in parallel for this sampler\n rank : int\n Which partition to sample.\n ranks : int\n Total number of partitions.\n\n Returns\n -------\n dgl.contrib.sampling.EdgeSampler\n Edge sampler\n \"\"\"\n edges = self.get_edges(eval_type)\n beg = edges.shape[0] * rank // ranks\n end = min(edges.shape[0] * (rank + 1) // ranks, edges.shape[0])\n edges = edges[beg: end]\n return EvalSampler(self.g, edges, batch_size, neg_sample_size, neg_chunk_size,\n mode, num_workers, filter_false_neg)\n\nclass NewBidirectionalOneShotIterator:\n \"\"\"Grouped samper iterator\n\n Parameters\n ----------\n dataloader_head : dgl.contrib.sampling.EdgeSampler\n EdgeSampler in head mode\n dataloader_tail : dgl.contrib.sampling.EdgeSampler\n EdgeSampler in tail mode\n neg_chunk_size : int\n How many edges in one chunk. 
We split one batch into chunks.\n neg_sample_size : int\n How many negative edges sampled for each node.\n is_chunked : bool\n If True, the sampled batch is chunked.\n num_nodes : int\n Total number of nodes in the whole graph.\n \"\"\"\n def __init__(self, dataloader_head, dataloader_tail, neg_chunk_size, neg_sample_size,\n is_chunked, num_nodes):\n self.sampler_head = dataloader_head\n self.sampler_tail = dataloader_tail\n self.iterator_head = self.one_shot_iterator(dataloader_head, neg_chunk_size,\n neg_sample_size, is_chunked,\n True, num_nodes)\n self.iterator_tail = self.one_shot_iterator(dataloader_tail, neg_chunk_size,\n neg_sample_size, is_chunked,\n False, num_nodes)\n self.step = 0\n\n def __next__(self):\n self.step += 1\n if self.step % 2 == 0:\n pos_g, neg_g = next(self.iterator_head)\n else:\n pos_g, neg_g = next(self.iterator_tail)\n return pos_g, neg_g\n\n @staticmethod\n def one_shot_iterator(dataloader, neg_chunk_size, neg_sample_size, is_chunked,\n neg_head, num_nodes):\n while True:\n for pos_g, neg_g in dataloader:\n neg_g = create_neg_subgraph(pos_g, neg_g, neg_chunk_size, neg_sample_size,\n is_chunked, neg_head, num_nodes)\n if neg_g is None:\n continue\n\n pos_g.ndata['id'] = pos_g.parent_nid\n neg_g.ndata['id'] = neg_g.parent_nid\n pos_g.edata['id'] = pos_g._parent.edata['tid'][pos_g.parent_eid]\n yield pos_g, neg_g\n" ]
[ [ "numpy.zeros", "numpy.argmin", "numpy.argsort", "numpy.arange", "numpy.array", "numpy.concatenate", "numpy.unique" ] ]
hanqiu-hq/cvpods
[ "597fa669151fdad87c250fa118a9e3a555f4fb5e" ]
[ "cvpods/layers/tree_filter_v2.py" ]
[ "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# Copyright (C) 2019-2021 Megvii Inc. All rights reserved.\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom .tree_filter_core import MinimumSpanningTree, RandomSpanningTree, TreeFilter2D\n\n\nclass TreeFilterV2(nn.Module):\n def __init__(self, guide_channels, in_channels, embed_channels, num_groups=1, eps=1e-8):\n super(TreeFilterV2, self).__init__()\n ''' Hyper Parameters '''\n self.eps = eps\n self.guide_channels = guide_channels\n self.in_channels = in_channels\n self.embed_channels = embed_channels\n self.num_groups = num_groups\n\n ''' Embedding Layers '''\n self.embed_layer = nn.Conv2d(in_channels, embed_channels, kernel_size=1, bias=False)\n self.conf_layer = nn.Conv2d(in_channels, num_groups, kernel_size=1, bias=False)\n self.guide_layer = nn.Conv2d(guide_channels, self.embed_channels, kernel_size=1, bias=False)\n self.beta = nn.Parameter(torch.zeros(num_groups))\n self.gamma = nn.Parameter(torch.zeros(1))\n\n '''Core of Tree Filter'''\n self.rst_layer = RandomSpanningTree(TreeFilter2D.norm2_distance, torch.exp)\n self.mst_layer = MinimumSpanningTree(TreeFilter2D.norm2_distance, torch.exp)\n self.tree_filter_layer = TreeFilter2D(groups=num_groups)\n\n ''' Parameters init '''\n self.reset_parameter()\n\n def reset_parameter(self):\n nn.init.constant_(self.conf_layer.weight, 0)\n nn.init.normal_(self.embed_layer.weight, std=0.01)\n nn.init.normal_(self.guide_layer.weight, std=0.01)\n nn.init.constant_(self.gamma, 0)\n nn.init.constant_(self.beta, 0)\n\n def split_groups(self, x):\n x = x.reshape(x.shape[0] * self.num_groups, -1, *x.shape[2:])\n return x\n\n def expand_groups(self, x):\n target_dim = max(self.num_groups // x.shape[1], 1)\n x = x.unsqueeze(2)\n x = x.expand(*x.shape[:2], target_dim, *x.shape[3:])\n x = x.reshape(x.shape[0], -1, *x.shape[3:])\n return x\n\n def forward(self, feature, guide):\n latent = feature\n\n ''' Compute embedding features '''\n embed = self.embed_layer(feature)\n\n ''' Spanning tree process '''\n guide = F.adaptive_avg_pool2d(guide, feature.shape[-2:])\n guide_embed = self.guide_layer(guide)\n if self.training:\n tree = self.rst_layer(guide_embed)\n else:\n tree = self.mst_layer(guide_embed)\n\n ''' Reshape beta '''\n beta = self.beta.reshape(1, -1, 1, 1)\n beta = beta.expand(embed.shape[0], self.num_groups, *embed.shape[2:])\n\n ''' Compute confidence '''\n conf = self.conf_layer(feature).sigmoid()\n conf = self.expand_groups(conf)\n conf_norm = self.tree_filter_layer(conf, embed, tree, guide_embed, beta)\n\n ''' Feature transform '''\n feature = (self.split_groups(feature) * self.split_groups(conf)).reshape_as(feature)\n feature = self.tree_filter_layer(feature, embed, tree, guide_embed, beta)\n feature_size = feature.size()\n feature = self.split_groups(feature) / (self.eps + self.split_groups(conf_norm))\n feature = feature.reshape(feature_size)\n\n ''' Projection '''\n feature = self.gamma * feature\n feature = feature + latent\n\n return feature\n" ]
[ [ "torch.nn.init.constant_", "torch.nn.init.normal_", "torch.nn.functional.adaptive_avg_pool2d", "torch.nn.Conv2d", "torch.zeros" ] ]
live4dao/RLtrading
[ "c1655a7bfe1220c1d7ae5c0d46814bf3884e6cdb" ]
[ "__main__.py" ]
[ "import numpy as np\nimport pandas as pd\nimport trading_env\n\nfrom datetime import datetime\nst = datetime.now()\n## need to refactor the testcase\n\n# df = pd.read_csv('trading_env/test/data/SGXTWsample.csv', index_col=0, parse_dates=['datetime'])\ndf = pd.read_hdf('D:\\[AIA]\\TradingGym\\dataset\\SGXTWsample.h5', 'STW')\n\nenv = trading_env.make(env_id='training_v1', obs_data_len=256, step_len=128,\n df=df, fee=0.1, max_position=5, deal_col_name='Price', \n feature_names=['Price', 'Volume', \n 'Ask_price','Bid_price', \n 'Ask_deal_vol','Bid_deal_vol',\n 'Bid/Ask_deal', 'Updown'], \n fluc_div=100.0)\n\nenv.reset()\nenv.render()\nprint(env.df_sample['datetime'].iloc[0].date())\nfor i in range(500):\n # print(i)\n state, reward, done, info = env.step(np.random.randint(3))\n # print(state, reward)\n # env.render()\n if done:\n break\nprint(datetime.now() - st)" ]
[ [ "pandas.read_hdf", "numpy.random.randint" ] ]
dbis-uibk/NLP4MusA2020
[ "1d0ab42a7aea416110fbacd632e5bce359b863e8" ]
[ "src/nlp4musa2020/analytics.py" ]
[ "\"\"\"Module providing common functions used for analytics.\"\"\"\nimport os.path\n\nfrom dbispipeline.analytics import extract_gridsearch_parameters\nfrom dbispipeline.db import DB\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n\ndef get_results(project_name, filter_git_dirty=True):\n \"\"\"Returns the results stored in the databes as a pandas dataframe.\n\n Args:\n project_name: The project name to fetch results.\n filter_git_dirty: defines if dirty commits are filterd.\n \"\"\"\n results = pd.read_sql_table(table_name='results', con=DB.engine)\n\n if filter_git_dirty:\n results = results[results['git_is_dirty'] == False] # noqa: E712\n\n return results[results['project_name'] == project_name]\n\n\ndef extract_best_result(result, score):\n \"\"\"Extracts the max value result for a given score column.\n\n Args:\n result: dataframe to extract results from.\n score: the column used to select the max value.\n \"\"\"\n result = result[result[score] >= result[score].max()]\n return result\n\n\ndef get_best_results(score_name, score_prefix='mean_test_', max_value=None):\n \"\"\"Returns the best results for a given score.\n\n Args:\n score_name: the name of the score that is prefixed with score_prefix.\n The result is the column used to extract the results.\n score_prefix: the prefix of the score name.\n max_value: If not None, include only results that have scores lower\n than this value.\n \"\"\"\n data = get_results(project_name='nlp4musa2020')\n\n score = score_prefix + score_name\n\n result = pd.DataFrame()\n for _, group in data.groupby(['sourcefile']):\n outcome = None\n try:\n outcome = extract_gridsearch_parameters(group, score_name=score)\n except KeyError:\n continue\n # FIXME: Remove after updating the dbispipeline\n outcome[score] = outcome['score']\n if '_neg_' in score:\n outcome[score] *= -1\n\n result = result.append(extract_best_result(outcome, score=score))\n\n if max_value is not None:\n result = result[result[score] < max_value]\n\n if len(result) < 1:\n raise Exception('No results found.')\n\n return result\n\n\ndef plot_best_results(score_name,\n score_prefix='mean_test_',\n max_value=None,\n result_path=None,\n file_ext='pdf'):\n \"\"\"Plots the to results for a given metric.\n\n Args:\n score_name: the name of the score that is prefixed with score_prefix.\n The result is the column used to extract the results.\n score_prefix: the prefix of the score name.\n max_value: If not None, include only results that have scores lower\n than this value.\n result_path: the path used to store result files.\n file_ext: the file extension used for the plots.\n \"\"\"\n result = get_best_results(score_name, score_prefix, max_value)\n\n score = score_prefix + score_name\n result[['sourcefile', score]].plot.bar(\n x='sourcefile',\n title=score_name,\n grid=True,\n figsize=(30, 10),\n )\n\n file_name = 'best_results_' + score_name + '.' + file_ext\n if result_path is not None:\n file_name = os.path.join(result_path, file_name)\n\n plt.savefig(file_name)\n" ]
[ [ "pandas.read_sql_table", "matplotlib.pyplot.savefig", "pandas.DataFrame" ] ]
afiolmahon/ducky25
[ "c740931bee73526e0edb22f3a1f9bf3d71287b1a" ]
[ "catkin_ws/src/lane_filter/src/lane_filter_node.py" ]
[ "#!/usr/bin/env python\nimport rospy\nimport numpy as np\nfrom cv_bridge import CvBridge, CvBridgeError\nfrom sensor_msgs.msg import Image\nfrom std_msgs.msg import Float32\nfrom duckietown_msgs.msg import SegmentList, Segment, Pixel, LanePose, BoolStamped, Twist2DStamped\nfrom scipy.stats import multivariate_normal, entropy\nfrom scipy.ndimage.filters import gaussian_filter\nfrom math import floor, atan2, pi, cos, sin, sqrt\nimport time\n\n\nclass LaneFilterNode(object):\n \"\"\"\n \nLane Filter Node\n\nAuthor: Liam Paull\n\nInputs: SegmentList from line detector\n\nOutputs: LanePose - the d (lateral displacement) and phi (relative angle) \nof the car in the lane\n\nFor more info on algorithm and parameters please refer to the google doc:\n https://drive.google.com/open?id=0B49dGT7ubfmSX1k5ZVN1dEU4M2M\n\n \"\"\"\n def __init__(self):\n self.node_name = \"Lane Filter\"\n self.active = True\n self.updateParams(None)\n \n self.d,self.phi = np.mgrid[self.d_min:self.d_max:self.delta_d,self.phi_min:self.phi_max:self.delta_phi]\n self.beliefRV=np.empty(self.d.shape)\n self.initializeBelief()\n self.lanePose = LanePose()\n self.lanePose.d=self.mean_0[0]\n self.lanePose.phi=self.mean_0[1]\n\n self.dwa = -(self.zero_val*self.l_peak**2 + self.zero_val*self.l_max**2 - self.l_max**2*self.peak_val - 2*self.zero_val*self.l_peak*self.l_max + 2*self.l_peak*self.l_max*self.peak_val)/(self.l_peak**2*self.l_max*(self.l_peak - self.l_max)**2)\n self.dwb = (2*self.zero_val*self.l_peak**3 + self.zero_val*self.l_max**3 - self.l_max**3*self.peak_val - 3*self.zero_val*self.l_peak**2*self.l_max + 3*self.l_peak**2*self.l_max*self.peak_val)/(self.l_peak**2*self.l_max*(self.l_peak - self.l_max)**2)\n self.dwc = -(self.zero_val*self.l_peak**3 + 2*self.zero_val*self.l_max**3 - 2*self.l_max**3*self.peak_val - 3*self.zero_val*self.l_peak*self.l_max**2 + 3*self.l_peak*self.l_max**2*self.peak_val)/(self.l_peak*self.l_max*(self.l_peak - self.l_max)**2)\n\n\n self.t_last_update = rospy.get_time()\n self.v_current = 0\n self.w_current = 0\n self.v_last = 0\n self.w_last = 0\n self.v_avg = 0\n self.w_avg = 0\n\n # Subscribers\n if self.use_propagation:\n self.sub_velocity = rospy.Subscriber(\"~velocity\", Twist2DStamped, self.updateVelocity)\n self.sub = rospy.Subscriber(\"~segment_list\", SegmentList, self.processSegments, queue_size=1)\n\n # Publishers\n self.pub_lane_pose = rospy.Publisher(\"~lane_pose\", LanePose, queue_size=1)\n self.pub_belief_img = rospy.Publisher(\"~belief_img\", Image, queue_size=1)\n self.pub_entropy = rospy.Publisher(\"~entropy\",Float32, queue_size=1)\n \t#self.pub_prop_img = rospy.Publisher(\"~prop_img\", Image, queue_size=1)\n self.pub_in_lane = rospy.Publisher(\"~in_lane\",BoolStamped, queue_size=1)\n self.sub_switch = rospy.Subscriber(\"~switch\", BoolStamped, self.cbSwitch, queue_size=1)\n\n self.timer = rospy.Timer(rospy.Duration.from_sec(1.0), self.updateParams)\n\n\n def updateParams(self, event):\n self.mean_0 = [rospy.get_param(\"~mean_d_0\",0) , rospy.get_param(\"~mean_phi_0\",0)]\n self.cov_0 = [ [rospy.get_param(\"~sigma_d_0\",0.1) , 0] , [0, rospy.get_param(\"~sigma_phi_0\",0.01)] ]\n self.delta_d = rospy.get_param(\"~delta_d\",0.02) # in meters\n self.delta_phi = rospy.get_param(\"~delta_phi\",0.02) # in radians\n self.d_max = rospy.get_param(\"~d_max\",0.5)\n self.d_min = rospy.get_param(\"~d_min\",-0.7)\n self.phi_min = rospy.get_param(\"~phi_min\",-pi/2)\n self.phi_max = rospy.get_param(\"~phi_max\",pi/2)\n\n self.cov_v = rospy.get_param(\"~cov_v\",0.5) # linear velocity 
\"input\"\n self.cov_omega = rospy.get_param(\"~cov_omega\",0.01) # angular velocity \"input\"\n self.linewidth_white = rospy.get_param(\"~linewidth_white\",0.04)\n self.linewidth_yellow = rospy.get_param(\"~linewidth_yellow\",0.02)\n self.lanewidth = rospy.get_param(\"~lanewidth\",0.4)\n self.min_max = rospy.get_param(\"~min_max\", 0.3) # nats\n # For use of distance weighting (dw) function\n self.use_distance_weighting = rospy.get_param(\"~use_distance_weighting\",False)\n self.zero_val = rospy.get_param(\"~zero_val\",1)\n self.l_peak = rospy.get_param(\"~l_peak\",1)\n self.peak_val = rospy.get_param(\"~peak_val\",10)\n self.l_max = rospy.get_param(\"~l_max\",2)\n\n # For use of maximum segment distance\n self.use_max_segment_dist = rospy.get_param(\"~use_max_segment_dist\",False)\n self.max_segment_dist = rospy.get_param(\"~max_segment_dist\",1.0)\n\n # For use of minimum segment count\n self.use_min_segs = rospy.get_param(\"~use_min_segs\",False)\n self.min_segs = rospy.get_param(\"~min_segs\", 10)\n\n # For propagation\n self.use_propagation = rospy.get_param(\"~use_propagation\",False)\n self.cov_mask = [rospy.get_param(\"~sigma_d_mask\",0.05) , rospy.get_param(\"~sigma_phi_mask\",0.05)]\n\n def cbSwitch(self, switch_msg):\n self.active = switch_msg.data\n\n def processSegments(self,segment_list_msg):\n if not self.active:\n return\n t_start = rospy.get_time()\n\n if self.use_propagation:\n self.propagateBelief()\n self.t_last_update = rospy.get_time()\n\n # initialize measurement likelihood\n measurement_likelihood = np.zeros(self.d.shape)\n\n for segment in segment_list_msg.segments:\n if segment.color != segment.WHITE and segment.color != segment.YELLOW:\n continue\n if segment.points[0].x < 0 or segment.points[1].x < 0:\n continue\n\n d_i,phi_i,l_i = self.generateVote(segment)\n if d_i > self.d_max or d_i < self.d_min or phi_i < self.phi_min or phi_i>self.phi_max:\n continue\n if self.use_max_segment_dist and (l_i > self.max_segment_dist):\n continue\n\n i = int(floor((d_i - self.d_min)/self.delta_d))\n j = int(floor((phi_i - self.phi_min)/self.delta_phi))\n\n if self.use_distance_weighting: \n dist_weight = self.dwa*l_i**3+self.dwb*l_i**2+self.dwc*l_i+self.zero_val\n if dist_weight < 0:\n continue\n measurement_likelihood[i,j] = measurement_likelihood[i,j] + dist_weight\n else:\n measurement_likelihood[i,j] = measurement_likelihood[i,j] + 1/(l_i)\n\n\n if np.linalg.norm(measurement_likelihood) == 0:\n return\n measurement_likelihood = measurement_likelihood/np.sum(measurement_likelihood)\n\n if self.use_propagation:\n self.updateBelief(measurement_likelihood)\n else:\n self.beliefRV = measurement_likelihood\n\n # TODO entropy test:\n #print self.beliefRV.argmax()\n\n maxids = np.unravel_index(self.beliefRV.argmax(),self.beliefRV.shape)\n # rospy.loginfo('maxids: %s' % maxids)\n self.lanePose.header.stamp = segment_list_msg.header.stamp\n self.lanePose.d = self.d_min + maxids[0]*self.delta_d\n self.lanePose.phi = self.phi_min + maxids[1]*self.delta_phi\n self.lanePose.status = self.lanePose.NORMAL\n\n # publish the belief image\n bridge = CvBridge()\n belief_img = bridge.cv2_to_imgmsg((255*self.beliefRV).astype('uint8'), \"mono8\")\n belief_img.header.stamp = segment_list_msg.header.stamp\n \n max_val = self.beliefRV.max()\n self.lanePose.in_lane = max_val > self.min_max and len(segment_list_msg.segments) > self.min_segs and np.linalg.norm(measurement_likelihood) != 0\n self.pub_lane_pose.publish(self.lanePose)\n self.pub_belief_img.publish(belief_img)\n\n # print \"time to 
process segments:\"\n # print rospy.get_time() - t_start\n\n # Publish in_lane according to the ent\n in_lane_msg = BoolStamped()\n in_lane_msg.header.stamp = segment_list_msg.header.stamp\n in_lane_msg.data = self.lanePose.in_lane\n # ent = entropy(self.beliefRV)\n # if (ent < self.max_entropy):\n # in_lane_msg.data = True\n # else:\n # in_lane_msg.data = False\n self.pub_in_lane.publish(in_lane_msg)\n\n def updateVelocity(self,twist_msg):\n self.v_current = twist_msg.v\n self.w_current = twist_msg.omega\n \n #self.v_avg = (self.v_current + self.v_last)/2.0\n #self.w_avg = (self.w_current + self.w_last)/2.0\n\n #self.v_last = v_current\n #self.w_last = w_current\n\n def initializeBelief(self):\n pos = np.empty(self.d.shape + (2,))\n pos[:,:,0]=self.d\n pos[:,:,1]=self.phi\n self.cov_0\n RV = multivariate_normal(self.mean_0,self.cov_0)\n self.beliefRV=RV.pdf(pos)\n\n def propagateBelief(self):\n delta_t = rospy.get_time() - self.t_last_update\n\n d_t = self.d + self.v_current*delta_t*np.sin(self.phi)\n phi_t = self.phi + self.w_current*delta_t\n\n p_beliefRV = np.zeros(self.beliefRV.shape)\n\n for i in range(self.beliefRV.shape[0]):\n for j in range(self.beliefRV.shape[1]):\n if self.beliefRV[i,j] > 0:\n if d_t[i,j] > self.d_max or d_t[i,j] < self.d_min or phi_t[i,j] < self.phi_min or phi_t[i,j] > self.phi_max:\n continue\n i_new = floor((d_t[i,j] - self.d_min)/self.delta_d)\n j_new = floor((phi_t[i,j] - self.phi_min)/self.delta_phi)\n p_beliefRV[i_new,j_new] += self.beliefRV[i,j]\n\n s_beliefRV = np.zeros(self.beliefRV.shape)\n gaussian_filter(100*p_beliefRV, self.cov_mask, output=s_beliefRV, mode='constant')\n\n if np.sum(s_beliefRV) == 0:\n return\n self.beliefRV = s_beliefRV/np.sum(s_beliefRV)\n\n \t#bridge = CvBridge()\n #prop_img = bridge.cv2_to_imgmsg((255*self.beliefRV).astype('uint8'), \"mono8\")\n #self.pub_prop_img.publish(prop_img)\n \n return\n\n def updateBelief(self,measurement_likelihood):\n self.beliefRV=np.multiply(self.beliefRV+1,measurement_likelihood+1)-1\n self.beliefRV=self.beliefRV/np.sum(self.beliefRV)#np.linalg.norm(self.beliefRV)\n\n def generateVote(self,segment):\n p1 = np.array([segment.points[0].x, segment.points[0].y])\n p2 = np.array([segment.points[1].x, segment.points[1].y])\n t_hat = (p2-p1)/np.linalg.norm(p2-p1)\n n_hat = np.array([-t_hat[1],t_hat[0]])\n d1 = np.inner(n_hat,p1)\n d2 = np.inner(n_hat,p2)\n l1 = np.inner(t_hat,p1)\n l2 = np.inner(t_hat,p2)\n if (l1 < 0):\n l1 = -l1;\n if (l2 < 0):\n l2 = -l2;\n l_i = (l1+l2)/2\n d_i = (d1+d2)/2\n phi_i = np.arcsin(t_hat[1])\n if segment.color == segment.WHITE: # right lane is white\n if(p1[0] > p2[0]): # right edge of white lane\n d_i = d_i - self.linewidth_white\n else: # left edge of white lane\n d_i = - d_i\n phi_i = -phi_i\n d_i = d_i - self.lanewidth/2\n\n elif segment.color == segment.YELLOW: # left lane is yellow\n if (p2[0] > p1[0]): # left edge of yellow lane\n d_i = d_i - self.linewidth_yellow\n phi_i = -phi_i\n else: # right edge of white lane\n d_i = -d_i\n d_i = self.lanewidth/2 - d_i\n\n return d_i, phi_i, l_i\n\n def getSegmentDistance(self, segment):\n x_c = (segment.points[0].x + segment.points[1].x)/2\n y_c = (segment.points[0].y + segment.points[1].y)/2\n\n return sqrt(x_c**2 + y_c**2)\n\n def onShutdown(self):\n rospy.loginfo(\"[LaneFilterNode] Shutdown.\")\n\n\nif __name__ == '__main__':\n rospy.init_node('lane_filter',anonymous=False)\n lane_filter_node = LaneFilterNode()\n rospy.on_shutdown(lane_filter_node.onShutdown)\n rospy.spin()\n" ]
[ [ "numpy.sum", "numpy.multiply", "numpy.arcsin", "numpy.empty", "numpy.zeros", "numpy.sin", "numpy.array", "scipy.ndimage.filters.gaussian_filter", "numpy.inner", "numpy.linalg.norm", "scipy.stats.multivariate_normal" ] ]
Lisennlp/distributed_train_pytorch
[ "da43ac6b5f4484b5f7bc92e3c778539b9017cb82" ]
[ "main.py" ]
[ "import argparse\nimport os\nimport random\n\nimport torch\nfrom torch import distributed as dist\nfrom torch.utils.data import DataLoader\nfrom torch.nn.parallel import DistributedDataParallel as DDP\nfrom torch.utils.data.distributed import DistributedSampler\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\n\n\nclass LinModel(nn.Module):\n\n def __init__(self, in_dim, out_dim):\n super(LinModel, self).__init__()\n self.linear = nn.Linear(in_dim, out_dim)\n\n def forward(self, x):\n out = self.linear(x)\n out = F.softmax(out, dim=-1)\n return out\n\n\ndef reduce_loss(tensor, rank, world_size):\n with torch.no_grad():\n dist.reduce(tensor, dst=0)\n if rank == 0:\n tensor /= world_size\n\n\ndef build_fake_data(size=1000):\n x1 = [(random.uniform(0, 0.5), 0) for i in range(size // 2)]\n x2 = [(random.uniform(0.5, 1), 1) for i in range(size // 2)]\n return x1 + x2\n\n\ndef evaluate(valid_loader):\n net.eval()\n with torch.no_grad():\n cnt = 0\n total = 0\n for inputs, labels in valid_loader:\n inputs, labels = inputs.unsqueeze(1).float().cuda(), labels.long().cuda()\n output = net(inputs)\n predict = torch.argmax(output, dim=1)\n cnt += (predict == labels).sum().item()\n total += len(labels)\n # print(f'right = {(predict == labels).sum()}')\n cnt = torch.Tensor([cnt]).to(inputs.device)\n total = torch.Tensor([total]).to(inputs.device)\n reduced_param = torch.cat((cnt.view(1), total.view(1)))\n cnt = reduced_param[0].item()\n total = reduced_param[1].item()\n return cnt, total\n\n\ndef set_random_seed(seed):\n \"\"\"Set random seed for reproducability.\"\"\"\n if seed is not None and seed > 0:\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--local_rank', type=int, default=-1, help=\"local gpu id\")\nparser.add_argument('--batch_size', type=int, default=128, help=\"batch size\")\nparser.add_argument('--lr', type=float, default=0.1, help=\"learn rate\")\nparser.add_argument('--epochs', type=int, default=5, help=\"train epoch\")\nparser.add_argument('--seed', type=int, default=40, help=\"train epoch\")\n\n\nargs = parser.parse_args()\nargs.world_size = int(os.getenv(\"WORLD_SIZE\", '1'))\n\nset_random_seed(args.seed)\ndist.init_process_group(backend='nccl', init_method='env://')\ntorch.cuda.set_device(args.local_rank)\nglobal_rank = dist.get_rank()\n\nprint(f'global_rank = {global_rank} local_rank = {args.local_rank} world_size = {args.world_size}')\n\nnet = LinModel(1, 2)\nnet.cuda()\nnet = torch.nn.SyncBatchNorm.convert_sync_batchnorm(net)\nnet = DDP(net, device_ids=[args.local_rank], output_device=args.local_rank)\n\ntrainset = build_fake_data(size=10000)\nvalidset = build_fake_data(size=10000)\n\ntrain_sampler = DistributedSampler(trainset)\nvalid_sampler = DistributedSampler(validset)\n\ntrain_loader = DataLoader(trainset,\n batch_size=args.batch_size,\n shuffle=False,\n pin_memory=True,\n sampler=train_sampler)\n\nvalid_loader = DataLoader(validset,\n batch_size=args.batch_size,\n shuffle=False,\n pin_memory=True,\n sampler=valid_sampler)\n\ncriterion = torch.nn.CrossEntropyLoss()\nopt = torch.optim.Adam(net.parameters(), lr=args.lr)\n\nnet.train()\nfor e in range(int(args.epochs)):\n\n for idx, (inputs, labels) in enumerate(train_loader):\n inputs = inputs.unsqueeze(1).float().cuda()\n labels = labels.long().cuda()\n output = net(inputs)\n loss = criterion(output, labels)\n opt.zero_grad()\n loss.backward()\n opt.step()\n reduce_loss(loss, 
global_rank, args.world_size)\n # if idx % 10 == 0 and global_rank == 0:\n # print('Epoch: {} step: {} loss: {}'.format(e, idx, loss.item()))\n cnt, total = evaluate(valid_loader)\n if global_rank == 0:\n print(f'epoch {e} || eval accuracy: {cnt / total}')\n\n# if global_rank == 0:\n# print(f'net weight = {net.state_dict()}')\n" ]
[ [ "torch.utils.data.DataLoader", "torch.nn.SyncBatchNorm.convert_sync_batchnorm", "torch.distributed.reduce", "torch.nn.Linear", "torch.distributed.get_rank", "torch.utils.data.distributed.DistributedSampler", "torch.argmax", "torch.cuda.manual_seed", "torch.nn.functional.softmax", "torch.distributed.init_process_group", "torch.no_grad", "numpy.random.seed", "torch.manual_seed", "torch.nn.CrossEntropyLoss", "torch.nn.parallel.DistributedDataParallel", "torch.Tensor", "torch.cuda.set_device" ] ]
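Note on the entry above: its evaluate() packs the per-rank cnt/total counters into reduced_param but never calls a torch.distributed collective on it, so with DistributedSampler sharding validset each rank reports accuracy over its own shard only. Below is a minimal sketch of the missing aggregation step; the helper name aggregate_counts and the single-process fallback are illustrative additions, not part of the original file.

import torch
import torch.distributed as dist


def aggregate_counts(correct, total, device):
    """Sum per-rank (correct, total) counters over all processes."""
    stats = torch.tensor([correct, total], dtype=torch.float64, device=device)
    if dist.is_available() and dist.is_initialized():
        # Every rank receives the global sums after the all_reduce.
        dist.all_reduce(stats, op=dist.ReduceOp.SUM)
    return stats[0].item(), stats[1].item()


# Usage (also works without init_process_group, where the reduction is a no-op):
correct, total = aggregate_counts(42, 128, torch.device("cpu"))
print(f"accuracy = {correct / total:.3f}")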
dvd42/synbols
[ "3f12a4d9354c7e05313fe0028a108b29409b7171" ]
[ "synbols/visualization.py" ]
[ "from matplotlib import pyplot as plt\n\nfrom .utils import make_img_grid\n\n\ndef plot_dataset(x, y, h_axis=\"char\", v_axis=\"font\", n_row=20, n_col=40, hide_axis=False):\n img_grid, h_values, v_values = make_img_grid(x, y, h_axis, v_axis, n_row, n_col)\n\n plt.tight_layout()\n\n plt.imshow(img_grid)\n\n plt.xlabel(h_axis)\n plt.ylabel(v_axis)\n\n if hide_axis:\n ax = plt.gca()\n\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n\n plt.gcf().tight_layout()\n return h_values, v_values\n" ]
[ [ "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.gca", "matplotlib.pyplot.gcf", "matplotlib.pyplot.imshow", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlabel" ] ]
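The plot_dataset helper above leans on make_img_grid from the package's own utils module, which this entry does not include. A rough, self-contained stand-in with plain NumPy and matplotlib is sketched below to show the same tiling-and-labelling idea; the random images and grid dimensions are placeholders, not the real attribute-sorted grid.

import numpy as np
from matplotlib import pyplot as plt

rng = np.random.default_rng(0)
n_row, n_col, h, w = 4, 8, 32, 32
imgs = rng.random((n_row * n_col, h, w, 3))  # placeholder for the dataset images x

# Tile the images row-major into one (n_row*h, n_col*w, 3) canvas.
img_grid = (imgs.reshape(n_row, n_col, h, w, 3)
                .transpose(0, 2, 1, 3, 4)
                .reshape(n_row * h, n_col * w, 3))

plt.imshow(img_grid)
plt.xlabel("char")  # h_axis
plt.ylabel("font")  # v_axis
plt.tight_layout()
plt.show()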
ashutom/tensorflow-upstream
[ "3457a2b122e50b4d44ceaaed5a663d635e5c22df" ]
[ "tensorflow/python/autograph/pyct/transpiler.py" ]
[ "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Generic source code transformation infrastructure.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport inspect\nimport threading\nimport types\n\nimport gast\n\nfrom tensorflow.python.autograph.pyct import cache\nfrom tensorflow.python.autograph.pyct import inspect_utils\nfrom tensorflow.python.autograph.pyct import loader\nfrom tensorflow.python.autograph.pyct import naming\nfrom tensorflow.python.autograph.pyct import origin_info\nfrom tensorflow.python.autograph.pyct import parser\nfrom tensorflow.python.autograph.pyct import templates\nfrom tensorflow.python.autograph.pyct import transformer\nfrom tensorflow.python.autograph.utils import ag_logging as logging\n\n\ndef _wrap_into_factory(nodes, entity_name, inner_factory_name,\n outer_factory_name, closure_vars, factory_args,\n future_features):\n \"\"\"Wraps an AST into the body of a factory with consistent lexical context.\n\n The AST is expected to define some symbol with a name given by `entity_name`.\n\n This mechanism ensures that the resulting transformed entity has lexical\n scoping identical to that of the source entity, while allowing extra\n parametrization.\n\n Two nested factories achieve the following:\n\n 1. The inner factory dynamically creates the entity represented by `nodes`.\n 2. The inner factory is parametrized by a custom set of arguments.\n 3. The inner factory has a closure identical to that of the transformed\n entity.\n 4. The inner factory has local variables named like `args`, which `nodes` may\n use as additional parameters.\n 5. The inner factory returns the variables given by `entity_name`.\n 6. The outer factory is niladic.\n 7. The outer factory has no closure.\n 8. The outer factory creates the necessary lexical scope for the inner\n factory, so that the loaded code has the given configuration for\n closure/globals.\n 9. The outer factory returns the inner factory.\n\n Roughly speaking, the following code is generated:\n\n from __future__ import future_feature_1\n from __future__ import future_feature_2\n ...\n\n def outer_factory():\n closure_var_1 = None\n closure_var_2 = None\n ...\n\n def inner_factory(arg_1, arg_2, ...):\n <<nodes>>\n return entity\n\n return inner_factory\n\n The lexical scoping is created using dummy symbol declarations which create\n local variables in the body of the outer factory, so that the Python parser\n correctly marks them as free non-global variables upon load (that is, it\n creates cell slots for each symbol. These symbols are initialized with None,\n but their values are not expected to be used; instead, the caller is expected\n to replace them with the cells of the source entity. 
For more details, see:\n https://docs.python.org/3/reference/executionmodel.html#binding-of-names\n\n Args:\n nodes: Tuple[ast.AST], the source code to wrap.\n entity_name: Union[Text, ast.AST], the name of the principal entity that\n `nodes` define.\n inner_factory_name: Text, the name of the inner factory.\n outer_factory_name: Text, the name of the outer factory.\n closure_vars: Iterable[Text], names of the closure variables for the inner\n factory.\n factory_args: Iterable[Text], names of additional arguments for the\n inner factory. Useful to configure variables that the converted code can\n use. Typically, these are modules.\n future_features: Iterable[Text], names of future statements to associate the\n code with.\n\n Returns:\n ast.AST\n \"\"\"\n dummy_closure_defs = []\n for var_name in closure_vars:\n template = \"\"\"\n var_name = None\n \"\"\"\n dummy_closure_defs.extend(templates.replace(template, var_name=var_name))\n\n if future_features:\n future_imports = gast.ImportFrom(\n module='__future__',\n names=[gast.alias(name=name, asname=None) for name in future_features],\n level=0)\n else:\n future_imports = []\n\n factory_args = [\n gast.Name(name, ctx=gast.Param(), annotation=None, type_comment=None)\n for name in factory_args\n ]\n\n template = \"\"\"\n future_imports\n def outer_factory_name():\n dummy_closure_defs\n def inner_factory_name(factory_args):\n entity_defs\n return entity_name\n return inner_factory_name\n \"\"\"\n return templates.replace(\n template,\n dummy_closure_defs=dummy_closure_defs,\n entity_defs=nodes,\n entity_name=entity_name,\n factory_args=factory_args,\n future_imports=future_imports,\n inner_factory_name=inner_factory_name,\n outer_factory_name=outer_factory_name)\n\n\nclass _PythonFnFactory(object):\n \"\"\"Helper object that wraps a Python function factory.\"\"\"\n\n def __init__(self, name, freevars, extra_locals):\n \"\"\"Creates a new factory for a Python function.\n\n Args:\n name: The function name.\n freevars: The list of non-global free variables for the function.\n extra_locals: Dict[Text, Any], names and values for custom variables that\n are accessible to the generated code as local variables.\n \"\"\"\n self._name = name\n self._freevars = freevars\n self._extra_locals = extra_locals\n\n self._unbound_factory = None\n self.module = None\n self.source_map = None\n\n def create(self,\n nodes,\n namer,\n inner_factory_name='inner_factory',\n outer_factory_name='outer_factory',\n future_features=()):\n \"\"\"Initializes a function.\"\"\"\n if self._unbound_factory is not None:\n raise ValueError('double initialization; create a new object instead')\n\n inner_factory_name = namer.new_symbol(inner_factory_name, ())\n outer_factory_name = namer.new_symbol(outer_factory_name, ())\n nodes = _wrap_into_factory(nodes, self._name, inner_factory_name,\n outer_factory_name, self._freevars,\n self._extra_locals.keys(), future_features)\n\n module, _, source_map = loader.load_ast(\n nodes, include_source_map=True)\n outer_factory = getattr(module, outer_factory_name)\n self._unbound_factory = outer_factory()\n self.module = module\n self.source_map = source_map\n\n def instantiate(self,\n globals_,\n closure,\n defaults=None,\n kwdefaults=None):\n \"\"\"Creates a new function instance.\"\"\"\n if self._unbound_factory is None:\n raise ValueError('call create first')\n\n factory_code = self._unbound_factory.__code__\n factory_freevars = factory_code.co_freevars\n closure_map = dict(zip(self._freevars, closure))\n factory_closure = tuple(\n 
closure_map[name] for name in factory_code.co_freevars)\n if len(factory_closure) != len(closure):\n raise ValueError(\n 'closure mismatch, requested {}, but source function had {}'.format(\n self._freevars, factory_freevars))\n\n bound_factory = types.FunctionType(\n code=factory_code,\n globals=globals_,\n name=self._name,\n argdefs=(),\n closure=factory_closure)\n\n # The lint override is a false positive.\n new_fn = bound_factory(**self._extra_locals) # pylint:disable=not-callable\n\n if defaults:\n new_fn.__defaults__ = defaults\n if kwdefaults:\n new_fn.__kwdefaults__ = kwdefaults\n\n return new_fn\n\n\nclass GenericTranspiler(object):\n \"\"\"A generic transpiler for Python functions.\n\n Its interface is the `transform` API, which can process Python function\n objects. Internally, it handles parsing.\n\n Users typically subclass this, customizing the `transform_ast` method. The\n output of transformed_ast is returned directly by `transform`. Existing\n methods like `transform_function` may also be overloaded.\n\n Example:\n\n class MyTransformer(GenericTranspiler):\n\n def transform_ast(self, node, ctx):\n result = <<transform node>>\n return result\n\n transformer = MyTransfomer()\n\n result = transformer.transform(f, ...)\n # result is the output\n \"\"\"\n\n def get_transformed_name(self, node):\n \"\"\"Returns a name for the output function. Subclasses may override this.\"\"\"\n if isinstance(node, gast.Lambda):\n return 'lam'\n elif isinstance(node, gast.FunctionDef):\n return node.name\n raise ValueError('Unknown node type {}'.format(node))\n\n def transform_ast(self, node, ctx):\n \"\"\"Performs an actual transformation of a function's AST.\n\n Subclasses must implement this method, and do not usually call it.\n\n Args:\n node: One or more ast.AST nodes representing the AST to be transformed.\n ctx: transformer.Context.\n \"\"\"\n raise NotImplementedError('subclasses must override this')\n\n def transform(self, obj, user_context):\n \"\"\"Transforms a Python object.\n\n Users typically call this method.\n\n Args:\n obj: A Python object, function, type, etc.\n user_context: An opaque object (may be None) that is forwarded to\n transform_ast, through the ctx.user_context argument.\n Returns:\n The result of calling transform_function.\n\n Raises:\n NotImplementedError: if the type of obj is not handled.\n \"\"\"\n if inspect.isfunction(obj) or inspect.ismethod(obj):\n return self.transform_function(obj, user_context)\n\n raise NotImplementedError('Non-function: {}'.format(type(obj)))\n\n def _erase_arg_defaults(self, node):\n \"\"\"Erase arg default expressions, which would otherwise be unbound.\"\"\"\n args = node.args\n for i in range(len(args.defaults)):\n args.defaults[i] = parser.parse_expression('None')\n for i, d in enumerate(args.kw_defaults):\n if d is not None:\n args.kw_defaults[i] = parser.parse_expression('None')\n return node\n\n def transform_module(self, mod, user_context):\n \"\"\"Transforms a module.\n\n Subclasses may override this method. The return value is opaque.\n\n The method receives the original AST. The result is passed as-is to the\n output of `transform`.\n\n Args:\n mod: A Python module.\n user_context: An opaque object (may be None) that is forwarded to\n transform_ast, through the ctx.user_context argument.\n Returns:\n List[Tuple[Any, Any]]. 
By default it returns the output of transform_ast,\n evaluated on each supported member, other than modules, together with a\n `transformer.Context` containing information about the transformation\n process.\n \"\"\"\n result = []\n for member in mod.__dict__.values():\n if inspect.ismodule(member):\n continue # Not transforming modules recursively.\n try:\n result.append(self.transform(member, user_context))\n except NotImplementedError:\n pass # Skip unsupported elements.\n return result\n\n def transform_function(self, fn, user_context):\n \"\"\"Transforms a function.\n\n Subclasses may override this method. The return value is opaque.\n\n The method receives the original AST. The result is passed as-is to the\n output of `transform`.\n\n Args:\n fn: A function or lambda.\n user_context: An opaque object (may be None) that is forwarded to\n transform_ast, through the ctx.user_context argument.\n Returns:\n Tuple[Any, Any]. By default it returns the output of transform_ast,\n together with a `transformer.Context` containing information about the\n transformation process.\n \"\"\"\n future_features = inspect_utils.getfutureimports(fn)\n node, source = parser.parse_entity(fn, future_features=future_features)\n logging.log(3, 'Source code of %s:\\n\\n%s\\n', fn, source)\n\n origin_info.resolve_entity(node, source, fn)\n\n namespace = inspect_utils.getnamespace(fn)\n namer = naming.Namer(namespace)\n new_name = namer.new_symbol(self.get_transformed_name(node), ())\n entity_info = transformer.EntityInfo(\n name=new_name,\n source_code=source,\n source_file='<fragment>',\n future_features=future_features,\n namespace=namespace)\n context = transformer.Context(entity_info, namer, user_context)\n\n node = self._erase_arg_defaults(node)\n result = self.transform_ast(node, context)\n\n return result, context\n\n\nclass PyToPy(GenericTranspiler):\n \"\"\"A generic Python-to-Python transpiler.\n\n Its `transform` method offers a function-in, function-out interface.\n Internally, it takes care of parsing, caching and loading of the translated\n code.\n\n Users typically subclass this, overriding `transform_ast`.\n\n Usually, instances of this class are singletons, since each instance manages\n its own cache. The caching can be controlled by overriding `get_caching_key`.\n\n Example:\n\n class MyTransformer(PyToPy):\n\n def transform_ast(self, node, ctx):\n node = <<transform node, usually using ast.NodeTransformer classes>>\n return node\n\n transformer = MyTransfomer()\n\n new_f, module, source_map = transformer.transform_function(f, ...)\n # new_f is a function with signature identical to f\n\n The transformed function has access to the same namespace as the original\n function. 
To allow access to internal APIs, users may inject additional\n symbols by overriding `get_extra_locals`.\n \"\"\"\n\n def __init__(self):\n self._cache_lock = threading.RLock()\n self._cache = cache.CodeObjectCache()\n\n def get_extra_locals(self):\n \"\"\"Returns extra static local variables to be made to transformed code.\n\n Subclasses must override this.\n\n Returns:\n extra_locals: A Dict[Text, Any] containing additional variables to make\n available to the transformed code.\n \"\"\"\n raise NotImplementedError('subclasses must override this')\n\n def get_caching_key(self, user_context):\n \"\"\"Returns a unique key to use for caching.\n\n Subclasses must override this.\n\n Calls made to `transform_function` with functions that have the same code\n object and caching key will return a cached instance on subsequent\n invocations.\n\n Args:\n user_context: The context object which was passed to `transform`.\n\n Returns:\n extra_locals: A hashable.\n \"\"\"\n raise NotImplementedError('subclasses must override this')\n\n def _cached_factory(self, fn, cache_subkey):\n cached_factory = self._cache[fn][cache_subkey]\n logging.log(3, 'Cache hit for %s subkey %s: %s', fn, cache_subkey,\n cached_factory)\n return cached_factory\n\n def transform_function(self, fn, user_context):\n \"\"\"Transforms a function. See GenericTranspiler.trasnform_function.\n\n This overload wraps the parent's `transform_function`, adding caching and\n facilities to instantiate the output as a Python object. It also\n adds facilities to make new symbols available to the generated Python code,\n visible as local variables - see `get_extra_locals`.\n\n Args:\n fn: A function or lambda.\n user_context: An opaque object (may be None) that is forwarded to\n transform_ast, through the ctx.user_context argument.\n Returns:\n A tuple:\n * A function or lambda with the same signature and closure as `fn`\n * The temporary module into which the transformed function was loaded\n * The source map as a\n Dict[origin_info.LineLocation, origin_info.OriginInfo]\n \"\"\"\n cache_subkey = self.get_caching_key(user_context)\n\n if self._cache.has(fn, cache_subkey):\n # Fast path: use a lock-free check.\n factory = self._cached_factory(fn, cache_subkey)\n\n else:\n with self._cache_lock:\n # Check again under lock.\n if self._cache.has(fn, cache_subkey):\n factory = self._cached_factory(fn, cache_subkey)\n\n else:\n logging.log(1, '%s is not cached for subkey %s', fn, cache_subkey)\n # TODO(mdan): Confusing overloading pattern. Fix.\n nodes, ctx = super(PyToPy, self).transform_function(fn, user_context)\n\n if isinstance(nodes, gast.Lambda):\n nodes = gast.Assign(\n targets=[\n gast.Name(\n ctx.info.name,\n ctx=gast.Store(),\n annotation=None,\n type_comment=None)\n ],\n value=nodes)\n else:\n nodes.name = ctx.info.name\n\n if logging.has_verbosity(2):\n logging.log(2, 'Transformed %s:\\n\\n%s\\n', fn, parser.unparse(nodes))\n\n factory = _PythonFnFactory(\n ctx.info.name, fn.__code__.co_freevars, self.get_extra_locals())\n factory.create(\n nodes, ctx.namer, future_features=ctx.info.future_features)\n self._cache[fn][cache_subkey] = factory\n\n transformed_fn = factory.instantiate(\n globals_=fn.__globals__,\n closure=fn.__closure__ or (),\n defaults=fn.__defaults__,\n kwdefaults=getattr(fn, '__kwdefaults__', None))\n return transformed_fn, factory.module, factory.source_map\n" ]
[ [ "tensorflow.python.autograph.pyct.parser.parse_entity", "tensorflow.python.autograph.pyct.transformer.EntityInfo", "tensorflow.python.autograph.pyct.origin_info.resolve_entity", "tensorflow.python.autograph.pyct.transformer.Context", "tensorflow.python.autograph.utils.ag_logging.log", "tensorflow.python.autograph.pyct.templates.replace", "tensorflow.python.autograph.pyct.inspect_utils.getnamespace", "tensorflow.python.autograph.pyct.naming.Namer", "tensorflow.python.autograph.pyct.inspect_utils.getfutureimports", "tensorflow.python.autograph.pyct.loader.load_ast", "tensorflow.python.autograph.pyct.cache.CodeObjectCache", "tensorflow.python.autograph.pyct.parser.unparse", "tensorflow.python.autograph.pyct.parser.parse_expression", "tensorflow.python.autograph.utils.ag_logging.has_verbosity" ] ]
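The transpiler.py entry above chains parsing, transform_ast, factory wrapping, loading, closure rebinding and caching. As a rough illustration of just the core parse-transform-exec loop, here is a toy stand-in built on the standard library ast module; it deliberately skips the closure/cell, __future__ and caching machinery of the TensorFlow code, and every name in it (DoubleConstants, transform_function, add_one) is invented for the sketch.

import ast
import inspect
import textwrap


class DoubleConstants(ast.NodeTransformer):
    """Toy transform_ast step: double every integer literal."""
    def visit_Constant(self, node):
        if isinstance(node.value, int) and not isinstance(node.value, bool):
            return ast.copy_location(ast.Constant(value=node.value * 2), node)
        return node


def transform_function(fn):
    """Parse fn's source, rewrite its AST, and load the rewritten function."""
    source = textwrap.dedent(inspect.getsource(fn))
    tree = ast.fix_missing_locations(DoubleConstants().visit(ast.parse(source)))
    namespace = dict(fn.__globals__)  # simplified: no closure or __future__ handling
    exec(compile(tree, filename="<transpiled>", mode="exec"), namespace)
    return namespace[fn.__name__]


def add_one(x):
    return x + 1


print(transform_function(add_one)(10))  # prints 12: the literal 1 was rewritten to 2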
datability-io/incubator-superset
[ "ebb799140a20964802c0b427d8687ee1b45679b3" ]
[ "superset/views/core.py" ]
[ "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=C,R,W\nfrom datetime import datetime, timedelta\nimport inspect\nimport logging\nimport os\nimport re\nimport time\nimport traceback\nfrom urllib import parse\n\nfrom flask import (\n abort, flash, g, Markup, redirect, render_template, request, Response, url_for,\n)\nfrom flask_appbuilder import expose, SimpleFormView\nfrom flask_appbuilder.actions import action\nfrom flask_appbuilder.models.sqla.interface import SQLAInterface\nfrom flask_appbuilder.security.decorators import has_access, has_access_api\nfrom flask_babel import gettext as __\nfrom flask_babel import lazy_gettext as _\nimport pandas as pd\nimport simplejson as json\nimport sqlalchemy as sqla\nfrom sqlalchemy import and_, create_engine, MetaData, or_, update\nfrom sqlalchemy.engine.url import make_url\nfrom sqlalchemy.exc import IntegrityError\nfrom werkzeug.routing import BaseConverter\nfrom werkzeug.utils import secure_filename\n\nfrom superset import (\n app, appbuilder, cache, db, results_backend,\n security_manager, sql_lab, viz)\nfrom superset.connectors.connector_registry import ConnectorRegistry\nfrom superset.connectors.sqla.models import AnnotationDatasource, SqlaTable\nfrom superset.exceptions import SupersetException\nfrom superset.forms import CsvToDatabaseForm\nfrom superset.jinja_context import get_template_processor\nfrom superset.legacy import cast_form_data, update_time_range\nimport superset.models.core as models\nfrom superset.models.sql_lab import Query\nfrom superset.models.user_attributes import UserAttribute\nfrom superset.sql_parse import ParsedQuery\nfrom superset.utils import core as utils\nfrom superset.utils import dashboard_import_export\nfrom superset.utils.dates import now_as_float\nfrom .base import (\n api, BaseSupersetView,\n check_ownership,\n CsvResponse, data_payload_response, DeleteMixin, generate_download_headers,\n get_error_msg, handle_api_exception, json_error_response, json_success,\n SupersetFilter, SupersetModelView, YamlExportMixin,\n)\nfrom .utils import bootstrap_user_data\n\nconfig = app.config\nstats_logger = config.get('STATS_LOGGER')\nlog_this = models.Log.log_this\nDAR = models.DatasourceAccessRequest\nQueryStatus = utils.QueryStatus\n\n\nALL_DATASOURCE_ACCESS_ERR = __(\n 'This endpoint requires the `all_datasource_access` permission')\nDATASOURCE_MISSING_ERR = __('The data source seems to have been deleted')\nACCESS_REQUEST_MISSING_ERR = __(\n 'The access requests seem to have been deleted')\nUSER_MISSING_ERR = __('The user seems to have been deleted')\n\nFORM_DATA_KEY_BLACKLIST = []\nif not config.get('ENABLE_JAVASCRIPT_CONTROLS'):\n FORM_DATA_KEY_BLACKLIST = [\n 'js_tooltip',\n 'js_onclick_href',\n 'js_data_mutator',\n ]\n\n\ndef 
get_database_access_error_msg(database_name):\n return __('This view requires the database %(name)s or '\n '`all_datasource_access` permission', name=database_name)\n\n\ndef is_owner(obj, user):\n \"\"\" Check if user is owner of the slice \"\"\"\n return obj and user in obj.owners\n\n\nclass SliceFilter(SupersetFilter):\n def apply(self, query, func): # noqa\n if security_manager.all_datasource_access():\n return query\n perms = self.get_view_menus('datasource_access')\n # TODO(bogdan): add `schema_access` support here\n return query.filter(self.model.perm.in_(perms))\n\n\nclass DashboardFilter(SupersetFilter):\n\n \"\"\"List dashboards for which users have access to at least one slice or are owners\"\"\"\n\n def apply(self, query, func): # noqa\n if security_manager.all_datasource_access():\n return query\n Slice = models.Slice # noqa\n Dash = models.Dashboard # noqa\n User = security_manager.user_model\n # TODO(bogdan): add `schema_access` support here\n datasource_perms = self.get_view_menus('datasource_access')\n slice_ids_qry = (\n db.session\n .query(Slice.id)\n .filter(Slice.perm.in_(datasource_perms))\n )\n owner_ids_qry = (\n db.session\n .query(Dash.id)\n .join(Dash.owners)\n .filter(User.id == User.get_user_id())\n )\n query = query.filter(\n or_(Dash.id.in_(\n db.session.query(Dash.id)\n .distinct()\n .join(Dash.slices)\n .filter(Slice.id.in_(slice_ids_qry)),\n ), Dash.id.in_(owner_ids_qry)),\n )\n return query\n\n\nclass DatabaseView(SupersetModelView, DeleteMixin, YamlExportMixin): # noqa\n datamodel = SQLAInterface(models.Database)\n\n list_title = _('List Databases')\n show_title = _('Show Database')\n add_title = _('Add Database')\n edit_title = _('Edit Database')\n\n list_columns = [\n 'database_name', 'backend', 'allow_run_async',\n 'allow_dml', 'allow_csv_upload', 'expose_in_sqllab', 'creator', 'modified']\n order_columns = [\n 'database_name', 'allow_run_async', 'allow_dml',\n 'modified', 'allow_csv_upload', 'expose_in_sqllab',\n ]\n add_columns = [\n 'database_name', 'sqlalchemy_uri', 'cache_timeout', 'expose_in_sqllab',\n 'allow_run_async', 'allow_csv_upload',\n 'allow_ctas', 'allow_dml', 'force_ctas_schema', 'impersonate_user',\n 'allow_multi_schema_metadata_fetch', 'extra',\n ]\n search_exclude_columns = (\n 'password', 'tables', 'created_by', 'changed_by', 'queries',\n 'saved_queries')\n edit_columns = add_columns\n show_columns = [\n 'tables',\n 'cache_timeout',\n 'extra',\n 'database_name',\n 'sqlalchemy_uri',\n 'perm',\n 'created_by',\n 'created_on',\n 'changed_by',\n 'changed_on',\n ]\n add_template = 'superset/models/database/add.html'\n edit_template = 'superset/models/database/edit.html'\n base_order = ('changed_on', 'desc')\n description_columns = {\n 'sqlalchemy_uri': utils.markdown(\n 'Refer to the '\n '[SqlAlchemy docs]'\n '(http://docs.sqlalchemy.org/en/rel_1_2/core/engines.html#'\n 'database-urls) '\n 'for more information on how to structure your URI.', True),\n 'expose_in_sqllab': _('Expose this DB in SQL Lab'),\n 'allow_run_async': _(\n 'Operate the database in asynchronous mode, meaning '\n 'that the queries are executed on remote workers as opposed '\n 'to on the web server itself. '\n 'This assumes that you have a Celery worker setup as well '\n 'as a results backend. Refer to the installation docs '\n 'for more information.'),\n 'allow_ctas': _('Allow CREATE TABLE AS option in SQL Lab'),\n 'allow_dml': _(\n 'Allow users to run non-SELECT statements '\n '(UPDATE, DELETE, CREATE, ...) 
'\n 'in SQL Lab'),\n 'force_ctas_schema': _(\n 'When allowing CREATE TABLE AS option in SQL Lab, '\n 'this option forces the table to be created in this schema'),\n 'extra': utils.markdown(\n 'JSON string containing extra configuration elements.<br/>'\n '1. The ``engine_params`` object gets unpacked into the '\n '[sqlalchemy.create_engine]'\n '(http://docs.sqlalchemy.org/en/latest/core/engines.html#'\n 'sqlalchemy.create_engine) call, while the ``metadata_params`` '\n 'gets unpacked into the [sqlalchemy.MetaData]'\n '(http://docs.sqlalchemy.org/en/rel_1_0/core/metadata.html'\n '#sqlalchemy.schema.MetaData) call.<br/>'\n '2. The ``metadata_cache_timeout`` is a cache timeout setting '\n 'in seconds for metadata fetch of this database. Specify it as '\n '**\"metadata_cache_timeout\": {\"schema_cache_timeout\": 600, '\n '\"table_cache_timeout\": 600}**. '\n 'If unset, cache will not be enabled for the functionality. '\n 'A timeout of 0 indicates that the cache never expires.<br/>'\n '3. The ``schemas_allowed_for_csv_upload`` is a comma separated list '\n 'of schemas that CSVs are allowed to upload to. '\n 'Specify it as **\"schemas_allowed_for_csv_upload\": '\n '[\"public\", \"csv_upload\"]**. '\n 'If database flavor does not support schema or any schema is allowed '\n 'to be accessed, just leave the list empty', True),\n 'impersonate_user': _(\n 'If Presto, all the queries in SQL Lab are going to be executed as the '\n 'currently logged on user who must have permission to run them.<br/>'\n 'If Hive and hive.server2.enable.doAs is enabled, will run the queries as '\n 'service account, but impersonate the currently logged on user '\n 'via hive.server2.proxy.user property.'),\n 'allow_multi_schema_metadata_fetch': _(\n 'Allow SQL Lab to fetch a list of all tables and all views across '\n 'all database schemas. For large data warehouse with thousands of '\n 'tables, this can be expensive and put strain on the system.'),\n 'cache_timeout': _(\n 'Duration (in seconds) of the caching timeout for charts of this database. '\n 'A timeout of 0 indicates that the cache never expires. '\n 'Note this defaults to the global timeout if undefined.'),\n 'allow_csv_upload': _(\n 'If selected, please set the schemas allowed for csv upload in Extra.'),\n }\n label_columns = {\n 'expose_in_sqllab': _('Expose in SQL Lab'),\n 'allow_ctas': _('Allow CREATE TABLE AS'),\n 'allow_dml': _('Allow DML'),\n 'force_ctas_schema': _('CTAS Schema'),\n 'database_name': _('Database'),\n 'creator': _('Creator'),\n 'changed_on_': _('Last Changed'),\n 'sqlalchemy_uri': _('SQLAlchemy URI'),\n 'cache_timeout': _('Chart Cache Timeout'),\n 'extra': _('Extra'),\n 'allow_run_async': _('Asynchronous Query Execution'),\n 'impersonate_user': _('Impersonate the logged on user'),\n 'allow_csv_upload': _('Allow Csv Upload'),\n 'modified': _('Modified'),\n 'allow_multi_schema_metadata_fetch': _('Allow Multi Schema Metadata Fetch'),\n 'backend': _('Backend'),\n }\n\n def pre_add(self, db):\n self.check_extra(db)\n db.set_sqlalchemy_uri(db.sqlalchemy_uri)\n security_manager.merge_perm('database_access', db.perm)\n # adding a new database we always want to force refresh schema list\n for schema in db.all_schema_names():\n security_manager.merge_perm(\n 'schema_access', security_manager.get_schema_perm(db, schema))\n\n def pre_update(self, db):\n self.pre_add(db)\n\n def pre_delete(self, obj):\n if obj.tables:\n raise SupersetException(Markup(\n 'Cannot delete a database that has tables attached. 
'\n \"Here's the list of associated tables: \" +\n ', '.join('{}'.format(o) for o in obj.tables)))\n\n def _delete(self, pk):\n DeleteMixin._delete(self, pk)\n\n def check_extra(self, db):\n # this will check whether json.loads(extra) can succeed\n try:\n extra = db.get_extra()\n except Exception as e:\n raise Exception('Extra field cannot be decoded by JSON. {}'.format(str(e)))\n\n # this will check whether 'metadata_params' is configured correctly\n metadata_signature = inspect.signature(MetaData)\n for key in extra.get('metadata_params', {}):\n if key not in metadata_signature.parameters:\n raise Exception('The metadata_params in Extra field '\n 'is not configured correctly. The key '\n '{} is invalid.'.format(key))\n\n\nappbuilder.add_link(\n 'Import Dashboards',\n label=__('Import Dashboards'),\n href='/superset/import_dashboards',\n icon='fa-cloud-upload',\n category='Manage',\n category_label=__('Manage'),\n category_icon='fa-wrench')\n\n\nappbuilder.add_view(\n DatabaseView,\n 'Databases',\n label=__('Databases'),\n icon='fa-database',\n category='Sources',\n category_label=__('Sources'),\n category_icon='fa-database')\n\n\nclass DatabaseAsync(DatabaseView):\n list_columns = [\n 'id', 'database_name',\n 'expose_in_sqllab', 'allow_ctas', 'force_ctas_schema',\n 'allow_run_async', 'allow_dml',\n 'allow_multi_schema_metadata_fetch', 'allow_csv_upload',\n 'allows_subquery', 'backend',\n ]\n\n\nappbuilder.add_view_no_menu(DatabaseAsync)\n\n\nclass CsvToDatabaseView(SimpleFormView):\n form = CsvToDatabaseForm\n form_template = 'superset/form_view/csv_to_database_view/edit.html'\n form_title = _('CSV to Database configuration')\n add_columns = ['database', 'schema', 'table_name']\n\n def form_get(self, form):\n form.sep.data = ','\n form.header.data = 0\n form.mangle_dupe_cols.data = True\n form.skipinitialspace.data = False\n form.skip_blank_lines.data = True\n form.infer_datetime_format.data = True\n form.decimal.data = '.'\n form.if_exists.data = 'fail'\n\n def form_post(self, form):\n database = form.con.data\n schema_name = form.schema.data or ''\n\n if not self.is_schema_allowed(database, schema_name):\n message = _('Database \"{0}\" Schema \"{1}\" is not allowed for csv uploads. '\n 'Please contact Superset Admin'.format(database.database_name,\n schema_name))\n flash(message, 'danger')\n return redirect('/csvtodatabaseview/form')\n\n csv_file = form.csv_file.data\n form.csv_file.data.filename = secure_filename(form.csv_file.data.filename)\n csv_filename = form.csv_file.data.filename\n path = os.path.join(config['UPLOAD_FOLDER'], csv_filename)\n try:\n utils.ensure_path_exists(config['UPLOAD_FOLDER'])\n csv_file.save(path)\n table = SqlaTable(table_name=form.name.data)\n table.database = form.data.get('con')\n table.database_id = table.database.id\n table.database.db_engine_spec.create_table_from_csv(form, table)\n except Exception as e:\n try:\n os.remove(path)\n except OSError:\n pass\n message = 'Table name {} already exists. 
Please pick another'.format(\n form.name.data) if isinstance(e, IntegrityError) else e\n flash(\n message,\n 'danger')\n stats_logger.incr('failed_csv_upload')\n return redirect('/csvtodatabaseview/form')\n\n os.remove(path)\n # Go back to welcome page / splash screen\n db_name = table.database.database_name\n message = _('CSV file \"{0}\" uploaded to table \"{1}\" in '\n 'database \"{2}\"'.format(csv_filename,\n form.name.data,\n db_name))\n flash(message, 'info')\n stats_logger.incr('successful_csv_upload')\n return redirect('/tablemodelview/list/')\n\n def is_schema_allowed(self, database, schema):\n if not database.allow_csv_upload:\n return False\n schemas = database.get_schema_access_for_csv_upload()\n if schemas:\n return schema in schemas\n return (security_manager.database_access(database) or\n security_manager.all_datasource_access())\n\n\nappbuilder.add_view_no_menu(CsvToDatabaseView)\n\n\nclass DatabaseTablesAsync(DatabaseView):\n list_columns = ['id', 'all_table_names_in_database', 'all_schema_names']\n\n\nappbuilder.add_view_no_menu(DatabaseTablesAsync)\n\n\nif config.get('ENABLE_ACCESS_REQUEST'):\n class AccessRequestsModelView(SupersetModelView, DeleteMixin):\n datamodel = SQLAInterface(DAR)\n list_columns = [\n 'username', 'user_roles', 'datasource_link',\n 'roles_with_datasource', 'created_on']\n order_columns = ['created_on']\n base_order = ('changed_on', 'desc')\n label_columns = {\n 'username': _('User'),\n 'user_roles': _('User Roles'),\n 'database': _('Database URL'),\n 'datasource_link': _('Datasource'),\n 'roles_with_datasource': _('Roles to grant'),\n 'created_on': _('Created On'),\n }\n\n appbuilder.add_view(\n AccessRequestsModelView,\n 'Access requests',\n label=__('Access requests'),\n category='Security',\n category_label=__('Security'),\n icon='fa-table')\n\n\nclass SliceModelView(SupersetModelView, DeleteMixin): # noqa\n route_base = '/chart'\n datamodel = SQLAInterface(models.Slice)\n\n list_title = _('List Charts')\n show_title = _('Show Chart')\n add_title = _('Add Chart')\n edit_title = _('Edit Chart')\n\n can_add = False\n label_columns = {\n 'datasource_link': _('Datasource'),\n }\n search_columns = (\n 'slice_name', 'description', 'viz_type', 'datasource_name', 'owners',\n )\n list_columns = [\n 'slice_link', 'viz_type', 'datasource_link', 'creator', 'modified']\n order_columns = ['viz_type', 'datasource_link', 'modified']\n edit_columns = [\n 'slice_name', 'description', 'viz_type', 'owners', 'dashboards',\n 'params', 'cache_timeout']\n base_order = ('changed_on', 'desc')\n description_columns = {\n 'description': Markup(\n 'The content here can be displayed as widget headers in the '\n 'dashboard view. Supports '\n '<a href=\"https://daringfireball.net/projects/markdown/\"\">'\n 'markdown</a>'),\n 'params': _(\n 'These parameters are generated dynamically when clicking '\n 'the save or overwrite button in the explore view. This JSON '\n 'object is exposed here for reference and for power users who may '\n 'want to alter specific parameters.',\n ),\n 'cache_timeout': _(\n 'Duration (in seconds) of the caching timeout for this chart. 
'\n 'Note this defaults to the datasource/table timeout if undefined.'),\n }\n base_filters = [['id', SliceFilter, lambda: []]]\n label_columns = {\n 'cache_timeout': _('Cache Timeout'),\n 'creator': _('Creator'),\n 'dashboards': _('Dashboards'),\n 'datasource_link': _('Datasource'),\n 'description': _('Description'),\n 'modified': _('Last Modified'),\n 'owners': _('Owners'),\n 'params': _('Parameters'),\n 'slice_link': _('Chart'),\n 'slice_name': _('Name'),\n 'table': _('Table'),\n 'viz_type': _('Visualization Type'),\n }\n\n def pre_add(self, obj):\n utils.validate_json(obj.params)\n\n def pre_update(self, obj):\n utils.validate_json(obj.params)\n check_ownership(obj)\n\n def pre_delete(self, obj):\n check_ownership(obj)\n\n @expose('/add', methods=['GET', 'POST'])\n @has_access\n def add(self):\n datasources = ConnectorRegistry.get_all_datasources(db.session)\n datasources = [\n {'value': str(d.id) + '__' + d.type, 'label': repr(d)}\n for d in datasources\n ]\n return self.render_template(\n 'superset/add_slice.html',\n bootstrap_data=json.dumps({\n 'datasources': sorted(datasources, key=lambda d: d['label']),\n }),\n )\n\n\nappbuilder.add_view(\n SliceModelView,\n 'Charts',\n label=__('Charts'),\n icon='fa-bar-chart',\n category='',\n category_icon='')\n\n\nclass SliceAsync(SliceModelView): # noqa\n route_base = '/sliceasync'\n list_columns = [\n 'id', 'slice_link', 'viz_type', 'slice_name',\n 'creator', 'modified', 'icons']\n label_columns = {\n 'icons': ' ',\n 'slice_link': _('Chart'),\n }\n\n\nappbuilder.add_view_no_menu(SliceAsync)\n\n\nclass SliceAddView(SliceModelView): # noqa\n route_base = '/sliceaddview'\n list_columns = [\n 'id', 'slice_name', 'slice_url', 'edit_url', 'viz_type', 'params',\n 'description', 'description_markeddown', 'datasource_id', 'datasource_type',\n 'datasource_name_text', 'datasource_link',\n 'owners', 'modified', 'changed_on']\n\n\nappbuilder.add_view_no_menu(SliceAddView)\n\n\nclass DashboardModelView(SupersetModelView, DeleteMixin): # noqa\n route_base = '/dashboard'\n datamodel = SQLAInterface(models.Dashboard)\n\n list_title = _('List Dashboards')\n show_title = _('Show Dashboard')\n add_title = _('Add Dashboard')\n edit_title = _('Edit Dashboard')\n\n list_columns = ['dashboard_link', 'creator', 'modified']\n order_columns = ['modified']\n edit_columns = [\n 'dashboard_title', 'slug', 'owners', 'position_json', 'css',\n 'json_metadata']\n show_columns = edit_columns + ['table_names', 'slices']\n search_columns = ('dashboard_title', 'slug', 'owners')\n add_columns = edit_columns\n base_order = ('changed_on', 'desc')\n description_columns = {\n 'position_json': _(\n 'This json object describes the positioning of the widgets in '\n 'the dashboard. It is dynamically generated when adjusting '\n 'the widgets size and positions by using drag & drop in '\n 'the dashboard view'),\n 'css': _(\n 'The CSS for individual dashboards can be altered here, or '\n 'in the dashboard view where changes are immediately '\n 'visible'),\n 'slug': _('To get a readable URL for your dashboard'),\n 'json_metadata': _(\n 'This JSON object is generated dynamically when clicking '\n 'the save or overwrite button in the dashboard view. 
It '\n            'is exposed here for reference and for power users who may '\n            'want to alter specific parameters.'),\n        'owners': _('Owners is a list of users who can alter the dashboard.'),\n    }\n    base_filters = [['slice', DashboardFilter, lambda: []]]\n    label_columns = {\n        'dashboard_link': _('Dashboard'),\n        'dashboard_title': _('Title'),\n        'slug': _('Slug'),\n        'slices': _('Charts'),\n        'owners': _('Owners'),\n        'creator': _('Creator'),\n        'modified': _('Modified'),\n        'position_json': _('Position JSON'),\n        'css': _('CSS'),\n        'json_metadata': _('JSON Metadata'),\n        'table_names': _('Underlying Tables'),\n    }\n\n    def pre_add(self, obj):\n        obj.slug = obj.slug.strip() or None\n        if obj.slug:\n            obj.slug = obj.slug.replace(' ', '-')\n            obj.slug = re.sub(r'[^\\w\\-]+', '', obj.slug)\n        if g.user not in obj.owners:\n            obj.owners.append(g.user)\n        utils.validate_json(obj.json_metadata)\n        utils.validate_json(obj.position_json)\n        owners = [o for o in obj.owners]\n        for slc in obj.slices:\n            slc.owners = list(set(owners) | set(slc.owners))\n\n    def pre_update(self, obj):\n        check_ownership(obj)\n        self.pre_add(obj)\n\n    def pre_delete(self, obj):\n        check_ownership(obj)\n\n    @action('mulexport', __('Export'), __('Export dashboards?'), 'fa-database')\n    def mulexport(self, items):\n        if not isinstance(items, list):\n            items = [items]\n        ids = ''.join('&id={}'.format(d.id) for d in items)\n        return redirect(\n            '/dashboard/export_dashboards_form?{}'.format(ids[1:]))\n\n    @log_this\n    @has_access\n    @expose('/export_dashboards_form')\n    def download_dashboards(self):\n        if request.args.get('action') == 'go':\n            ids = request.args.getlist('id')\n            return Response(\n                models.Dashboard.export_dashboards(ids),\n                headers=generate_download_headers('json'),\n                mimetype='application/text')\n        return self.render_template(\n            'superset/export_dashboards.html',\n            dashboards_url='/dashboard/list',\n        )\n\n\nappbuilder.add_view(\n    DashboardModelView,\n    'Dashboards',\n    label=__('Dashboards'),\n    icon='fa-dashboard',\n    category='',\n    category_icon='')\n\n\nclass DashboardModelViewAsync(DashboardModelView):  # noqa\n    route_base = '/dashboardasync'\n    list_columns = [\n        'id', 'dashboard_link', 'creator', 'modified', 'dashboard_title',\n        'changed_on', 'url', 'changed_by_name',\n    ]\n    label_columns = {\n        'dashboard_link': _('Dashboard'),\n        'dashboard_title': _('Title'),\n        'creator': _('Creator'),\n        'modified': _('Modified'),\n    }\n\n\nappbuilder.add_view_no_menu(DashboardModelViewAsync)\n\n\nclass DashboardAddView(DashboardModelView):  # noqa\n    route_base = '/dashboardaddview'\n    list_columns = [\n        'id', 'dashboard_link', 'creator', 'modified', 'dashboard_title',\n        'changed_on', 'url', 'changed_by_name',\n    ]\n    show_columns = list(set(DashboardModelView.edit_columns + list_columns))\n\n\nappbuilder.add_view_no_menu(DashboardAddView)\n\n\nclass LogModelView(SupersetModelView):\n    datamodel = SQLAInterface(models.Log)\n\n    list_title = _('List Log')\n    show_title = _('Show Log')\n    add_title = _('Add Log')\n    edit_title = _('Edit Log')\n\n    list_columns = ('user', 'action', 'dttm')\n    edit_columns = ('user', 'action', 'dttm', 'json')\n    base_order = ('dttm', 'desc')\n    label_columns = {\n        'user': _('User'),\n        'action': _('Action'),\n        'dttm': _('dttm'),\n        'json': _('JSON'),\n    }\n\n\nappbuilder.add_view(\n    LogModelView,\n    'Action Log',\n    label=__('Action Log'),\n    category='Security',\n    category_label=__('Security'),\n    icon='fa-list-ol')\n\n\n@app.route('/health')\ndef health():\n    return 'OK'\n\n\n@app.route('/healthcheck')\ndef healthcheck():\n    return 'OK'\n\n\n@app.route('/ping')\ndef ping():\n    return 'OK'\n\n\nclass KV(BaseSupersetView):\n\n    \"\"\"Used for storing and retrieving key value pairs\"\"\"\n\n    @log_this\n    @has_access_api\n    @expose('/store/', methods=['POST'])\n    def store(self):\n        try:\n            value = request.form.get('data')\n            obj = models.KeyValue(value=value)\n            db.session.add(obj)\n            db.session.commit()\n        except Exception as e:\n            return json_error_response(e)\n        return Response(\n            json.dumps({'id': obj.id}),\n            status=200)\n\n    @log_this\n    @has_access_api\n    @expose('/<key_id>/', methods=['GET'])\n    def get_value(self, key_id):\n        kv = None\n        try:\n            kv = db.session.query(models.KeyValue).filter_by(id=key_id).one()\n        except Exception as e:\n            return json_error_response(e)\n        return Response(kv.value, status=200)\n\n\nappbuilder.add_view_no_menu(KV)\n\n\nclass R(BaseSupersetView):\n\n    \"\"\"used for short urls\"\"\"\n\n    @log_this\n    @expose('/<url_id>')\n    def index(self, url_id):\n        url = db.session.query(models.Url).filter_by(id=url_id).first()\n        if url and url.url:\n            explore_url = '//superset/explore/?'\n            if url.url.startswith(explore_url):\n                explore_url += f'r={url_id}'\n                return redirect(explore_url[1:])\n            else:\n                return redirect(url.url[1:])\n        else:\n            flash('URL to nowhere...', 'danger')\n            return redirect('/')\n\n    @log_this\n    @has_access_api\n    @expose('/shortner/', methods=['POST'])\n    def shortner(self):\n        url = request.form.get('data')\n        obj = models.Url(url=url)\n        db.session.add(obj)\n        db.session.commit()\n        return Response(\n            '{scheme}://{request.headers[Host]}/r/{obj.id}'.format(\n                scheme=request.scheme, request=request, obj=obj),\n            mimetype='text/plain')\n\n\nappbuilder.add_view_no_menu(R)\n\n\nclass Superset(BaseSupersetView):\n    \"\"\"The base views for Superset!\"\"\"\n    @has_access_api\n    @expose('/datasources/')\n    def datasources(self):\n        datasources = ConnectorRegistry.get_all_datasources(db.session)\n        datasources = [o.short_data for o in datasources]\n        datasources = sorted(datasources, key=lambda o: o['name'])\n        return self.json_response(datasources)\n\n    @has_access_api\n    @expose('/override_role_permissions/', methods=['POST'])\n    def override_role_permissions(self):\n        \"\"\"Updates the role with the give datasource permissions.\n\n        Permissions not in the request will be revoked. This endpoint should\n        be available to admins only. 
Expects JSON in the format:\n {\n 'role_name': '{role_name}',\n 'database': [{\n 'datasource_type': '{table|druid}',\n 'name': '{database_name}',\n 'schema': [{\n 'name': '{schema_name}',\n 'datasources': ['{datasource name}, {datasource name}']\n }]\n }]\n }\n \"\"\"\n data = request.get_json(force=True)\n role_name = data['role_name']\n databases = data['database']\n\n db_ds_names = set()\n for dbs in databases:\n for schema in dbs['schema']:\n for ds_name in schema['datasources']:\n fullname = utils.get_datasource_full_name(\n dbs['name'], ds_name, schema=schema['name'])\n db_ds_names.add(fullname)\n\n existing_datasources = ConnectorRegistry.get_all_datasources(db.session)\n datasources = [\n d for d in existing_datasources if d.full_name in db_ds_names]\n role = security_manager.find_role(role_name)\n # remove all permissions\n role.permissions = []\n # grant permissions to the list of datasources\n granted_perms = []\n for datasource in datasources:\n view_menu_perm = security_manager.find_permission_view_menu(\n view_menu_name=datasource.perm,\n permission_name='datasource_access')\n # prevent creating empty permissions\n if view_menu_perm and view_menu_perm.view_menu:\n role.permissions.append(view_menu_perm)\n granted_perms.append(view_menu_perm.view_menu.name)\n db.session.commit()\n return self.json_response({\n 'granted': granted_perms,\n 'requested': list(db_ds_names),\n }, status=201)\n\n @log_this\n @has_access\n @expose('/request_access/')\n def request_access(self):\n datasources = set()\n dashboard_id = request.args.get('dashboard_id')\n if dashboard_id:\n dash = (\n db.session.query(models.Dashboard)\n .filter_by(id=int(dashboard_id))\n .one()\n )\n datasources |= dash.datasources\n datasource_id = request.args.get('datasource_id')\n datasource_type = request.args.get('datasource_type')\n if datasource_id:\n ds_class = ConnectorRegistry.sources.get(datasource_type)\n datasource = (\n db.session.query(ds_class)\n .filter_by(id=int(datasource_id))\n .one()\n )\n datasources.add(datasource)\n\n has_access = all(\n (\n datasource and security_manager.datasource_access(datasource)\n for datasource in datasources\n ))\n if has_access:\n return redirect('/superset/dashboard/{}'.format(dashboard_id))\n\n if request.args.get('action') == 'go':\n for datasource in datasources:\n access_request = DAR(\n datasource_id=datasource.id,\n datasource_type=datasource.type)\n db.session.add(access_request)\n db.session.commit()\n flash(__('Access was requested'), 'info')\n return redirect('/')\n\n return self.render_template(\n 'superset/request_access.html',\n datasources=datasources,\n datasource_names=', '.join([o.name for o in datasources]),\n )\n\n @log_this\n @has_access\n @expose('/approve')\n def approve(self):\n def clean_fulfilled_requests(session):\n for r in session.query(DAR).all():\n datasource = ConnectorRegistry.get_datasource(\n r.datasource_type, r.datasource_id, session)\n if not datasource or \\\n security_manager.datasource_access(datasource):\n # datasource does not exist anymore\n session.delete(r)\n session.commit()\n datasource_type = request.args.get('datasource_type')\n datasource_id = request.args.get('datasource_id')\n created_by_username = request.args.get('created_by')\n role_to_grant = request.args.get('role_to_grant')\n role_to_extend = request.args.get('role_to_extend')\n\n session = db.session\n datasource = ConnectorRegistry.get_datasource(\n datasource_type, datasource_id, session)\n\n if not datasource:\n flash(DATASOURCE_MISSING_ERR, 'alert')\n return 
json_error_response(DATASOURCE_MISSING_ERR)\n\n requested_by = security_manager.find_user(username=created_by_username)\n if not requested_by:\n flash(USER_MISSING_ERR, 'alert')\n return json_error_response(USER_MISSING_ERR)\n\n requests = (\n session.query(DAR)\n .filter(\n DAR.datasource_id == datasource_id,\n DAR.datasource_type == datasource_type,\n DAR.created_by_fk == requested_by.id)\n .all()\n )\n\n if not requests:\n flash(ACCESS_REQUEST_MISSING_ERR, 'alert')\n return json_error_response(ACCESS_REQUEST_MISSING_ERR)\n\n # check if you can approve\n if (\n security_manager.all_datasource_access() or\n check_ownership(datasource, raise_if_false=False)\n ):\n # can by done by admin only\n if role_to_grant:\n role = security_manager.find_role(role_to_grant)\n requested_by.roles.append(role)\n msg = __(\n '%(user)s was granted the role %(role)s that gives access '\n 'to the %(datasource)s',\n user=requested_by.username,\n role=role_to_grant,\n datasource=datasource.full_name)\n utils.notify_user_about_perm_udate(\n g.user, requested_by, role, datasource,\n 'email/role_granted.txt', app.config)\n flash(msg, 'info')\n\n if role_to_extend:\n perm_view = security_manager.find_permission_view_menu(\n 'email/datasource_access', datasource.perm)\n role = security_manager.find_role(role_to_extend)\n security_manager.add_permission_role(role, perm_view)\n msg = __('Role %(r)s was extended to provide the access to '\n 'the datasource %(ds)s', r=role_to_extend,\n ds=datasource.full_name)\n utils.notify_user_about_perm_udate(\n g.user, requested_by, role, datasource,\n 'email/role_extended.txt', app.config)\n flash(msg, 'info')\n clean_fulfilled_requests(session)\n else:\n flash(__('You have no permission to approve this request'),\n 'danger')\n return redirect('/accessrequestsmodelview/list/')\n for r in requests:\n session.delete(r)\n session.commit()\n return redirect('/accessrequestsmodelview/list/')\n\n def get_form_data(self, slice_id=None, use_slice_data=False):\n form_data = {}\n post_data = request.form.get('form_data')\n request_args_data = request.args.get('form_data')\n # Supporting POST\n if post_data:\n form_data.update(json.loads(post_data))\n # request params can overwrite post body\n if request_args_data:\n form_data.update(json.loads(request_args_data))\n\n url_id = request.args.get('r')\n if url_id:\n saved_url = db.session.query(models.Url).filter_by(id=url_id).first()\n if saved_url:\n url_str = parse.unquote_plus(\n saved_url.url.split('?')[1][10:], encoding='utf-8', errors=None)\n url_form_data = json.loads(url_str)\n # allow form_date in request override saved url\n url_form_data.update(form_data)\n form_data = url_form_data\n\n if request.args.get('viz_type'):\n # Converting old URLs\n form_data = cast_form_data(form_data)\n\n form_data = {\n k: v\n for k, v in form_data.items()\n if k not in FORM_DATA_KEY_BLACKLIST\n }\n\n # When a slice_id is present, load from DB and override\n # the form_data from the DB with the other form_data provided\n slice_id = form_data.get('slice_id') or slice_id\n slc = None\n\n # Check if form data only contains slice_id\n contains_only_slc_id = not any(key != 'slice_id' for key in form_data)\n\n # Include the slice_form_data if request from explore or slice calls\n # or if form_data only contains slice_id\n if slice_id and (use_slice_data or contains_only_slc_id):\n slc = db.session.query(models.Slice).filter_by(id=slice_id).one_or_none()\n if slc:\n slice_form_data = slc.form_data.copy()\n slice_form_data.update(form_data)\n form_data = 
slice_form_data\n\n update_time_range(form_data)\n\n return form_data, slc\n\n def get_viz(\n self,\n slice_id=None,\n form_data=None,\n datasource_type=None,\n datasource_id=None,\n force=False,\n ):\n if slice_id:\n slc = (\n db.session.query(models.Slice)\n .filter_by(id=slice_id)\n .one()\n )\n return slc.get_viz()\n else:\n viz_type = form_data.get('viz_type', 'table')\n datasource = ConnectorRegistry.get_datasource(\n datasource_type, datasource_id, db.session)\n viz_obj = viz.viz_types[viz_type](\n datasource,\n form_data=form_data,\n force=force,\n )\n return viz_obj\n\n @has_access\n @expose('/slice/<slice_id>/')\n def slice(self, slice_id):\n form_data, slc = self.get_form_data(slice_id, use_slice_data=True)\n if not slc:\n abort(404)\n endpoint = '/superset/explore/?form_data={}'.format(\n parse.quote(json.dumps(form_data)),\n )\n if request.args.get('standalone') == 'true':\n endpoint += '&standalone=true'\n return redirect(endpoint)\n\n def get_query_string_response(self, viz_obj):\n query = None\n try:\n query_obj = viz_obj.query_obj()\n if query_obj:\n query = viz_obj.datasource.get_query_str(query_obj)\n except Exception as e:\n logging.exception(e)\n return json_error_response(e)\n\n if query_obj and query_obj['prequeries']:\n query_obj['prequeries'].append(query)\n query = ';\\n\\n'.join(query_obj['prequeries'])\n if query:\n query += ';'\n else:\n query = 'No query.'\n\n return self.json_response({\n 'query': query,\n 'language': viz_obj.datasource.query_language,\n })\n\n def get_raw_results(self, viz_obj):\n return self.json_response({\n 'data': viz_obj.get_df().to_dict('records'),\n })\n\n def get_samples(self, viz_obj):\n return self.json_response({\n 'data': viz_obj.get_samples(),\n })\n\n def generate_json(\n self, datasource_type, datasource_id, form_data,\n csv=False, query=False, force=False, results=False,\n samples=False,\n ):\n viz_obj = self.get_viz(\n datasource_type=datasource_type,\n datasource_id=datasource_id,\n form_data=form_data,\n force=force,\n )\n security_manager.assert_datasource_permission(viz_obj.datasource)\n\n if csv:\n return CsvResponse(\n viz_obj.get_csv(),\n status=200,\n headers=generate_download_headers('csv'),\n mimetype='application/csv')\n\n if query:\n return self.get_query_string_response(viz_obj)\n\n if results:\n return self.get_raw_results(viz_obj)\n\n if samples:\n return self.get_samples(viz_obj)\n\n payload = viz_obj.get_payload()\n return data_payload_response(*viz_obj.payload_json_and_has_error(payload))\n\n @log_this\n @api\n @has_access_api\n @expose('/slice_json/<slice_id>')\n def slice_json(self, slice_id):\n form_data, slc = self.get_form_data(slice_id, use_slice_data=True)\n datasource_type = slc.datasource.type\n datasource_id = slc.datasource.id\n\n return self.generate_json(datasource_type=datasource_type,\n datasource_id=datasource_id,\n form_data=form_data)\n\n @log_this\n @api\n @has_access_api\n @expose('/annotation_json/<layer_id>')\n def annotation_json(self, layer_id):\n form_data = self.get_form_data()[0]\n form_data['layer_id'] = layer_id\n form_data['filters'] = [{'col': 'layer_id',\n 'op': '==',\n 'val': layer_id}]\n datasource = AnnotationDatasource()\n viz_obj = viz.viz_types['table'](\n datasource,\n form_data=form_data,\n force=False,\n )\n payload = viz_obj.get_payload()\n return data_payload_response(*viz_obj.payload_json_and_has_error(payload))\n\n @log_this\n @api\n @has_access_api\n @handle_api_exception\n @expose('/explore_json/<datasource_type>/<datasource_id>/', methods=['GET', 'POST'])\n 
@expose('/explore_json/', methods=['GET', 'POST'])\n def explore_json(self, datasource_type=None, datasource_id=None):\n \"\"\"Serves all request that GET or POST form_data\n\n This endpoint evolved to be the entry point of many different\n requests that GETs or POSTs a form_data.\n\n `self.generate_json` receives this input and returns different\n payloads based on the request args in the first block\n\n TODO: break into one endpoint for each return shape\"\"\"\n csv = request.args.get('csv') == 'true'\n query = request.args.get('query') == 'true'\n results = request.args.get('results') == 'true'\n samples = request.args.get('samples') == 'true'\n force = request.args.get('force') == 'true'\n\n form_data = self.get_form_data()[0]\n datasource_id, datasource_type = self.datasource_info(\n datasource_id, datasource_type, form_data)\n\n return self.generate_json(\n datasource_type=datasource_type,\n datasource_id=datasource_id,\n form_data=form_data,\n csv=csv,\n query=query,\n results=results,\n force=force,\n samples=samples,\n )\n\n @log_this\n @has_access\n @expose('/import_dashboards', methods=['GET', 'POST'])\n def import_dashboards(self):\n \"\"\"Overrides the dashboards using json instances from the file.\"\"\"\n f = request.files.get('file')\n if request.method == 'POST' and f:\n dashboard_import_export.import_dashboards(db.session, f.stream)\n return redirect('/dashboard/list/')\n return self.render_template('superset/import_dashboards.html')\n\n @log_this\n @has_access\n @expose('/explorev2/<datasource_type>/<datasource_id>/')\n def explorev2(self, datasource_type, datasource_id):\n \"\"\"Deprecated endpoint, here for backward compatibility of urls\"\"\"\n return redirect(url_for(\n 'Superset.explore',\n datasource_type=datasource_type,\n datasource_id=datasource_id,\n **request.args))\n\n @staticmethod\n def datasource_info(datasource_id, datasource_type, form_data):\n \"\"\"Compatibility layer for handling of datasource info\n\n datasource_id & datasource_type used to be passed in the URL\n directory, now they should come as part of the form_data,\n This function allows supporting both without duplicating code\"\"\"\n datasource = form_data.get('datasource', '')\n if '__' in datasource:\n datasource_id, datasource_type = datasource.split('__')\n # The case where the datasource has been deleted\n datasource_id = None if datasource_id == 'None' else datasource_id\n\n if not datasource_id:\n raise Exception(\n 'The datasource associated with this chart no longer exists')\n datasource_id = int(datasource_id)\n return datasource_id, datasource_type\n\n @log_this\n @has_access\n @expose('/explore/<datasource_type>/<datasource_id>/', methods=['GET', 'POST'])\n @expose('/explore/', methods=['GET', 'POST'])\n def explore(self, datasource_type=None, datasource_id=None):\n user_id = g.user.get_id() if g.user else None\n form_data, slc = self.get_form_data(use_slice_data=True)\n\n datasource_id, datasource_type = self.datasource_info(\n datasource_id, datasource_type, form_data)\n\n error_redirect = '/chart/list/'\n datasource = ConnectorRegistry.get_datasource(\n datasource_type, datasource_id, db.session)\n if not datasource:\n flash(DATASOURCE_MISSING_ERR, 'danger')\n return redirect(error_redirect)\n\n if config.get('ENABLE_ACCESS_REQUEST') and (\n not security_manager.datasource_access(datasource)\n ):\n flash(\n __(security_manager.get_datasource_access_error_msg(datasource)),\n 'danger')\n return redirect(\n 'superset/request_access/?'\n f'datasource_type={datasource_type}&'\n 
f'datasource_id={datasource_id}&')\n\n viz_type = form_data.get('viz_type')\n if not viz_type and datasource.default_endpoint:\n return redirect(datasource.default_endpoint)\n\n # slc perms\n slice_add_perm = security_manager.can_access('can_add', 'SliceModelView')\n slice_overwrite_perm = is_owner(slc, g.user)\n slice_download_perm = security_manager.can_access(\n 'can_download', 'SliceModelView')\n\n form_data['datasource'] = str(datasource_id) + '__' + datasource_type\n\n # On explore, merge legacy and extra filters into the form data\n utils.convert_legacy_filters_into_adhoc(form_data)\n utils.merge_extra_filters(form_data)\n\n # merge request url params\n if request.method == 'GET':\n utils.merge_request_params(form_data, request.args)\n\n # handle save or overwrite\n action = request.args.get('action')\n\n if action == 'overwrite' and not slice_overwrite_perm:\n return json_error_response(\n _('You don\\'t have the rights to ') + _('alter this ') + _('chart'),\n status=400)\n\n if action == 'saveas' and not slice_add_perm:\n return json_error_response(\n _('You don\\'t have the rights to ') + _('create a ') + _('chart'),\n status=400)\n\n if action in ('saveas', 'overwrite'):\n return self.save_or_overwrite_slice(\n request.args,\n slc, slice_add_perm,\n slice_overwrite_perm,\n slice_download_perm,\n datasource_id,\n datasource_type,\n datasource.name)\n\n standalone = request.args.get('standalone') == 'true'\n bootstrap_data = {\n 'can_add': slice_add_perm,\n 'can_download': slice_download_perm,\n 'can_overwrite': slice_overwrite_perm,\n 'datasource': datasource.data,\n 'form_data': form_data,\n 'datasource_id': datasource_id,\n 'datasource_type': datasource_type,\n 'slice': slc.data if slc else None,\n 'standalone': standalone,\n 'user_id': user_id,\n 'forced_height': request.args.get('height'),\n 'common': self.common_bootsrap_payload(),\n }\n table_name = datasource.table_name \\\n if datasource_type == 'table' \\\n else datasource.datasource_name\n if slc:\n title = slc.slice_name\n else:\n title = _('Explore - %(table)s', table=table_name)\n return self.render_template(\n 'superset/basic.html',\n bootstrap_data=json.dumps(bootstrap_data),\n entry='explore',\n title=title,\n standalone_mode=standalone)\n\n @api\n @handle_api_exception\n @has_access_api\n @expose('/filter/<datasource_type>/<datasource_id>/<column>/')\n def filter(self, datasource_type, datasource_id, column):\n \"\"\"\n Endpoint to retrieve values for specified column.\n\n :param datasource_type: Type of datasource e.g. 
table\n :param datasource_id: Datasource id\n :param column: Column name to retrieve values for\n :return:\n \"\"\"\n # TODO: Cache endpoint by user, datasource and column\n datasource = ConnectorRegistry.get_datasource(\n datasource_type, datasource_id, db.session)\n if not datasource:\n return json_error_response(DATASOURCE_MISSING_ERR)\n security_manager.assert_datasource_permission(datasource)\n payload = json.dumps(\n datasource.values_for_column(\n column,\n config.get('FILTER_SELECT_ROW_LIMIT', 10000),\n ),\n default=utils.json_int_dttm_ser)\n return json_success(payload)\n\n def save_or_overwrite_slice(\n self, args, slc, slice_add_perm, slice_overwrite_perm, slice_download_perm,\n datasource_id, datasource_type, datasource_name):\n \"\"\"Save or overwrite a slice\"\"\"\n slice_name = args.get('slice_name')\n action = args.get('action')\n form_data, _ = self.get_form_data()\n\n if action in ('saveas'):\n if 'slice_id' in form_data:\n form_data.pop('slice_id') # don't save old slice_id\n slc = models.Slice(owners=[g.user] if g.user else [])\n\n slc.params = json.dumps(form_data, indent=2, sort_keys=True)\n slc.datasource_name = datasource_name\n slc.viz_type = form_data['viz_type']\n slc.datasource_type = datasource_type\n slc.datasource_id = datasource_id\n slc.slice_name = slice_name\n\n if action in ('saveas') and slice_add_perm:\n self.save_slice(slc)\n elif action == 'overwrite' and slice_overwrite_perm:\n self.overwrite_slice(slc)\n\n # Adding slice to a dashboard if requested\n dash = None\n if request.args.get('add_to_dash') == 'existing':\n dash = (\n db.session.query(models.Dashboard)\n .filter_by(id=int(request.args.get('save_to_dashboard_id')))\n .one()\n )\n\n # check edit dashboard permissions\n dash_overwrite_perm = check_ownership(dash, raise_if_false=False)\n if not dash_overwrite_perm:\n return json_error_response(\n _('You don\\'t have the rights to ') + _('alter this ') +\n _('dashboard'),\n status=400)\n\n flash(\n 'Slice [{}] was added to dashboard [{}]'.format(\n slc.slice_name,\n dash.dashboard_title),\n 'info')\n elif request.args.get('add_to_dash') == 'new':\n # check create dashboard permissions\n dash_add_perm = security_manager.can_access('can_add', 'DashboardModelView')\n if not dash_add_perm:\n return json_error_response(\n _('You don\\'t have the rights to ') + _('create a ') + _('dashboard'),\n status=400)\n\n dash = models.Dashboard(\n dashboard_title=request.args.get('new_dashboard_name'),\n owners=[g.user] if g.user else [])\n flash(\n 'Dashboard [{}] just got created and slice [{}] was added '\n 'to it'.format(\n dash.dashboard_title,\n slc.slice_name),\n 'info')\n\n if dash and slc not in dash.slices:\n dash.slices.append(slc)\n db.session.commit()\n\n response = {\n 'can_add': slice_add_perm,\n 'can_download': slice_download_perm,\n 'can_overwrite': is_owner(slc, g.user),\n 'form_data': slc.form_data,\n 'slice': slc.data,\n }\n\n if request.args.get('goto_dash') == 'true':\n response.update({'dashboard': dash.url})\n\n return json_success(json.dumps(response))\n\n def save_slice(self, slc):\n session = db.session()\n msg = _('Chart [{}] has been saved').format(slc.slice_name)\n session.add(slc)\n session.commit()\n flash(msg, 'info')\n\n def overwrite_slice(self, slc):\n session = db.session()\n session.merge(slc)\n session.commit()\n msg = _('Chart [{}] has been overwritten').format(slc.slice_name)\n flash(msg, 'info')\n\n @api\n @has_access_api\n @expose('/checkbox/<model_view>/<id_>/<attr>/<value>', methods=['GET'])\n def checkbox(self, 
model_view, id_, attr, value):\n \"\"\"endpoint for checking/unchecking any boolean in a sqla model\"\"\"\n modelview_to_model = {\n '{}ColumnInlineView'.format(name.capitalize()): source.column_class\n for name, source in ConnectorRegistry.sources.items()\n }\n model = modelview_to_model[model_view]\n col = db.session.query(model).filter_by(id=id_).first()\n checked = value == 'true'\n if col:\n setattr(col, attr, checked)\n if checked:\n metrics = col.get_metrics().values()\n col.datasource.add_missing_metrics(metrics)\n db.session.commit()\n return json_success('OK')\n\n @api\n @has_access_api\n @expose('/schemas/<db_id>/')\n @expose('/schemas/<db_id>/<force_refresh>/')\n def schemas(self, db_id, force_refresh='false'):\n db_id = int(db_id)\n force_refresh = force_refresh.lower() == 'true'\n database = (\n db.session\n .query(models.Database)\n .filter_by(id=db_id)\n .one()\n )\n schemas = database.all_schema_names(cache=database.schema_cache_enabled,\n cache_timeout=database.schema_cache_timeout,\n force=force_refresh)\n schemas = security_manager.schemas_accessible_by_user(database, schemas)\n return Response(\n json.dumps({'schemas': schemas}),\n mimetype='application/json')\n\n @api\n @has_access_api\n @expose('/tables/<db_id>/<schema>/<substr>/')\n @expose('/tables/<db_id>/<schema>/<substr>/<force_refresh>/')\n def tables(self, db_id, schema, substr, force_refresh='false'):\n \"\"\"Endpoint to fetch the list of tables for given database\"\"\"\n db_id = int(db_id)\n force_refresh = force_refresh.lower() == 'true'\n schema = utils.js_string_to_python(schema)\n substr = utils.js_string_to_python(substr)\n database = db.session.query(models.Database).filter_by(id=db_id).one()\n\n if schema:\n table_names = database.all_table_names_in_schema(\n schema=schema, force=force_refresh,\n cache=database.table_cache_enabled,\n cache_timeout=database.table_cache_timeout)\n view_names = database.all_view_names_in_schema(\n schema=schema, force=force_refresh,\n cache=database.table_cache_enabled,\n cache_timeout=database.table_cache_timeout)\n else:\n table_names = database.all_table_names_in_database(\n cache=True, force=False, cache_timeout=24 * 60 * 60)\n view_names = database.all_view_names_in_database(\n cache=True, force=False, cache_timeout=24 * 60 * 60)\n table_names = security_manager.accessible_by_user(database, table_names, schema)\n view_names = security_manager.accessible_by_user(database, view_names, schema)\n\n if substr:\n table_names = [tn for tn in table_names if substr in tn]\n view_names = [vn for vn in view_names if substr in vn]\n\n max_items = config.get('MAX_TABLE_NAMES') or len(table_names)\n total_items = len(table_names) + len(view_names)\n max_tables = len(table_names)\n max_views = len(view_names)\n if total_items and substr:\n max_tables = max_items * len(table_names) // total_items\n max_views = max_items * len(view_names) // total_items\n\n table_options = [{'value': tn, 'label': tn}\n for tn in table_names[:max_tables]]\n table_options.extend([{'value': vn, 'label': '[view] {}'.format(vn)}\n for vn in view_names[:max_views]])\n payload = {\n 'tableLength': len(table_names) + len(view_names),\n 'options': table_options,\n }\n return json_success(json.dumps(payload))\n\n @api\n @has_access_api\n @expose('/copy_dash/<dashboard_id>/', methods=['GET', 'POST'])\n def copy_dash(self, dashboard_id):\n \"\"\"Copy dashboard\"\"\"\n session = db.session()\n data = json.loads(request.form.get('data'))\n dash = models.Dashboard()\n original_dash = (\n session\n 
.query(models.Dashboard)\n .filter_by(id=dashboard_id).first())\n\n dash.owners = [g.user] if g.user else []\n dash.dashboard_title = data['dashboard_title']\n\n if data['duplicate_slices']:\n # Duplicating slices as well, mapping old ids to new ones\n old_to_new_sliceids = {}\n for slc in original_dash.slices:\n new_slice = slc.clone()\n new_slice.owners = [g.user] if g.user else []\n session.add(new_slice)\n session.flush()\n new_slice.dashboards.append(dash)\n old_to_new_sliceids['{}'.format(slc.id)] = \\\n '{}'.format(new_slice.id)\n\n # update chartId of layout entities\n # in v2_dash positions json data, chartId should be integer,\n # while in older version slice_id is string type\n for value in data['positions'].values():\n if (\n isinstance(value, dict) and value.get('meta') and\n value.get('meta').get('chartId')\n ):\n old_id = '{}'.format(value.get('meta').get('chartId'))\n new_id = int(old_to_new_sliceids[old_id])\n value['meta']['chartId'] = new_id\n else:\n dash.slices = original_dash.slices\n dash.params = original_dash.params\n\n self._set_dash_metadata(dash, data)\n session.add(dash)\n session.commit()\n dash_json = json.dumps(dash.data)\n session.close()\n return json_success(dash_json)\n\n @api\n @has_access_api\n @expose('/save_dash/<dashboard_id>/', methods=['GET', 'POST'])\n def save_dash(self, dashboard_id):\n \"\"\"Save a dashboard's metadata\"\"\"\n session = db.session()\n dash = (session\n .query(models.Dashboard)\n .filter_by(id=dashboard_id).first())\n check_ownership(dash, raise_if_false=True)\n data = json.loads(request.form.get('data'))\n self._set_dash_metadata(dash, data)\n session.merge(dash)\n session.commit()\n session.close()\n return json_success(json.dumps({'status': 'SUCCESS'}))\n\n @staticmethod\n def _set_dash_metadata(dashboard, data):\n positions = data['positions']\n # find slices in the position data\n slice_ids = []\n slice_id_to_name = {}\n for value in positions.values():\n if (\n isinstance(value, dict) and value.get('meta') and\n value.get('meta').get('chartId')\n ):\n slice_id = value.get('meta').get('chartId')\n slice_ids.append(slice_id)\n slice_id_to_name[slice_id] = value.get('meta').get('sliceName')\n\n session = db.session()\n Slice = models.Slice # noqa\n current_slices = session.query(Slice).filter(\n Slice.id.in_(slice_ids)).all()\n\n dashboard.slices = current_slices\n\n # update slice names. 
this assumes user has permissions to update the slice\n for slc in dashboard.slices:\n new_name = slice_id_to_name[slc.id]\n if slc.slice_name != new_name:\n slc.slice_name = new_name\n session.merge(slc)\n session.flush()\n\n # remove leading and trailing white spaces in the dumped json\n dashboard.position_json = json.dumps(\n positions, indent=None, separators=(',', ':'), sort_keys=True)\n md = dashboard.params_dict\n dashboard.css = data.get('css')\n dashboard.dashboard_title = data['dashboard_title']\n\n if 'filter_immune_slices' not in md:\n md['filter_immune_slices'] = []\n if 'timed_refresh_immune_slices' not in md:\n md['timed_refresh_immune_slices'] = []\n if 'filter_immune_slice_fields' not in md:\n md['filter_immune_slice_fields'] = {}\n md['expanded_slices'] = data['expanded_slices']\n default_filters_data = json.loads(data.get('default_filters', '{}'))\n applicable_filters = \\\n {key: v for key, v in default_filters_data.items()\n if int(key) in slice_ids}\n md['default_filters'] = json.dumps(applicable_filters)\n dashboard.json_metadata = json.dumps(md)\n\n @api\n @has_access_api\n @expose('/add_slices/<dashboard_id>/', methods=['POST'])\n def add_slices(self, dashboard_id):\n \"\"\"Add and save slices to a dashboard\"\"\"\n data = json.loads(request.form.get('data'))\n session = db.session()\n Slice = models.Slice # noqa\n dash = (\n session.query(models.Dashboard).filter_by(id=dashboard_id).first())\n check_ownership(dash, raise_if_false=True)\n new_slices = session.query(Slice).filter(\n Slice.id.in_(data['slice_ids']))\n dash.slices += new_slices\n session.merge(dash)\n session.commit()\n session.close()\n return 'SLICES ADDED'\n\n @api\n @has_access_api\n @expose('/testconn', methods=['POST', 'GET'])\n def testconn(self):\n \"\"\"Tests a sqla connection\"\"\"\n try:\n username = g.user.username if g.user is not None else None\n uri = request.json.get('uri')\n db_name = request.json.get('name')\n impersonate_user = request.json.get('impersonate_user')\n database = None\n if db_name:\n database = (\n db.session\n .query(models.Database)\n .filter_by(database_name=db_name)\n .first()\n )\n if database and uri == database.safe_sqlalchemy_uri():\n # the password-masked uri was passed\n # use the URI associated with this database\n uri = database.sqlalchemy_uri_decrypted\n\n configuration = {}\n\n if database and uri:\n url = make_url(uri)\n db_engine = models.Database.get_db_engine_spec_for_backend(\n url.get_backend_name())\n db_engine.patch()\n\n masked_url = database.get_password_masked_url_from_uri(uri)\n logging.info('Superset.testconn(). 
Masked URL: {0}'.format(masked_url))\n\n configuration.update(\n db_engine.get_configuration_for_impersonation(uri,\n impersonate_user,\n username),\n )\n\n engine_params = (\n request.json\n .get('extras', {})\n .get('engine_params', {}))\n connect_args = engine_params.get('connect_args')\n\n if configuration:\n connect_args['configuration'] = configuration\n\n engine = create_engine(uri, **engine_params)\n engine.connect()\n return json_success(json.dumps(engine.table_names(), indent=4))\n except Exception as e:\n logging.exception(e)\n return json_error_response((\n 'Connection failed!\\n\\n'\n 'The error message returned was:\\n{}').format(e))\n\n @api\n @has_access_api\n @expose('/recent_activity/<user_id>/', methods=['GET'])\n def recent_activity(self, user_id):\n \"\"\"Recent activity (actions) for a given user\"\"\"\n M = models # noqa\n\n if request.args.get('limit'):\n limit = int(request.args.get('limit'))\n else:\n limit = 1000\n\n qry = (\n db.session.query(M.Log, M.Dashboard, M.Slice)\n .outerjoin(\n M.Dashboard,\n M.Dashboard.id == M.Log.dashboard_id,\n )\n .outerjoin(\n M.Slice,\n M.Slice.id == M.Log.slice_id,\n )\n .filter(\n sqla.and_(\n ~M.Log.action.in_(('queries', 'shortner', 'sql_json')),\n M.Log.user_id == user_id,\n ),\n )\n .order_by(M.Log.dttm.desc())\n .limit(limit)\n )\n payload = []\n for log in qry.all():\n item_url = None\n item_title = None\n if log.Dashboard:\n item_url = log.Dashboard.url\n item_title = log.Dashboard.dashboard_title\n elif log.Slice:\n item_url = log.Slice.slice_url\n item_title = log.Slice.slice_name\n\n payload.append({\n 'action': log.Log.action,\n 'item_url': item_url,\n 'item_title': item_title,\n 'time': log.Log.dttm,\n })\n return json_success(\n json.dumps(payload, default=utils.json_int_dttm_ser))\n\n @api\n @has_access_api\n @expose('/csrf_token/', methods=['GET'])\n def csrf_token(self):\n return Response(\n self.render_template('superset/csrf_token.json'),\n mimetype='text/json',\n )\n\n @api\n @has_access_api\n @expose('/fave_dashboards_by_username/<username>/', methods=['GET'])\n def fave_dashboards_by_username(self, username):\n \"\"\"This lets us use a user's username to pull favourite dashboards\"\"\"\n user = security_manager.find_user(username=username)\n return self.fave_dashboards(user.get_id())\n\n @api\n @has_access_api\n @expose('/fave_dashboards/<user_id>/', methods=['GET'])\n def fave_dashboards(self, user_id):\n qry = (\n db.session.query(\n models.Dashboard,\n models.FavStar.dttm,\n )\n .join(\n models.FavStar,\n sqla.and_(\n models.FavStar.user_id == int(user_id),\n models.FavStar.class_name == 'Dashboard',\n models.Dashboard.id == models.FavStar.obj_id,\n ),\n )\n .order_by(\n models.FavStar.dttm.desc(),\n )\n )\n payload = []\n for o in qry.all():\n d = {\n 'id': o.Dashboard.id,\n 'dashboard': o.Dashboard.dashboard_link(),\n 'title': o.Dashboard.dashboard_title,\n 'url': o.Dashboard.url,\n 'dttm': o.dttm,\n }\n if o.Dashboard.created_by:\n user = o.Dashboard.created_by\n d['creator'] = str(user)\n d['creator_url'] = '/superset/profile/{}/'.format(\n user.username)\n payload.append(d)\n return json_success(\n json.dumps(payload, default=utils.json_int_dttm_ser))\n\n @api\n @has_access_api\n @expose('/created_dashboards/<user_id>/', methods=['GET'])\n def created_dashboards(self, user_id):\n Dash = models.Dashboard # noqa\n qry = (\n db.session.query(\n Dash,\n )\n .filter(\n sqla.or_(\n Dash.created_by_fk == user_id,\n Dash.changed_by_fk == user_id,\n ),\n )\n .order_by(\n Dash.changed_on.desc(),\n )\n )\n 
payload = [{\n 'id': o.id,\n 'dashboard': o.dashboard_link(),\n 'title': o.dashboard_title,\n 'url': o.url,\n 'dttm': o.changed_on,\n } for o in qry.all()]\n return json_success(\n json.dumps(payload, default=utils.json_int_dttm_ser))\n\n @api\n @has_access_api\n @expose('/user_slices', methods=['GET'])\n @expose('/user_slices/<user_id>/', methods=['GET'])\n def user_slices(self, user_id=None):\n \"\"\"List of slices a user created, or faved\"\"\"\n if not user_id:\n user_id = g.user.id\n Slice = models.Slice # noqa\n FavStar = models.FavStar # noqa\n qry = (\n db.session.query(Slice,\n FavStar.dttm).join(\n models.FavStar,\n sqla.and_(\n models.FavStar.user_id == int(user_id),\n models.FavStar.class_name == 'slice',\n models.Slice.id == models.FavStar.obj_id,\n ),\n isouter=True).filter(\n sqla.or_(\n Slice.created_by_fk == user_id,\n Slice.changed_by_fk == user_id,\n FavStar.user_id == user_id,\n ),\n )\n .order_by(Slice.slice_name.asc())\n )\n payload = [{\n 'id': o.Slice.id,\n 'title': o.Slice.slice_name,\n 'url': o.Slice.slice_url,\n 'data': o.Slice.form_data,\n 'dttm': o.dttm if o.dttm else o.Slice.changed_on,\n 'viz_type': o.Slice.viz_type,\n } for o in qry.all()]\n return json_success(\n json.dumps(payload, default=utils.json_int_dttm_ser))\n\n @api\n @has_access_api\n @expose('/created_slices', methods=['GET'])\n @expose('/created_slices/<user_id>/', methods=['GET'])\n def created_slices(self, user_id=None):\n \"\"\"List of slices created by this user\"\"\"\n if not user_id:\n user_id = g.user.id\n Slice = models.Slice # noqa\n qry = (\n db.session.query(Slice)\n .filter(\n sqla.or_(\n Slice.created_by_fk == user_id,\n Slice.changed_by_fk == user_id,\n ),\n )\n .order_by(Slice.changed_on.desc())\n )\n payload = [{\n 'id': o.id,\n 'title': o.slice_name,\n 'url': o.slice_url,\n 'dttm': o.changed_on,\n 'viz_type': o.viz_type,\n } for o in qry.all()]\n return json_success(\n json.dumps(payload, default=utils.json_int_dttm_ser))\n\n @api\n @has_access_api\n @expose('/fave_slices', methods=['GET'])\n @expose('/fave_slices/<user_id>/', methods=['GET'])\n def fave_slices(self, user_id=None):\n \"\"\"Favorite slices for a user\"\"\"\n if not user_id:\n user_id = g.user.id\n qry = (\n db.session.query(\n models.Slice,\n models.FavStar.dttm,\n )\n .join(\n models.FavStar,\n sqla.and_(\n models.FavStar.user_id == int(user_id),\n models.FavStar.class_name == 'slice',\n models.Slice.id == models.FavStar.obj_id,\n ),\n )\n .order_by(\n models.FavStar.dttm.desc(),\n )\n )\n payload = []\n for o in qry.all():\n d = {\n 'id': o.Slice.id,\n 'title': o.Slice.slice_name,\n 'url': o.Slice.slice_url,\n 'dttm': o.dttm,\n 'viz_type': o.Slice.viz_type,\n }\n if o.Slice.created_by:\n user = o.Slice.created_by\n d['creator'] = str(user)\n d['creator_url'] = '/superset/profile/{}/'.format(\n user.username)\n payload.append(d)\n return json_success(\n json.dumps(payload, default=utils.json_int_dttm_ser))\n\n @api\n @has_access_api\n @expose('/warm_up_cache/', methods=['GET'])\n def warm_up_cache(self):\n \"\"\"Warms up the cache for the slice or table.\n\n Note for slices a force refresh occurs.\n \"\"\"\n slices = None\n session = db.session()\n slice_id = request.args.get('slice_id')\n table_name = request.args.get('table_name')\n db_name = request.args.get('db_name')\n\n if not slice_id and not (table_name and db_name):\n return json_error_response(__(\n 'Malformed request. 
slice_id or table_name and db_name '\n 'arguments are expected'), status=400)\n if slice_id:\n slices = session.query(models.Slice).filter_by(id=slice_id).all()\n if not slices:\n return json_error_response(__(\n 'Chart %(id)s not found', id=slice_id), status=404)\n elif table_name and db_name:\n SqlaTable = ConnectorRegistry.sources['table']\n table = (\n session.query(SqlaTable)\n .join(models.Database)\n .filter(\n models.Database.database_name == db_name or\n SqlaTable.table_name == table_name)\n ).first()\n if not table:\n return json_error_response(__(\n \"Table %(t)s wasn't found in the database %(d)s\",\n t=table_name, s=db_name), status=404)\n slices = session.query(models.Slice).filter_by(\n datasource_id=table.id,\n datasource_type=table.type).all()\n\n for slc in slices:\n try:\n form_data = self.get_form_data(slc.id, use_slice_data=True)[0]\n obj = self.get_viz(\n datasource_type=slc.datasource.type,\n datasource_id=slc.datasource.id,\n form_data=form_data,\n force=True,\n )\n obj.get_json()\n except Exception as e:\n return json_error_response(utils.error_msg_from_exception(e))\n return json_success(json.dumps(\n [{'slice_id': slc.id, 'slice_name': slc.slice_name}\n for slc in slices]))\n\n @has_access_api\n @expose('/favstar/<class_name>/<obj_id>/<action>/')\n def favstar(self, class_name, obj_id, action):\n \"\"\"Toggle favorite stars on Slices and Dashboard\"\"\"\n session = db.session()\n FavStar = models.FavStar # noqa\n count = 0\n favs = session.query(FavStar).filter_by(\n class_name=class_name, obj_id=obj_id,\n user_id=g.user.get_id()).all()\n if action == 'select':\n if not favs:\n session.add(\n FavStar(\n class_name=class_name,\n obj_id=obj_id,\n user_id=g.user.get_id(),\n dttm=datetime.now(),\n ),\n )\n count = 1\n elif action == 'unselect':\n for fav in favs:\n session.delete(fav)\n else:\n count = len(favs)\n session.commit()\n return json_success(json.dumps({'count': count}))\n\n @has_access\n @expose('/dashboard/<dashboard_id>/')\n def dashboard(self, dashboard_id):\n \"\"\"Server side rendering for a dashboard\"\"\"\n session = db.session()\n qry = session.query(models.Dashboard)\n if dashboard_id.isdigit():\n qry = qry.filter_by(id=int(dashboard_id))\n else:\n qry = qry.filter_by(slug=dashboard_id)\n\n dash = qry.one_or_none()\n if not dash:\n abort(404)\n datasources = set()\n for slc in dash.slices:\n datasource = slc.datasource\n if datasource:\n datasources.add(datasource)\n\n if config.get('ENABLE_ACCESS_REQUEST'):\n for datasource in datasources:\n if datasource and not security_manager.datasource_access(datasource):\n flash(\n __(security_manager.get_datasource_access_error_msg(datasource)),\n 'danger')\n return redirect(\n 'superset/request_access/?'\n f'dashboard_id={dash.id}&')\n\n dash_edit_perm = check_ownership(dash, raise_if_false=False) and \\\n security_manager.can_access('can_save_dash', 'Superset')\n dash_save_perm = security_manager.can_access('can_save_dash', 'Superset')\n superset_can_explore = security_manager.can_access('can_explore', 'Superset')\n slice_can_edit = security_manager.can_access('can_edit', 'SliceModelView')\n\n standalone_mode = request.args.get('standalone') == 'true'\n edit_mode = request.args.get('edit') == 'true'\n\n # Hack to log the dashboard_id properly, even when getting a slug\n @log_this\n def dashboard(**kwargs): # noqa\n pass\n dashboard(\n dashboard_id=dash.id,\n dashboard_version='v2',\n dash_edit_perm=dash_edit_perm,\n edit_mode=edit_mode)\n\n dashboard_data = dash.data\n dashboard_data.update({\n 
'standalone_mode': standalone_mode,\n 'dash_save_perm': dash_save_perm,\n 'dash_edit_perm': dash_edit_perm,\n 'superset_can_explore': superset_can_explore,\n 'slice_can_edit': slice_can_edit,\n })\n\n bootstrap_data = {\n 'user_id': g.user.get_id(),\n 'dashboard_data': dashboard_data,\n 'datasources': {ds.uid: ds.data for ds in datasources},\n 'common': self.common_bootsrap_payload(),\n 'editMode': edit_mode,\n }\n\n if request.args.get('json') == 'true':\n return json_success(json.dumps(bootstrap_data))\n\n return self.render_template(\n 'superset/dashboard.html',\n entry='dashboard',\n standalone_mode=standalone_mode,\n title=dash.dashboard_title,\n bootstrap_data=json.dumps(bootstrap_data),\n )\n\n @api\n @log_this\n @expose('/log/', methods=['POST'])\n def log(self):\n return Response(status=200)\n\n @has_access\n @expose('/sync_druid/', methods=['POST'])\n @log_this\n def sync_druid_source(self):\n \"\"\"Syncs the druid datasource in main db with the provided config.\n\n The endpoint takes 3 arguments:\n user - user name to perform the operation as\n cluster - name of the druid cluster\n config - configuration stored in json that contains:\n name: druid datasource name\n dimensions: list of the dimensions, they become druid columns\n with the type STRING\n metrics_spec: list of metrics (dictionary). Metric consists of\n 2 attributes: type and name. Type can be count,\n etc. `count` type is stored internally as longSum\n other fields will be ignored.\n\n Example: {\n 'name': 'test_click',\n 'metrics_spec': [{'type': 'count', 'name': 'count'}],\n 'dimensions': ['affiliate_id', 'campaign', 'first_seen']\n }\n \"\"\"\n payload = request.get_json(force=True)\n druid_config = payload['config']\n user_name = payload['user']\n cluster_name = payload['cluster']\n\n user = security_manager.find_user(username=user_name)\n DruidDatasource = ConnectorRegistry.sources['druid']\n DruidCluster = DruidDatasource.cluster_class\n if not user:\n err_msg = __(\"Can't find User '%(name)s', please ask your admin \"\n 'to create one.', name=user_name)\n logging.error(err_msg)\n return json_error_response(err_msg)\n cluster = db.session.query(DruidCluster).filter_by(\n cluster_name=cluster_name).first()\n if not cluster:\n err_msg = __(\"Can't find DruidCluster with cluster_name = \"\n \"'%(name)s'\", name=cluster_name)\n logging.error(err_msg)\n return json_error_response(err_msg)\n try:\n DruidDatasource.sync_to_db_from_config(\n druid_config, user, cluster)\n except Exception as e:\n logging.exception(utils.error_msg_from_exception(e))\n return json_error_response(utils.error_msg_from_exception(e))\n return Response(status=201)\n\n @has_access\n @expose('/sqllab_viz/', methods=['POST'])\n @log_this\n def sqllab_viz(self):\n SqlaTable = ConnectorRegistry.sources['table']\n data = json.loads(request.form.get('data'))\n table_name = data.get('datasourceName')\n table = (\n db.session.query(SqlaTable)\n .filter_by(table_name=table_name)\n .first()\n )\n if not table:\n table = SqlaTable(table_name=table_name)\n table.database_id = data.get('dbId')\n table.schema = data.get('schema')\n table.template_params = data.get('templateParams')\n table.is_sqllab_view = True\n q = ParsedQuery(data.get('sql'))\n table.sql = q.stripped()\n db.session.add(table)\n cols = []\n for config in data.get('columns'):\n column_name = config.get('name')\n SqlaTable = ConnectorRegistry.sources['table']\n TableColumn = SqlaTable.column_class\n SqlMetric = SqlaTable.metric_class\n col = TableColumn(\n column_name=column_name,\n 
filterable=True,\n groupby=True,\n is_dttm=config.get('is_date', False),\n type=config.get('type', False),\n )\n cols.append(col)\n\n table.columns = cols\n table.metrics = [\n SqlMetric(metric_name='count', expression='count(*)'),\n ]\n db.session.commit()\n return self.json_response(json.dumps({\n 'table_id': table.id,\n }))\n\n @has_access\n @expose('/table/<database_id>/<path:table_name>/<schema>/')\n @log_this\n def table(self, database_id, table_name, schema):\n schema = utils.js_string_to_python(schema)\n table_name = parse.unquote_plus(table_name)\n mydb = db.session.query(models.Database).filter_by(id=database_id).one()\n payload_columns = []\n indexes = []\n primary_key = []\n foreign_keys = []\n try:\n columns = mydb.get_columns(table_name, schema)\n indexes = mydb.get_indexes(table_name, schema)\n primary_key = mydb.get_pk_constraint(table_name, schema)\n foreign_keys = mydb.get_foreign_keys(table_name, schema)\n except Exception as e:\n return json_error_response(utils.error_msg_from_exception(e))\n keys = []\n if primary_key and primary_key.get('constrained_columns'):\n primary_key['column_names'] = primary_key.pop('constrained_columns')\n primary_key['type'] = 'pk'\n keys += [primary_key]\n for fk in foreign_keys:\n fk['column_names'] = fk.pop('constrained_columns')\n fk['type'] = 'fk'\n keys += foreign_keys\n for idx in indexes:\n idx['type'] = 'index'\n keys += indexes\n\n for col in columns:\n dtype = ''\n try:\n dtype = '{}'.format(col['type'])\n except Exception:\n # sqla.types.JSON __str__ has a bug, so using __class__.\n dtype = col['type'].__class__.__name__\n pass\n payload_columns.append({\n 'name': col['name'],\n 'type': dtype.split('(')[0] if '(' in dtype else dtype,\n 'longType': dtype,\n 'keys': [\n k for k in keys\n if col['name'] in k.get('column_names')\n ],\n })\n tbl = {\n 'name': table_name,\n 'columns': payload_columns,\n 'selectStar': mydb.select_star(\n table_name, schema=schema, show_cols=True, indent=True,\n cols=columns, latest_partition=True),\n 'primaryKey': primary_key,\n 'foreignKeys': foreign_keys,\n 'indexes': keys,\n }\n return json_success(json.dumps(tbl))\n\n @has_access\n @expose('/extra_table_metadata/<database_id>/<path:table_name>/<schema>/')\n @log_this\n def extra_table_metadata(self, database_id, table_name, schema):\n schema = utils.js_string_to_python(schema)\n table_name = parse.unquote_plus(table_name)\n mydb = db.session.query(models.Database).filter_by(id=database_id).one()\n payload = mydb.db_engine_spec.extra_table_metadata(\n mydb, table_name, schema)\n return json_success(json.dumps(payload))\n\n @has_access\n @expose('/select_star/<database_id>/<table_name>')\n @expose('/select_star/<database_id>/<table_name>/<schema>')\n @log_this\n def select_star(self, database_id, table_name, schema=None):\n mydb = db.session.query(\n models.Database).filter_by(id=database_id).first()\n return json_success(\n mydb.select_star(\n table_name,\n schema,\n latest_partition=True,\n show_cols=True,\n ),\n )\n\n @expose('/theme/')\n def theme(self):\n return self.render_template('superset/theme.html')\n\n @has_access_api\n @expose('/cached_key/<key>/')\n @log_this\n def cached_key(self, key):\n \"\"\"Returns a key from the cache\"\"\"\n resp = cache.get(key)\n if resp:\n return resp\n return 'nope'\n\n @has_access_api\n @expose('/cache_key_exist/<key>/')\n @log_this\n def cache_key_exist(self, key):\n \"\"\"Returns if a key from cache exist\"\"\"\n key_exist = True if cache.get(key) else False\n status = 200 if key_exist else 404\n return 
json_success(json.dumps({'key_exist': key_exist}),\n status=status)\n\n @has_access_api\n @expose('/results/<key>/')\n @log_this\n def results(self, key):\n \"\"\"Serves a key off of the results backend\"\"\"\n if not results_backend:\n return json_error_response(\"Results backend isn't configured\")\n\n read_from_results_backend_start = now_as_float()\n blob = results_backend.get(key)\n stats_logger.timing(\n 'sqllab.query.results_backend_read',\n now_as_float() - read_from_results_backend_start,\n )\n if not blob:\n return json_error_response(\n 'Data could not be retrieved. '\n 'You may want to re-run the query.',\n status=410,\n )\n\n query = db.session.query(Query).filter_by(results_key=key).one()\n rejected_tables = security_manager.rejected_datasources(\n query.sql, query.database, query.schema)\n if rejected_tables:\n return json_error_response(security_manager.get_table_access_error_msg(\n '{}'.format(rejected_tables)), status=403)\n\n payload = utils.zlib_decompress_to_string(blob)\n display_limit = app.config.get('DEFAULT_SQLLAB_LIMIT', None)\n if display_limit:\n payload_json = json.loads(payload)\n payload_json['data'] = payload_json['data'][:display_limit]\n return json_success(\n json.dumps(\n payload_json,\n default=utils.json_iso_dttm_ser,\n ignore_nan=True,\n ),\n )\n\n @has_access_api\n @expose('/stop_query/', methods=['POST'])\n @log_this\n def stop_query(self):\n client_id = request.form.get('client_id')\n try:\n query = (\n db.session.query(Query)\n .filter_by(client_id=client_id).one()\n )\n query.status = QueryStatus.STOPPED\n db.session.commit()\n except Exception:\n pass\n return self.json_response('OK')\n\n @has_access_api\n @expose('/sql_json/', methods=['POST', 'GET'])\n @log_this\n def sql_json(self):\n \"\"\"Runs arbitrary sql and returns and json\"\"\"\n async_ = request.form.get('runAsync') == 'true'\n sql = request.form.get('sql')\n database_id = request.form.get('database_id')\n schema = request.form.get('schema') or None\n template_params = json.loads(\n request.form.get('templateParams') or '{}')\n limit = int(request.form.get('queryLimit', 0))\n if limit < 0:\n logging.warning(\n 'Invalid limit of {} specified. 
Defaulting to max limit.'.format(limit))\n limit = 0\n limit = limit or app.config.get('SQL_MAX_ROW')\n\n session = db.session()\n mydb = session.query(models.Database).filter_by(id=database_id).first()\n\n if not mydb:\n json_error_response(\n 'Database with id {} is missing.'.format(database_id))\n\n rejected_tables = security_manager.rejected_datasources(sql, mydb, schema)\n if rejected_tables:\n return json_error_response(\n security_manager.get_table_access_error_msg(rejected_tables),\n link=security_manager.get_table_access_link(rejected_tables),\n status=403)\n session.commit()\n\n select_as_cta = request.form.get('select_as_cta') == 'true'\n tmp_table_name = request.form.get('tmp_table_name')\n if select_as_cta and mydb.force_ctas_schema:\n tmp_table_name = '{}.{}'.format(\n mydb.force_ctas_schema,\n tmp_table_name,\n )\n\n client_id = request.form.get('client_id') or utils.shortid()[:10]\n limits = [mydb.db_engine_spec.get_limit_from_sql(sql), limit]\n query = Query(\n database_id=int(database_id),\n limit=min(lim for lim in limits if lim is not None),\n sql=sql,\n schema=schema,\n select_as_cta=request.form.get('select_as_cta') == 'true',\n start_time=now_as_float(),\n tab_name=request.form.get('tab'),\n status=QueryStatus.PENDING if async_ else QueryStatus.RUNNING,\n sql_editor_id=request.form.get('sql_editor_id'),\n tmp_table_name=tmp_table_name,\n user_id=g.user.get_id() if g.user else None,\n client_id=client_id,\n )\n session.add(query)\n session.flush()\n query_id = query.id\n session.commit() # shouldn't be necessary\n if not query_id:\n raise Exception(_('Query record was not created as expected.'))\n logging.info('Triggering query_id: {}'.format(query_id))\n\n try:\n template_processor = get_template_processor(\n database=query.database, query=query)\n rendered_query = template_processor.process_template(\n query.sql,\n **template_params)\n except Exception as e:\n return json_error_response(\n 'Template rendering failed: {}'.format(utils.error_msg_from_exception(e)))\n\n # Async request.\n if async_:\n logging.info('Running query on a Celery worker')\n # Ignore the celery future object and the request may time out.\n try:\n sql_lab.get_sql_results.delay(\n query_id,\n rendered_query,\n return_results=False,\n store_results=not query.select_as_cta,\n user_name=g.user.username if g.user else None,\n start_time=now_as_float())\n except Exception as e:\n logging.exception(e)\n msg = _(\n 'Failed to start remote query on a worker. 
'\n 'Tell your administrator to verify the availability of '\n 'the message queue.')\n query.status = QueryStatus.FAILED\n query.error_message = msg\n session.commit()\n return json_error_response('{}'.format(msg))\n\n resp = json_success(json.dumps(\n {'query': query.to_dict()}, default=utils.json_int_dttm_ser,\n ignore_nan=True), status=202)\n session.commit()\n return resp\n\n # Sync request.\n try:\n timeout = config.get('SQLLAB_TIMEOUT')\n timeout_msg = (\n f'The query exceeded the {timeout} seconds timeout.')\n with utils.timeout(seconds=timeout,\n error_message=timeout_msg):\n # pylint: disable=no-value-for-parameter\n data = sql_lab.get_sql_results(\n query_id,\n rendered_query,\n return_results=True,\n user_name=g.user.username if g.user else None)\n payload = json.dumps(\n data,\n default=utils.pessimistic_json_iso_dttm_ser,\n ignore_nan=True,\n encoding=None,\n )\n except Exception as e:\n logging.exception(e)\n return json_error_response('{}'.format(e))\n if data.get('status') == QueryStatus.FAILED:\n return json_error_response(payload=data)\n return json_success(payload)\n\n @has_access\n @expose('/csv/<client_id>')\n @log_this\n def csv(self, client_id):\n \"\"\"Download the query results as csv.\"\"\"\n logging.info('Exporting CSV file [{}]'.format(client_id))\n query = (\n db.session.query(Query)\n .filter_by(client_id=client_id)\n .one()\n )\n\n rejected_tables = security_manager.rejected_datasources(\n query.sql, query.database, query.schema)\n if rejected_tables:\n flash(\n security_manager.get_table_access_error_msg('{}'.format(rejected_tables)))\n return redirect('/')\n blob = None\n if results_backend and query.results_key:\n logging.info(\n 'Fetching CSV from results backend '\n '[{}]'.format(query.results_key))\n blob = results_backend.get(query.results_key)\n if blob:\n logging.info('Decompressing')\n json_payload = utils.zlib_decompress_to_string(blob)\n obj = json.loads(json_payload)\n columns = [c['name'] for c in obj['columns']]\n df = pd.DataFrame.from_records(obj['data'], columns=columns)\n logging.info('Using pandas to convert to CSV')\n csv = df.to_csv(index=False, **config.get('CSV_EXPORT'))\n else:\n logging.info('Running a query to turn into CSV')\n sql = query.select_sql or query.executed_sql\n df = query.database.get_df(sql, query.schema)\n # TODO(bkyryliuk): add compression=gzip for big files.\n csv = df.to_csv(index=False, **config.get('CSV_EXPORT'))\n response = Response(csv, mimetype='text/csv')\n response.headers['Content-Disposition'] = f'attachment; filename={query.name}.csv'\n logging.info('Ready to return response')\n return response\n\n @api\n @handle_api_exception\n @has_access\n @expose('/fetch_datasource_metadata')\n @log_this\n def fetch_datasource_metadata(self):\n datasource_id, datasource_type = (\n request.args.get('datasourceKey').split('__'))\n datasource = ConnectorRegistry.get_datasource(\n datasource_type, datasource_id, db.session)\n # Check if datasource exists\n if not datasource:\n return json_error_response(DATASOURCE_MISSING_ERR)\n\n # Check permission for datasource\n security_manager.assert_datasource_permission(datasource)\n return json_success(json.dumps(datasource.data))\n\n @has_access_api\n @expose('/queries/<last_updated_ms>')\n def queries(self, last_updated_ms):\n \"\"\"Get the updated queries.\"\"\"\n stats_logger.incr('queries')\n if not g.user.get_id():\n return json_error_response(\n 'Please login to access the queries.', status=403)\n\n # Unix time, milliseconds.\n last_updated_ms_int = 
int(float(last_updated_ms)) if last_updated_ms else 0\n\n # UTC date time, same that is stored in the DB.\n last_updated_dt = utils.EPOCH + timedelta(seconds=last_updated_ms_int / 1000)\n\n sql_queries = (\n db.session.query(Query)\n .filter(\n Query.user_id == g.user.get_id(),\n Query.changed_on >= last_updated_dt,\n )\n .all()\n )\n dict_queries = {q.client_id: q.to_dict() for q in sql_queries}\n\n now = int(round(time.time() * 1000))\n\n unfinished_states = [\n QueryStatus.PENDING,\n QueryStatus.RUNNING,\n ]\n\n queries_to_timeout = [\n client_id for client_id, query_dict in dict_queries.items()\n if (\n query_dict['state'] in unfinished_states and (\n now - query_dict['startDttm'] >\n config.get('SQLLAB_ASYNC_TIME_LIMIT_SEC') * 1000\n )\n )\n ]\n\n if queries_to_timeout:\n update(Query).where(\n and_(\n Query.user_id == g.user.get_id(),\n Query.client_id in queries_to_timeout,\n ),\n ).values(state=QueryStatus.TIMED_OUT)\n\n for client_id in queries_to_timeout:\n dict_queries[client_id]['status'] = QueryStatus.TIMED_OUT\n\n return json_success(\n json.dumps(dict_queries, default=utils.json_int_dttm_ser))\n\n @has_access\n @expose('/search_queries')\n @log_this\n def search_queries(self):\n \"\"\"Search for queries.\"\"\"\n query = db.session.query(Query)\n search_user_id = request.args.get('user_id')\n database_id = request.args.get('database_id')\n search_text = request.args.get('search_text')\n status = request.args.get('status')\n # From and To time stamp should be Epoch timestamp in seconds\n from_time = request.args.get('from')\n to_time = request.args.get('to')\n\n if search_user_id:\n # Filter on db Id\n query = query.filter(Query.user_id == search_user_id)\n\n if database_id:\n # Filter on db Id\n query = query.filter(Query.database_id == database_id)\n\n if status:\n # Filter on status\n query = query.filter(Query.status == status)\n\n if search_text:\n # Filter on search text\n query = query \\\n .filter(Query.sql.like('%{}%'.format(search_text)))\n\n if from_time:\n query = query.filter(Query.start_time > int(from_time))\n\n if to_time:\n query = query.filter(Query.start_time < int(to_time))\n\n query_limit = config.get('QUERY_SEARCH_LIMIT', 1000)\n sql_queries = (\n query.order_by(Query.start_time.asc())\n .limit(query_limit)\n .all()\n )\n\n dict_queries = [q.to_dict() for q in sql_queries]\n\n return Response(\n json.dumps(dict_queries, default=utils.json_int_dttm_ser),\n status=200,\n mimetype='application/json')\n\n @app.errorhandler(500)\n def show_traceback(self):\n return render_template(\n 'superset/traceback.html',\n error_msg=get_error_msg(),\n ), 500\n\n @expose('/welcome')\n def welcome(self):\n \"\"\"Personalized welcome page\"\"\"\n if not g.user or not g.user.get_id():\n return redirect(appbuilder.get_url_for_login)\n\n welcome_dashboard_id = (\n db.session\n .query(UserAttribute.welcome_dashboard_id)\n .filter_by(user_id=g.user.get_id())\n .scalar()\n )\n if welcome_dashboard_id:\n return self.dashboard(str(welcome_dashboard_id))\n\n payload = {\n 'user': bootstrap_user_data(),\n 'common': self.common_bootsrap_payload(),\n }\n\n return self.render_template(\n 'superset/basic.html',\n entry='welcome',\n title='Superset',\n bootstrap_data=json.dumps(payload, default=utils.json_iso_dttm_ser),\n )\n\n @has_access\n @expose('/profile/<username>/')\n def profile(self, username):\n \"\"\"User profile page\"\"\"\n if not username and g.user:\n username = g.user.username\n\n payload = {\n 'user': bootstrap_user_data(username, include_perms=True),\n 'common': 
self.common_bootsrap_payload(),\n }\n\n return self.render_template(\n 'superset/basic.html',\n title=_(\"%(user)s's profile\", user=username),\n entry='profile',\n bootstrap_data=json.dumps(payload, default=utils.json_iso_dttm_ser),\n )\n\n @has_access\n @expose('/sqllab')\n def sqllab(self):\n \"\"\"SQL Editor\"\"\"\n d = {\n 'defaultDbId': config.get('SQLLAB_DEFAULT_DBID'),\n 'common': self.common_bootsrap_payload(),\n }\n return self.render_template(\n 'superset/basic.html',\n entry='sqllab',\n bootstrap_data=json.dumps(d, default=utils.json_iso_dttm_ser),\n )\n\n @api\n @handle_api_exception\n @has_access_api\n @expose('/slice_query/<slice_id>/')\n def slice_query(self, slice_id):\n \"\"\"\n This method exposes an API endpoint to\n get the database query string for this slice\n \"\"\"\n viz_obj = self.get_viz(slice_id)\n security_manager.assert_datasource_permission(viz_obj.datasource)\n return self.get_query_string_response(viz_obj)\n\n @api\n @has_access_api\n @expose('/schemas_access_for_csv_upload')\n def schemas_access_for_csv_upload(self):\n \"\"\"\n This method exposes an API endpoint to\n get the schema access control settings for csv upload in this database\n \"\"\"\n if not request.args.get('db_id'):\n return json_error_response(\n 'No database is allowed for your csv upload')\n\n db_id = int(request.args.get('db_id'))\n database = (\n db.session\n .query(models.Database)\n .filter_by(id=db_id)\n .one()\n )\n try:\n schemas_allowed = database.get_schema_access_for_csv_upload()\n if (security_manager.database_access(database) or\n security_manager.all_datasource_access()):\n return self.json_response(schemas_allowed)\n # the list schemas_allowed should not be empty here\n # and the list schemas_allowed_processed returned from security_manager\n # should not be empty either,\n # otherwise the database should have been filtered out\n # in CsvToDatabaseForm\n schemas_allowed_processed = security_manager.schemas_accessible_by_user(\n database, schemas_allowed, False)\n return self.json_response(schemas_allowed_processed)\n except Exception:\n return json_error_response((\n 'Failed to fetch schemas allowed for csv upload in this database! 
'\n 'Please contact Superset Admin!\\n\\n'\n 'The error message returned was:\\n{}').format(traceback.format_exc()))\n\n\nappbuilder.add_view_no_menu(Superset)\n\n\nclass CssTemplateModelView(SupersetModelView, DeleteMixin):\n datamodel = SQLAInterface(models.CssTemplate)\n\n list_title = _('List Css Template')\n show_title = _('Show Css Template')\n add_title = _('Add Css Template')\n edit_title = _('Edit Css Template')\n\n list_columns = ['template_name']\n edit_columns = ['template_name', 'css']\n add_columns = edit_columns\n label_columns = {\n 'template_name': _('Template Name'),\n }\n\n\nclass CssTemplateAsyncModelView(CssTemplateModelView):\n list_columns = ['template_name', 'css']\n\n\nappbuilder.add_separator('Sources')\nappbuilder.add_view(\n CssTemplateModelView,\n 'CSS Templates',\n label=__('CSS Templates'),\n icon='fa-css3',\n category='Manage',\n category_label=__('Manage'),\n category_icon='')\n\n\nappbuilder.add_view_no_menu(CssTemplateAsyncModelView)\n\nappbuilder.add_link(\n 'SQL Editor',\n label=_('SQL Editor'),\n href='/superset/sqllab',\n category_icon='fa-flask',\n icon='fa-flask',\n category='SQL Lab',\n category_label=__('SQL Lab'),\n)\n\nappbuilder.add_link(\n 'Query Search',\n label=_('Query Search'),\n href='/superset/sqllab#search',\n icon='fa-search',\n category_icon='fa-flask',\n category='SQL Lab',\n category_label=__('SQL Lab'),\n)\n\nappbuilder.add_link(\n 'Upload a CSV',\n label=__('Upload a CSV'),\n href='/csvtodatabaseview/form',\n icon='fa-upload',\n category='Sources',\n category_label=__('Sources'),\n category_icon='fa-wrench')\nappbuilder.add_separator('Sources')\n\n\[email protected]_request\ndef apply_caching(response):\n \"\"\"Applies the configuration's http headers to all responses\"\"\"\n for k, v in config.get('HTTP_HEADERS').items():\n response.headers[k] = v\n return response\n\n\n# ---------------------------------------------------------------------\n# Redirecting URL from previous names\nclass RegexConverter(BaseConverter):\n def __init__(self, url_map, *items):\n super(RegexConverter, self).__init__(url_map)\n self.regex = items[0]\n\n\napp.url_map.converters['regex'] = RegexConverter\n\n\[email protected]('/<regex(\"panoramix\\/.*\"):url>')\ndef panoramix(url): # noqa\n return redirect(request.full_path.replace('panoramix', 'superset'))\n\n\[email protected]('/<regex(\"caravel\\/.*\"):url>')\ndef caravel(url): # noqa\n return redirect(request.full_path.replace('caravel', 'superset'))\n\n\n# ---------------------------------------------------------------------\n" ]
[ [ "pandas.DataFrame.from_records" ] ]
rucmlcv/IEPT_FSL
[ "54b0a58b5928771ef8dcc6dcd6f4314739ffe14a" ]
[ "SSL/dataloader/samplers.py" ]
[ "import torch\nimport numpy as np\n\n\nclass CategoriesSamplerBak():\n\n def __init__(self, label, n_batch, n_cls, n_per): #n_batch 为 一个epoch的episode数亩\n \n self.n_batch = n_batch\n self.n_cls = n_cls\n self.n_per = n_per\n self.n_step = 0\n self.mark = {}\n self.r_clses = None\n\n label = np.array(label)\n self.m_ind = []\n for i in range(max(label) + 1):\n ind = np.argwhere(label == i).reshape(-1)\n ind = torch.from_numpy(ind)\n self.m_ind.append(ind)\n\n def __len__(self):\n return self.n_batch\n \n def __iter__(self):\n for i_batch in range(self.n_batch):\n batch = []\n if self.r_clses is None:\n classes = torch.randperm(len(self.m_ind))[:self.n_cls]\n self.r_clses = classes\n else:\n classes = self.r_clses\n self.r_clses = None\n\n for c in classes:\n l = self.m_ind[c]\n self.mark[l] = True\n\n pos = torch.randperm(len(l))[:self.n_per]\n batch.append(l[pos])\n batch = torch.stack(batch).t().reshape(-1)\n yield batch\n\n def getmark(self):\n count = 0\n for c in self.m_ind:\n if c not in self.mark:\n count += 1\n print(count)\n\n\nclass CategoriesSampler():\n\n def __init__(self, label, n_batch, n_cls, n_per): #n_batch 为 一个epoch的episode数亩\n \n self.n_batch = n_batch\n self.n_cls = n_cls\n self.n_per = n_per\n self.n_step = 0\n\n label = np.array(label)\n self.m_ind = []\n for i in range(max(label) + 1):\n ind = np.argwhere(label == i).reshape(-1)\n ind = torch.from_numpy(ind)\n self.m_ind.append(ind)\n\n def __len__(self):\n return self.n_batch\n \n def __iter__(self):\n for i_batch in range(self.n_batch):\n batch = []\n\n classes = torch.randperm(len(self.m_ind))[:self.n_cls]\n\n for c in classes:\n l = self.m_ind[c]\n pos = torch.randperm(len(l))[:self.n_per]\n batch.append(l[pos])\n batch = torch.stack(batch).t().reshape(-1)\n yield batch\n\n\n" ]
[ [ "numpy.array", "torch.stack", "torch.from_numpy", "numpy.argwhere" ] ]
jianwang-ntu/deeplift_tf2.0
[ "957511e3e307fdb93f65bf54cc2b5214e5374f49" ]
[ "tests/conversion/sequential/test_conv2d_model_same_padding.py" ]
[ "from __future__ import division\nfrom __future__ import print_function\nfrom __future__ import absolute_import\nimport unittest\nfrom unittest import skip\nimport sys\nimport os\nimport numpy as np\nnp.random.seed(1234)\nfrom deeplift.conversion import kerasapi_conversion as kc\nimport deeplift.layers as layers\nfrom deeplift.layers import NonlinearMxtsMode\nfrom deeplift.util import compile_func\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import models\nfrom tensorflow.keras import backend as K\n\n\nclass TestConvolutionalModel(unittest.TestCase):\n\n\n def setUp(self):\n self.inp = (np.random.randn(10*10*51*51)\n .reshape(10,10,51,51)).transpose(0,2,3,1)\n self.keras_model = keras.models.Sequential()\n conv_layer1 = keras.layers.convolutional.Convolution2D(\n nb_filter=20, nb_row=4, nb_col=4, subsample=(2,2),\n activation=\"relu\",\n padding='same',\n input_shape=(51,51,10))\n self.keras_model.add(conv_layer1)\n conv_layer2 = keras.layers.convolutional.Convolution2D(\n nb_filter=10, nb_row=4, nb_col=4, subsample=(2,2),\n activation=\"relu\",\n padding='same')\n self.keras_model.add(conv_layer2)\n self.keras_model.add(keras.layers.pooling.MaxPooling2D(\n pool_size=(4,4), strides=(2,2),\n padding='same')) \n self.keras_model.add(keras.layers.pooling.AveragePooling2D(\n pool_size=(4,4), strides=(2,2),\n padding='same')) \n self.keras_model.add(keras.layers.Flatten())\n self.keras_model.add(keras.layers.Dense(output_dim=1))\n self.keras_model.add(keras.layers.core.Activation(\"sigmoid\"))\n self.keras_model.compile(loss=\"mse\", optimizer=\"sgd\")\n self.keras_output_fprop_func = compile_func(\n [self.keras_model.layers[0].input,\n K.learning_phase()],\n self.keras_model.layers[-1].output)\n\n grad = tf.gradients(ys=tf.reduce_sum(\n input_tensor=self.keras_model.layers[-2].output[:,0]),\n xs=[self.keras_model.layers[0].input])[0]\n self.grad_func = compile_func(\n [self.keras_model.layers[0].input,\n K.learning_phase()], grad)\n\n self.saved_file_path = \"conv2model_samepadding.h5\"\n if (os.path.isfile(self.saved_file_path)):\n os.remove(self.saved_file_path)\n self.keras_model.save(self.saved_file_path)\n \n def test_convert_conv2d_model_forward_prop(self): \n deeplift_model =\\\n kc.convert_model_from_saved_files(\n self.saved_file_path,\n nonlinear_mxts_mode=NonlinearMxtsMode.Rescale) \n deeplift_fprop_func = compile_func(\n [deeplift_model.get_layers()[0].get_activation_vars()],\n deeplift_model.get_layers()[-1].get_activation_vars())\n np.testing.assert_almost_equal(\n deeplift_fprop_func(self.inp),\n self.keras_output_fprop_func([self.inp, 0]),\n decimal=6)\n \n def test_convert_conv2d_model_compute_scores(self): \n deeplift_model =\\\n kc.convert_model_from_saved_files(\n self.saved_file_path,\n nonlinear_mxts_mode=NonlinearMxtsMode.Rescale) \n deeplift_contribs_func = deeplift_model.\\\n get_target_contribs_func(\n find_scores_layer_idx=0,\n target_layer_idx=-2)\n np.testing.assert_almost_equal(\n deeplift_contribs_func(task_idx=0,\n input_data_list=[self.inp],\n batch_size=10,\n progress_update=None),\n #when biases are 0 and ref is 0, deeplift is the same as grad*inp \n self.grad_func([self.inp, 0])*self.inp, decimal=6)\n\n def tearDown(self):\n if (os.path.isfile(self.saved_file_path)):\n os.remove(self.saved_file_path)\n" ]
[ [ "tensorflow.keras.layers.pooling.MaxPooling2D", "tensorflow.keras.models.Sequential", "tensorflow.keras.layers.Flatten", "tensorflow.keras.layers.convolutional.Convolution2D", "numpy.random.seed", "numpy.random.randn", "tensorflow.keras.backend.learning_phase", "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.pooling.AveragePooling2D", "tensorflow.keras.layers.core.Activation", "tensorflow.reduce_sum" ] ]
Rhcsky/KoSpeech
[ "dbff78140d150dcc71d14d65f81c011847e9574d" ]
[ "kospeech/optim/adamp.py" ]
[ "# AdamP\r\n# Copyright (c) 2020-present NAVER Corp.\r\n# MIT license\r\n\r\nimport torch\r\nfrom torch.optim.optimizer import Optimizer\r\nimport math\r\n\r\n\r\nclass AdamP(Optimizer):\r\n \"\"\"\r\n Paper: \"AdamP: Slowing Down the Slowdown for Momentum Optimizers on Scale-invariant Weights\"\r\n\r\n Copied from https://github.com/clovaai/AdamP/\r\n Copyright (c) 2020 Naver Corp.\r\n MIT License\r\n \"\"\"\r\n def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,\r\n weight_decay=0, delta=0.1, wd_ratio=0.1, nesterov=False):\r\n defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay,\r\n delta=delta, wd_ratio=wd_ratio, nesterov=nesterov)\r\n super(AdamP, self).__init__(params, defaults)\r\n\r\n def _channel_view(self, x):\r\n return x.view(x.size(0), -1)\r\n\r\n def _layer_view(self, x):\r\n return x.view(1, -1)\r\n\r\n def _cosine_similarity(self, x, y, eps, view_func):\r\n x = view_func(x)\r\n y = view_func(y)\r\n\r\n x_norm = x.norm(dim=1).add_(eps)\r\n y_norm = y.norm(dim=1).add_(eps)\r\n dot = (x * y).sum(dim=1)\r\n\r\n return dot.abs() / x_norm / y_norm\r\n\r\n def _projection(self, p, grad, perturb, delta, wd_ratio, eps):\r\n wd = 1\r\n expand_size = [-1] + [1] * (len(p.shape) - 1)\r\n for view_func in [self._channel_view, self._layer_view]:\r\n\r\n cosine_sim = self._cosine_similarity(grad, p.data, eps, view_func)\r\n\r\n if cosine_sim.max() < delta / math.sqrt(view_func(p.data).size(1)):\r\n p_n = p.data / view_func(p.data).norm(dim=1).view(expand_size).add_(eps)\r\n perturb -= p_n * view_func(p_n * perturb).sum(dim=1).view(expand_size)\r\n wd = wd_ratio\r\n\r\n return perturb, wd\r\n\r\n return perturb, wd\r\n\r\n def step(self, closure=None):\r\n loss = None\r\n if closure is not None:\r\n loss = closure()\r\n\r\n for group in self.param_groups:\r\n for p in group['params']:\r\n if p.grad is None:\r\n continue\r\n\r\n grad = p.grad.data\r\n beta1, beta2 = group['betas']\r\n nesterov = group['nesterov']\r\n\r\n state = self.state[p]\r\n\r\n # State initialization\r\n if len(state) == 0:\r\n state['step'] = 0\r\n state['exp_avg'] = torch.zeros_like(p.data)\r\n state['exp_avg_sq'] = torch.zeros_like(p.data)\r\n\r\n # Adam\r\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\r\n\r\n state['step'] += 1\r\n bias_correction1 = 1 - beta1 ** state['step']\r\n bias_correction2 = 1 - beta2 ** state['step']\r\n\r\n exp_avg.mul_(beta1).add_(1 - beta1, grad)\r\n exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)\r\n\r\n denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])\r\n step_size = group['lr'] / bias_correction1\r\n\r\n if nesterov:\r\n perturb = (beta1 * exp_avg + (1 - beta1) * grad) / denom\r\n else:\r\n perturb = exp_avg / denom\r\n\r\n # Projection\r\n wd_ratio = 1\r\n if len(p.shape) > 1:\r\n perturb, wd_ratio = self._projection(p, grad, perturb, group['delta'], group['wd_ratio'],\r\n group['eps'])\r\n\r\n # Weight decay\r\n if group['weight_decay'] > 0:\r\n p.data.mul_(1 - group['lr'] * group['weight_decay'] * wd_ratio)\r\n\r\n # Step\r\n p.data.add_(-step_size, perturb)\r\n\r\n return loss\r\n" ]
[ [ "torch.zeros_like" ] ]
jlmaurer/PyRate
[ "bf1a3d916f1c83e7a0dda3ecc15858f8f1e4ee84" ]
[ "tests/test_mst.py" ]
[ "# This Python module is part of the PyRate software package.\n#\n# Copyright 2017 Geoscience Australia\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n'''\nThis module contains tests for the mst.py PyRate module.\n'''\n\nimport glob\nimport os\nimport shutil\nimport subprocess\nimport tempfile\nimport unittest\nfrom itertools import product\nfrom numpy import empty, array, nan, isnan, sum as nsum\n\nimport numpy as np\nfrom tests.common import MockIfg, small5_mock_ifgs, small_data_setup\n\nfrom pyrate import algorithm\nfrom pyrate import config as cf\nfrom pyrate import mst\nfrom pyrate.scripts import run_pyrate, run_prepifg\nfrom pyrate.shared import IfgPart, Tile, create_tiles\nfrom tests import common\n\n\nclass MSTTests(unittest.TestCase):\n '''Basic verification of minimum spanning tree (MST) functionality.'''\n\n def setUp(self):\n self.ifgs = small_data_setup()\n\n def test_mst_matrix_as_array(self):\n # Verifies MST matrix func returns array with dict/trees in each cell\n for i in self.ifgs[3:]:\n i.phase_data[0, 1] = 0 # partial stack of NODATA to one cell\n\n for i in self.ifgs:\n i.convert_to_nans() # zeros to NaN/NODATA\n\n epochs = algorithm.get_epochs(self.ifgs)[0]\n res = mst._mst_matrix_as_array(self.ifgs)\n ys, xs = res.shape\n\n for y, x in product(range(ys), range(xs)):\n r = res[y, x]\n num_nodes = len(r)\n self.assertTrue(num_nodes < len(epochs.dates))\n\n stack = array([i.phase_data[y, x] for i in self.ifgs]) # 17 ifg stack\n self.assertTrue(0 == nsum(stack == 0)) # all 0s should be converted\n nc = nsum(isnan(stack))\n exp_count = len(epochs.dates) - 1\n\n if nc == 0:\n self.assertEqual(num_nodes, exp_count)\n elif nc > 5:\n # rough test: too many nans must reduce the total tree size\n self.assertTrue(num_nodes <= (17-nc))\n\n def test_mst_matrix_as_ifgs(self):\n # ensure only ifgs are returned, not individual MST graphs\n ifgs = small5_mock_ifgs()\n nifgs = len(ifgs)\n ys, xs = ifgs[0].shape\n result = mst._mst_matrix_ifgs_only(ifgs)\n\n for coord in product(range(ys), range(xs)):\n stack = (i.phase_data[coord] for i in self.ifgs)\n nc = nsum([isnan(n) for n in stack])\n self.assertTrue(len(result[coord]) <= (nifgs - nc))\n\n # HACK: type testing here is a bit grubby\n self.assertTrue(all([isinstance(i, MockIfg) for i in ifgs]))\n\n def test_partial_nan_pixel_stack(self):\n # Ensure a limited # of coherent cells results in a smaller MST tree\n num_coherent = 3\n\n def assert_equal():\n res = mst._mst_matrix_as_array(mock_ifgs)\n self.assertEqual(len(res[0,0]), num_coherent)\n\n mock_ifgs = [MockIfg(i, 1, 1) for i in self.ifgs]\n for m in mock_ifgs[num_coherent:]:\n m.phase_data[:] = nan\n assert_equal()\n\n # fill in more nans leaving only one ifg\n for m in mock_ifgs[1:num_coherent]:\n m.phase_data[:] = nan\n num_coherent = 1\n assert_equal()\n\n def test_all_nan_pixel_stack(self):\n # ensure full stack of NaNs in an MST pixel classifies to NaN\n mock_ifgs = [MockIfg(i, 1, 1) for i in self.ifgs]\n for m in mock_ifgs:\n m.phase_data[:] = nan\n\n res = 
mst._mst_matrix_as_array(mock_ifgs)\n exp = empty((1,1), dtype=object)\n exp[:] = nan\n\n shape = (mock_ifgs[0].nrows, mock_ifgs[0].ncols)\n self.assertTrue(res.shape == shape)\n self.assertEqual(exp, res)\n\n\nclass DefaultMSTTests(unittest.TestCase):\n\n def test_default_mst(self):\n # default MST from full set of Ifgs shouldn't drop any nodes\n ifgs = small5_mock_ifgs()\n dates = [(i.master, i.slave) for i in ifgs]\n\n res = mst.mst_from_ifgs(ifgs)[0]\n num_edges = len(res)\n self.assertEqual(num_edges, len(ifgs))\n\n # test edges, note node order can be reversed\n for edge in res:\n self.assertTrue(edge in dates or (edge[1], edge[0]) in dates)\n\n # check all nodes exist in this default tree\n mst_dates = set(res)\n mst_dates = list(sum(mst_dates, ()))\n for i in ifgs:\n for node in (i.master, i.slave):\n self.assertIn(node, mst_dates)\n\n\nclass NetworkxMSTTreeCheck(unittest.TestCase):\n def setUp(self):\n self.ifgs = small_data_setup()\n\n def test_assert_is_not_tree(self):\n non_overlapping = [1, 2, 5, 6, 12, 13, 14, 15, 16, 17]\n ifgs_non_overlapping = [ifg for i, ifg in enumerate(self.ifgs)\n if i+1 in non_overlapping]\n edges, is_tree, ntrees, _ = mst.mst_from_ifgs(ifgs_non_overlapping)\n self.assertFalse(is_tree)\n self.assertEqual(4, ntrees)\n\n def test_small_data_tree(self):\n self.assertTrue(mst.mst_from_ifgs(self.ifgs)[1])\n\n def test_assert_is_tree(self):\n overlapping = [1, 2, 3, 4, 6, 7, 10, 11, 16, 17]\n\n ifgs_overlapping = [ifg for i, ifg in enumerate(self.ifgs)\n if (i+1 in overlapping)]\n edges, is_tree, ntrees, _ = mst.mst_from_ifgs(ifgs_overlapping)\n self.assertFalse(is_tree)\n self.assertEqual(4, ntrees)\n\n def test_assert_two_trees_overlapping(self):\n overlapping = [3, 4, 5, 6, 7, 8, 9, 10, 11, 16, 17]\n\n ifgs_overlapping = [ifg for i, ifg in enumerate(self.ifgs)\n if (i+1 in overlapping)]\n edges, is_tree, ntrees, _ = mst.mst_from_ifgs(ifgs_overlapping)\n self.assertFalse(is_tree)\n self.assertEqual(2, ntrees)\n\n def test_assert_two_trees_non_overlapping(self):\n non_overlapping = [2, 5, 6, 12, 13, 15]\n ifgs_non_overlapping = [ifg for i, ifg in enumerate(self.ifgs)\n if i+1 in non_overlapping]\n edges, is_tree, ntrees, _ = mst.mst_from_ifgs(ifgs_non_overlapping)\n self.assertFalse(is_tree)\n self.assertEqual(2, ntrees)\n\n\nclass IfgPartTest(unittest.TestCase):\n\n def setUp(self):\n self.ifgs = small_data_setup()\n self.params = cf.get_config_params(common.TEST_CONF_ROIPAC)\n\n def test_ifg_part_shape_and_slice(self):\n r_start = 0\n r_end = 10\n for i in self.ifgs:\n tile = Tile(0, top_left=(r_start, 0), bottom_right=(r_end, i.ncols))\n ifg_part = IfgPart(i.data_path, tile)\n self.assertEqual(ifg_part.phase_data.shape,\n (r_end-r_start, i.phase_data.shape[1]))\n np.testing.assert_array_equal(ifg_part.phase_data,\n i.phase_data[r_start:r_end, :])\n\n def test_mst_multiprocessing_serial(self):\n self.params[cf.PARALLEL] = False\n original_mst = mst.mst_boolean_array(self.ifgs)\n parallel_mst = mst.mst_parallel(self.ifgs, params=self.params)\n np.testing.assert_array_equal(original_mst, parallel_mst)\n\n def test_mst_multiprocessing(self):\n self.params[cf.PARALLEL] = True\n original_mst = mst.mst_boolean_array(self.ifgs)\n parallel_mst = mst.mst_parallel(self.ifgs, params=self.params)\n np.testing.assert_array_equal(original_mst, parallel_mst)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "numpy.sum", "numpy.empty", "numpy.testing.assert_array_equal", "numpy.isnan", "numpy.array" ] ]
ohad83/pandas
[ "c5576293859f4351e508471811948e9a1dac4a30" ]
[ "pandas/core/window/expanding.py" ]
[ "from textwrap import dedent\n\nfrom pandas.compat.numpy import function as nv\nfrom pandas.util._decorators import Appender, Substitution\n\nfrom pandas.core.window.common import WindowGroupByMixin, _doc_template, _shared_docs\nfrom pandas.core.window.rolling import _Rolling_and_Expanding\n\n\nclass Expanding(_Rolling_and_Expanding):\n \"\"\"\n Provide expanding transformations.\n\n Parameters\n ----------\n min_periods : int, default 1\n Minimum number of observations in window required to have a value\n (otherwise result is NA).\n center : bool, default False\n Set the labels at the center of the window.\n axis : int or str, default 0\n\n Returns\n -------\n a Window sub-classed for the particular operation\n\n See Also\n --------\n rolling : Provides rolling window calculations.\n ewm : Provides exponential weighted functions.\n\n Notes\n -----\n By default, the result is set to the right edge of the window. This can be\n changed to the center of the window by setting ``center=True``.\n\n Examples\n --------\n\n >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})\n B\n 0 0.0\n 1 1.0\n 2 2.0\n 3 NaN\n 4 4.0\n\n >>> df.expanding(2).sum()\n B\n 0 NaN\n 1 1.0\n 2 3.0\n 3 3.0\n 4 7.0\n \"\"\"\n\n _attributes = [\"min_periods\", \"center\", \"axis\"]\n\n def __init__(self, obj, min_periods=1, center=False, axis=0, **kwargs):\n super().__init__(obj=obj, min_periods=min_periods, center=center, axis=axis)\n\n @property\n def _constructor(self):\n return Expanding\n\n def _get_window(self, other=None, **kwargs):\n \"\"\"\n Get the window length over which to perform some operation.\n\n Parameters\n ----------\n other : object, default None\n The other object that is involved in the operation.\n Such an object is involved for operations like covariance.\n\n Returns\n -------\n window : int\n The window length.\n \"\"\"\n axis = self.obj._get_axis(self.axis)\n length = len(axis) + (other is not None) * len(axis)\n\n other = self.min_periods or -1\n return max(length, other)\n\n _agg_see_also_doc = dedent(\n \"\"\"\n See Also\n --------\n DataFrame.expanding.aggregate\n DataFrame.rolling.aggregate\n DataFrame.aggregate\n \"\"\"\n )\n\n _agg_examples_doc = dedent(\n \"\"\"\n Examples\n --------\n\n >>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])\n >>> df\n A B C\n 0 -2.385977 -0.102758 0.438822\n 1 -1.004295 0.905829 -0.954544\n 2 0.735167 -0.165272 -1.619346\n 3 -0.702657 -1.340923 -0.706334\n 4 -0.246845 0.211596 -0.901819\n 5 2.463718 3.157577 -1.380906\n 6 -1.142255 2.340594 -0.039875\n 7 1.396598 -1.647453 1.677227\n 8 -0.543425 1.761277 -0.220481\n 9 -0.640505 0.289374 -1.550670\n\n >>> df.ewm(alpha=0.5).mean()\n A B C\n 0 -2.385977 -0.102758 0.438822\n 1 -1.464856 0.569633 -0.490089\n 2 -0.207700 0.149687 -1.135379\n 3 -0.471677 -0.645305 -0.906555\n 4 -0.355635 -0.203033 -0.904111\n 5 1.076417 1.503943 -1.146293\n 6 -0.041654 1.925562 -0.588728\n 7 0.680292 0.132049 0.548693\n 8 0.067236 0.948257 0.163353\n 9 -0.286980 0.618493 -0.694496\n \"\"\"\n )\n\n @Substitution(\n see_also=_agg_see_also_doc,\n examples=_agg_examples_doc,\n versionadded=\"\",\n klass=\"Series/Dataframe\",\n axis=\"\",\n )\n @Appender(_shared_docs[\"aggregate\"])\n def aggregate(self, func, *args, **kwargs):\n return super().aggregate(func, *args, **kwargs)\n\n agg = aggregate\n\n @Substitution(name=\"expanding\")\n @Appender(_shared_docs[\"count\"])\n def count(self, **kwargs):\n return super().count(**kwargs)\n\n @Substitution(name=\"expanding\")\n @Appender(_shared_docs[\"apply\"])\n def 
apply(self, func, raw=None, args=(), kwargs={}):\n return super().apply(func, raw=raw, args=args, kwargs=kwargs)\n\n @Substitution(name=\"expanding\")\n @Appender(_shared_docs[\"sum\"])\n def sum(self, *args, **kwargs):\n nv.validate_expanding_func(\"sum\", args, kwargs)\n return super().sum(*args, **kwargs)\n\n @Substitution(name=\"expanding\")\n @Appender(_doc_template)\n @Appender(_shared_docs[\"max\"])\n def max(self, *args, **kwargs):\n nv.validate_expanding_func(\"max\", args, kwargs)\n return super().max(*args, **kwargs)\n\n @Substitution(name=\"expanding\")\n @Appender(_shared_docs[\"min\"])\n def min(self, *args, **kwargs):\n nv.validate_expanding_func(\"min\", args, kwargs)\n return super().min(*args, **kwargs)\n\n @Substitution(name=\"expanding\")\n @Appender(_shared_docs[\"mean\"])\n def mean(self, *args, **kwargs):\n nv.validate_expanding_func(\"mean\", args, kwargs)\n return super().mean(*args, **kwargs)\n\n @Substitution(name=\"expanding\")\n @Appender(_shared_docs[\"median\"])\n def median(self, **kwargs):\n return super().median(**kwargs)\n\n @Substitution(name=\"expanding\", versionadded=\"\")\n @Appender(_shared_docs[\"std\"])\n def std(self, ddof=1, *args, **kwargs):\n nv.validate_expanding_func(\"std\", args, kwargs)\n return super().std(ddof=ddof, **kwargs)\n\n @Substitution(name=\"expanding\", versionadded=\"\")\n @Appender(_shared_docs[\"var\"])\n def var(self, ddof=1, *args, **kwargs):\n nv.validate_expanding_func(\"var\", args, kwargs)\n return super().var(ddof=ddof, **kwargs)\n\n @Substitution(name=\"expanding\")\n @Appender(_doc_template)\n @Appender(_shared_docs[\"skew\"])\n def skew(self, **kwargs):\n return super().skew(**kwargs)\n\n _agg_doc = dedent(\n \"\"\"\n Examples\n --------\n\n The example below will show an expanding calculation with a window size of\n four matching the equivalent function call using `scipy.stats`.\n\n >>> arr = [1, 2, 3, 4, 999]\n >>> import scipy.stats\n >>> fmt = \"{0:.6f}\" # limit the printed precision to 6 digits\n >>> print(fmt.format(scipy.stats.kurtosis(arr[:-1], bias=False)))\n -1.200000\n >>> print(fmt.format(scipy.stats.kurtosis(arr, bias=False)))\n 4.999874\n >>> s = pd.Series(arr)\n >>> s.expanding(4).kurt()\n 0 NaN\n 1 NaN\n 2 NaN\n 3 -1.200000\n 4 4.999874\n dtype: float64\n \"\"\"\n )\n\n @Appender(_agg_doc)\n @Substitution(name=\"expanding\")\n @Appender(_shared_docs[\"kurt\"])\n def kurt(self, **kwargs):\n return super().kurt(**kwargs)\n\n @Substitution(name=\"expanding\")\n @Appender(_shared_docs[\"quantile\"])\n def quantile(self, quantile, interpolation=\"linear\", **kwargs):\n return super().quantile(\n quantile=quantile, interpolation=interpolation, **kwargs\n )\n\n @Substitution(name=\"expanding\")\n @Appender(_doc_template)\n @Appender(_shared_docs[\"cov\"])\n def cov(self, other=None, pairwise=None, ddof=1, **kwargs):\n return super().cov(other=other, pairwise=pairwise, ddof=ddof, **kwargs)\n\n @Substitution(name=\"expanding\")\n @Appender(_shared_docs[\"corr\"])\n def corr(self, other=None, pairwise=None, **kwargs):\n return super().corr(other=other, pairwise=pairwise, **kwargs)\n\n\nclass ExpandingGroupby(WindowGroupByMixin, Expanding):\n \"\"\"\n Provide a expanding groupby implementation.\n \"\"\"\n\n @property\n def _constructor(self):\n return Expanding\n" ]
[ [ "pandas.util._decorators.Substitution", "pandas.util._decorators.Appender", "pandas.compat.numpy.function.validate_expanding_func" ] ]
myelintek/tensorpack
[ "8d5ae5cc2cfcf2e4e53b4d1064ac9e727f736d09" ]
[ "examples/ConvolutionalPoseMachines/load-cpm.py" ]
[ "#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n# File: load-cpm.py\n# Author: Yuxin Wu <[email protected]>\n\nimport cv2\nimport tensorflow as tf\nimport numpy as np\nimport argparse\n\nfrom tensorpack import *\nfrom tensorpack.utils import viz\nfrom tensorpack.utils.argtools import memoized\n\n\"\"\"\n15 channels:\n0-1 head, neck\n2-4 right shoulder, right elbow, right wrist\n5-7 left shoulder, left elbow, left wrist\n8-10 right hip, right knee, right ankle\n11-13 left hip, left knee, left ankle\n14: background\n\"\"\"\n\n\ndef colorize(img, heatmap):\n \"\"\" img: bgr, [0,255]\n heatmap: [0,1]\n \"\"\"\n heatmap = viz.intensity_to_rgb(heatmap, cmap='jet')[:, :, ::-1]\n return img * 0.5 + heatmap * 0.5\n\n\n@memoized\ndef get_gaussian_map():\n gaussian_map = np.zeros((368, 368), dtype='float32')\n for x_p in range(368):\n for y_p in range(368):\n dist_sq = (x_p - 368 / 2) * (x_p - 368 / 2) + \\\n (y_p - 368 / 2) * (y_p - 368 / 2)\n exponent = dist_sq / 2.0 / (21**2)\n gaussian_map[y_p, x_p] = np.exp(-exponent)\n return gaussian_map.reshape((1, 368, 368, 1))\n\n\ndef CPM(image):\n image = image / 256.0 - 0.5\n\n gmap = tf.constant(get_gaussian_map())\n gmap = tf.pad(gmap, [[0, 0], [0, 1], [0, 1], [0, 0]])\n pool_center = AvgPooling('mappool', gmap, 9, stride=8, padding='VALID')\n with argscope(Conv2D, kernel_shape=3, nl=tf.nn.relu,\n W_init=tf.random_normal_initializer(stddev=0.01)):\n shared = (LinearWrap(image)\n .Conv2D('conv1_1', 64)\n .Conv2D('conv1_2', 64)\n .MaxPooling('pool1', 2)\n # 184\n .Conv2D('conv2_1', 128)\n .Conv2D('conv2_2', 128)\n .MaxPooling('pool2', 2)\n # 92\n .Conv2D('conv3_1', 256)\n .Conv2D('conv3_2', 256)\n .Conv2D('conv3_3', 256)\n .Conv2D('conv3_4', 256)\n .MaxPooling('pool3', 2)\n # 46\n .Conv2D('conv4_1', 512)\n .Conv2D('conv4_2', 512)\n .Conv2D('conv4_3_CPM', 256)\n .Conv2D('conv4_4_CPM', 256)\n .Conv2D('conv4_5_CPM', 256)\n .Conv2D('conv4_6_CPM', 256)\n .Conv2D('conv4_7_CPM', 128)())\n\n def add_stage(stage, l):\n l = tf.concat([l, shared, pool_center], 3,\n name='concat_stage{}'.format(stage))\n for i in range(1, 6):\n l = Conv2D('Mconv{}_stage{}'.format(i, stage), l, 128)\n l = Conv2D('Mconv6_stage{}'.format(stage), l, 128, kernel_shape=1)\n l = Conv2D('Mconv7_stage{}'.format(stage),\n l, 15, kernel_shape=1, nl=tf.identity)\n return l\n\n with argscope(Conv2D, kernel_shape=7, nl=tf.nn.relu):\n out1 = (LinearWrap(shared)\n .Conv2D('conv5_1_CPM', 512, kernel_shape=1)\n .Conv2D('conv5_2_CPM', 15, kernel_shape=1, nl=tf.identity)())\n out2 = add_stage(2, out1)\n out3 = add_stage(3, out2)\n out4 = add_stage(4, out3)\n out5 = add_stage(5, out4)\n out6 = add_stage(6, out5)\n tf.image.resize_bilinear(out6, [368, 368], name='resized_map')\n\n\ndef run_test(model_path, img_file):\n param_dict = np.load(model_path, encoding='latin1').item()\n predict_func = OfflinePredictor(PredictConfig(\n inputs_desc=[InputDesc(tf.float32, (None, 368, 368, 3), 'input')],\n tower_func=CPM,\n session_init=DictRestore(param_dict),\n input_names=['input'],\n output_names=['resized_map']\n ))\n\n im = cv2.imread(img_file, cv2.IMREAD_COLOR).astype('float32')\n im = cv2.resize(im, (368, 368))\n out = predict_func(im[None, :, :, :])[0][0]\n hm = out[:, :, :14].sum(axis=2)\n viz = colorize(im, hm)\n cv2.imwrite(\"output.jpg\", viz)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--load', required=True, help='.npy model file')\n parser.add_argument('--input', required=True, help='input image')\n args = parser.parse_args()\n run_test(args.load, 
args.input)\n" ]
[ [ "tensorflow.pad", "numpy.load", "numpy.zeros", "numpy.exp", "tensorflow.random_normal_initializer", "tensorflow.image.resize_bilinear" ] ]