Dataset schema:
repo_name          string (lengths 6-130)
hexsha             sequence
file_path          sequence
code               sequence
apis               sequence
possible_versions  list
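A minimal sketch of how one row of this schema might be represented and inspected in Python. The example values are illustrative placeholders drawn from the first row below; only the field names come from the schema, and the loading mechanism is assumed (the rows are treated as plain Python dicts).

# Example row following the schema above; values are truncated placeholders.
example_row = {
    "repo_name": "adcrn/knest",                                  # string, 6-130 characters
    "hexsha": ["a274dc9ddb642cc30f837e225f000bf33430eb43"],       # one commit SHA per file
    "file_path": ["utils/compare.py"],                            # paths, aligned with hexsha
    "code": ["from PIL import Image\nimport cv2\n..."],           # full file contents, aligned with file_path
    "apis": [["numpy.shape"]],                                     # API calls detected per file
    "possible_versions": [{"numpy": [], "tensorflow": []}],        # inferred library versions per file
}

# The per-file fields are parallel sequences: index i describes the i-th file of the repo.
for path, source, api_calls in zip(example_row["file_path"],
                                   example_row["code"],
                                   example_row["apis"]):
    print(f"{example_row['repo_name']}:{path} uses {len(api_calls)} tracked API call(s)")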
adcrn/knest
[ "a274dc9ddb642cc30f837e225f000bf33430eb43", "a274dc9ddb642cc30f837e225f000bf33430eb43" ]
[ "utils/compare.py", "utils/image_man.py" ]
[ "# UCF Senior Design 2017-18\n# Group 38\n\nfrom PIL import Image\nimport cv2\nimport imagehash\nimport math\nimport numpy as np\n\nDIFF_THRES = 20\nLIMIT = 2\nRESIZE = 1000\n\n\ndef calc_hash(img):\n \"\"\"\n Calculate the wavelet hash of the image\n img: (ndarray) image file\n \"\"\"\n # resize image if height > 1000\n img = resize(img)\n return imagehash.whash(Image.fromarray(img))\n\n\ndef compare(hash1, hash2):\n \"\"\"\n Calculate the difference between two images\n hash1: (array) first wavelet hash\n hash2: (array) second wavelet hash\n \"\"\"\n return hash1 - hash2\n\n\ndef limit(img, std_hash, count):\n \"\"\"\n Determine whether image should be removed from image dictionary in main.py\n img: (ndarray) image file\n std_hash: (array) wavelet hash of comparison standard\n count: (int) global count of images similar to comparison standard\n \"\"\"\n # calculate hash for given image\n cmp_hash = calc_hash(img)\n\n # compare to standard\n diff = compare(std_hash, cmp_hash)\n\n # image is similar to standard\n if diff <= DIFF_THRES:\n # if there are 3 similar images already, remove image\n if count >= LIMIT:\n return 'remove'\n\n # non-similar image found\n else:\n # update comparison standard\n return 'update_std'\n\n # else continue reading images with same standard\n return 'continue'\n\n\ndef resize(img):\n \"\"\"\n Resize an image\n img: (ndarray) RGB color image\n \"\"\"\n # get dimensions of image\n width = np.shape(img)[1]\n height = np.shape(img)[0]\n\n # if height of image is greater than 1000, resize it to 1000\n if width > RESIZE:\n # keep resize proportional\n scale = RESIZE / width\n resized_img = cv2.resize(\n img, (RESIZE, math.floor(height / scale)), cv2.INTER_AREA)\n # return resized image\n return resized_img\n\n # if height of image is less than 1000, return image unresized\n return img\n\n\ndef set_standard(images, filename):\n \"\"\"\n Set new comparison standard and update information\n images: (dictionary) dictionary containing all the image data\n filename: (String) name of the image file\n \"\"\"\n return filename, calc_hash(images[filename]), 0\n", "# UCF Senior Design 2017-18\n# Group 38\n\nfrom PIL import Image\nimport math\nimport numpy as np\nimport piexif\n\nSCALING_FACTOR = 3\n\n\ndef man(boxes, image_array, landscape=True, scaling_factor=SCALING_FACTOR):\n \"\"\"\n Crop and manipulate the image for final output.\n image_array: (Array) array representation of the image\n boxes: (Dict) bounding box around subject;\n form: (ymin, xmin, ymax, xmax)\n scaling_factor (Integer) the amount by which to scale the\n bounding box of the object; this is done as a way\n to include some of the environment in the final image\n \"\"\"\n image = Image.fromarray(image_array)\n width, height = image.size\n img_center_x = width / 2\n\n dist_face_center_x = math.inf\n central_face_x, central_face_y = 0, 0\n\n for i in boxes['faces']:\n\n # Get the bounding box coordinates of the face.\n fb_xmin, fb_ymin, fb_xmax, fb_ymax = i\n\n # Calculate the center of the face bounding box.\n fb_center_x, fb_center_y = (\n fb_xmax + fb_xmin) / 2, (fb_ymax + fb_ymin) / 2\n\n # Calculate the distance of the x-component of the\n # the face box centerfrom the center of the image.\n delta_center_x = math.fabs(fb_center_x - img_center_x)\n\n # Get the closest face box center coordinates over all face boxes.\n if delta_center_x < dist_face_center_x:\n dist_face_center_x = delta_center_x\n central_face_x, central_face_y = fb_center_x, fb_center_y\n\n # Initialize bounding box extrema.\n 
sm_xmin, sm_ymin, lar_xmax, lar_ymax = math.inf, math.inf, 0, 0\n\n # Calculate the factor by which we multiply the width and height of box.\n factor = math.sqrt(scaling_factor)\n\n # Get the extrema of the bounding boxes returned from the detection graph.\n for i in boxes['birds']:\n sm_xmin = min(sm_xmin, i[0])\n sm_ymin = min(sm_ymin, i[1])\n lar_xmax = max(lar_xmax, i[2])\n lar_ymax = max(lar_ymax, i[3])\n\n # Calculate the width and height of the final crop area.\n bb_width, bb_height = lar_xmax - sm_xmin, lar_ymax - sm_ymin\n new_width, new_height = round(\n bb_width * factor, 0), round(bb_height * factor, 0)\n\n if landscape:\n if new_width * 1.5 < new_height or math.fabs(new_width - new_height) < new_height * .5:\n new_width = new_height * 1.5\n\n if landscape:\n if new_width * 1.5 < new_height or math.fabs(new_width - new_height) < new_height * .5:\n new_width = new_height * 1.5\n\n # Calculate the amounts by which to adjust the face_box coordinates.\n width_diff, height_diff = new_width / 2, new_height / 2\n\n # Set the new dimensions for the final crop box.\n final_xmin, final_xmax = central_face_x - \\\n width_diff, central_face_x + width_diff\n final_ymin, final_ymax = central_face_y - \\\n height_diff, central_face_y + height_diff\n\n # Edge case handling.\n if final_xmin < 0: final_xmin = 0\n if final_xmax > width: final_xmax = width\n if final_ymin < 0: final_ymin = 0\n if final_ymax > height: final_ymax = height\n\n # Crop and attempt to save image.\n cropped_area = image.crop((final_xmin, final_ymin, final_xmax, final_ymax))\n\n try:\n final_image = np.asarray(cropped_area)\n return final_image, True\n except IOError:\n print(\"File could not be written properly.\")\n return False\n\n\ndef exif(filename, image_array):\n \"\"\"\n Transfer the EXIF metadata from the original photograph to the\n cropped version, along with some changes to a few values.\n filename: (String) filename from original photo\n new_array: (Array) array_representation of cropped image\n \"\"\"\n try:\n # Get the EXIF metadata from the original image.\n exif_dict = piexif.load(filename)\n except piexif._exceptions.InvalidImageDataError:\n return None\n\n # Some photos mysteriously do not contain EXIF data, so\n # check if it actually exists and then return properly.\n if len(exif_dict[\"Exif\"].items()) == 0:\n return None\n\n else:\n # Set the image height and width of new EXIF to the new\n # crop dimensions. Values come from the Piexif documentation.\n exif_dict[\"Exif\"][40963], exif_dict[\"Exif\"][40962], _ = image_array.shape\n\n # Convert EXIF dictionary to a bytes object for writing with PIL.\n exif_bytes = piexif.dump(exif_dict)\n\n return exif_bytes\n" ]
[ [ "numpy.shape" ], [ "numpy.asarray" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dongmengshi/easylearn
[ "df528aaa69c3cf61f5459a04671642eb49421dfb", "df528aaa69c3cf61f5459a04671642eb49421dfb", "df528aaa69c3cf61f5459a04671642eb49421dfb", "df528aaa69c3cf61f5459a04671642eb49421dfb" ]
[ "eslearn/utils/lc_featureSelection_variance.py", "eslearn/machine_learning/test/GCNNCourseCodes/metrics.py", "eslearn/machine_learning/test/gcn_test.py", "eslearn/utils/lc_cacl_MAD.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 24 14:38:20 2018\ndimension reduction with VarianceThreshold using sklearn.\nFeature selector that removes all low-variance features.\n@author: lenovo\n\"\"\"\nfrom sklearn.feature_selection import VarianceThreshold\nimport numpy as np\n#\nnp.random.seed(1)\nX = np.random.randn(100, 10)\nX = np.hstack([X, np.zeros([100, 5])])\n#\n\n\ndef featureSelection_variance(X, thrd):\n sel = VarianceThreshold(threshold=thrd)\n X_selected = sel.fit_transform(X)\n mask = sel.get_support()\n return X_selected, mask\n\n\nX = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]\nselector = VarianceThreshold()\nselector.fit_transform(X)\nselector.variances_\n", "import tensorflow as tf\n\n\ndef masked_softmax_cross_entropy(preds, labels, mask):\n \"\"\"Softmax cross-entropy loss with masking.\"\"\"\n loss = tf.nn.softmax_cross_entropy_with_logits(logits=preds, labels=labels) \n mask = tf.cast(mask, dtype=tf.float32)\n mask /= tf.reduce_mean(mask)\n loss *= mask\n return tf.reduce_mean(loss)\n\ndef sigmoid_cross_entropy(preds, labels):\n \"\"\"Softmax cross-entropy loss with masking.\"\"\"\n loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=preds, labels=labels) \n return tf.reduce_mean(loss)\n\ndef softmax_cross_entropy(preds, labels):\n \"\"\"Softmax cross-entropy loss with masking.\"\"\"\n loss = tf.nn.softmax_cross_entropy_with_logits(logits=preds, labels=labels) \n return tf.reduce_mean(loss)\n\ndef masked_accuracy(preds, labels, mask):\n \"\"\"Accuracy with masking.\"\"\"\n correct_prediction = tf.equal(tf.argmax(preds, 1), tf.argmax(labels, 1))\n accuracy_all = tf.cast(correct_prediction, tf.float32)\n mask = tf.cast(mask, dtype=tf.float32)\n mask /= tf.reduce_mean(mask)\n accuracy_all *= mask\n return tf.reduce_mean(accuracy_all)\n\ndef inductive_multiaccuracy(preds, labels):\n \"\"\"Accuracy with masking.\"\"\"\n\n correct_prediction = tf.equal(tf.argmax(preds, 1), tf.argmax(labels, 1)) \n return tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n\ndef inductive_accuracy(preds, labels):\n \"\"\"Accuracy with masking.\"\"\"\n\n predicted = tf.nn.sigmoid(preds)\n correct_pred = tf.equal(tf.round(predicted), labels)\n accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) \n return accuracy\n", "import torch\nfrom torch.nn import Linear\nimport torch.nn.functional as F\nfrom torch_geometric.nn.conv import MessagePassing\nfrom torch_geometric.data import Data\n\n\nclass CGConv(MessagePassing):\n r\"\"\"The crystal graph convolutional operator from the\n `\"Crystal Graph Convolutional Neural Networks for an\n Accurate and Interpretable Prediction of Material Properties\"\n <https://journals.aps.org/prl/abstract/10.1103/PhysRevLett.120.145301>`_\n paper\n\n .. 
math::\n \\mathbf{x}^{\\prime}_i = \\mathbf{x}_i + \\sum_{j \\in \\mathcal{N}(i)}\n \\sigma \\left( \\mathbf{z}_{i,j} \\mathbf{W}_f + \\mathbf{b}_f \\right)\n \\odot g \\left( \\mathbf{z}_{i,j} \\mathbf{W}_s + \\mathbf{b}_s \\right)\n\n where :math:`\\mathbf{z}_{i,j} = [ \\mathbf{x}_i, \\mathbf{x}_j,\n \\mathbf{e}_{i,j} ]` denotes the concatenation of central node features,\n neighboring node features and edge features.\n In addition, :math:`\\sigma` and :math:`g` denote the sigmoid and softplus\n functions, respectively.\n\n Args:\n channels (int): Size of each input sample.\n dim (int): Edge feature dimensionality.\n aggr (string, optional): The aggregation operator to use\n (:obj:`\"add\"`, :obj:`\"mean\"`, :obj:`\"max\"`).\n (default: :obj:`\"add\"`)\n bias (bool, optional): If set to :obj:`False`, the layer will not learn\n an additive bias. (default: :obj:`True`)\n **kwargs (optional): Additional arguments of\n :class:`torch_geometric.nn.conv.MessagePassing`.\n \"\"\"\n def __init__(self, channels, dim, aggr='add', bias=True, **kwargs):\n super(CGConv, self).__init__(aggr=aggr, **kwargs)\n self.in_channels = channels\n self.out_channels = channels\n self.dim = dim\n\n self.lin_f = Linear(2 * channels + dim, channels, bias=bias)\n self.lin_s = Linear(2 * channels + dim, channels, bias=bias)\n\n self.reset_parameters()\n\n def reset_parameters(self):\n self.lin_f.reset_parameters()\n self.lin_s.reset_parameters()\n\n\n def forward(self, data):\n \"\"\"\"\"\"\n x, edge_index, edge_attr = data.x, data.edge_index, data.edge_attr\n return self.propagate(edge_index, x=x, edge_attr=edge_attr)\n\n\n def message(self, x_i, x_j, edge_attr):\n z = torch.cat([x_i, x_j, edge_attr], dim=-1)\n return self.lin_f(z).sigmoid() * F.softplus(self.lin_s(z))\n\n def update(self, aggr_out, x):\n return aggr_out + x\n\n def __repr__(self):\n return '{}({}, {}, dim={})'.format(self.__class__.__name__,\n self.in_channels, self.out_channels,\n self.dim)\n\nif __name__ == \"__main__\":\n # Generate Data\n edge_index = torch.tensor([[0, 1, 1, 2],[1, 0, 2, 1]], dtype=torch.long)\n x = torch.tensor([[-1], [0], [1]], dtype=torch.float)\n y = torch.tensor([[-1], [1], [1]], dtype=torch.float)\n edge_attr = torch.tensor([[1], [0], [0]], dtype=torch.float)\n data = Data(x=x, edge_index=edge_index, edge_attr=edge_attr, y=y)\n\n # Training\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n model = CGConv(1, 1).to(device)\n data = data.to(device)\n optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)\n\n model.train()\n for epoch in range(20):\n optimizer.zero_grad()\n out = model(data)\n loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask])\n loss.backward()\n optimizer.step()\n\n # Evaluation\n model.eval()\n _, pred = model(data).max(dim=1)\n correct = float (pred[data.test_mask].eq(data.y[data.test_mask]).sum().item())\n acc = correct / data.test_mask.sum().item()\n print('Accuracy: {:.4f}'.format(acc))", "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug 6 11:18:08 2018\nMAD,median absolute deviation for dimension reduction\nMAD=median(|Xi−median(X)|)\nrefer to {Linked dimensions of psychopathology\nand connectivity in functional brain networks}\n@author: Li Chao\n\"\"\"\nimport numpy as np\n\n\ndef select_features_using_MAD(M, perc=0.1):\n # perc: how many percentages of feature\n # that have top MAD to be selected\n MAD = cacl_MAD(M)\n Ind_descendOrd = np.argsort(MAD)[::-1] # decend order\n Ind_select = Ind_descendOrd[0:int(len(Ind_descendOrd) * perc)]\n 
feature_selected = M[:, Ind_select]\n return feature_selected\n\n\ndef cacl_MAD(M):\n # caculate MAD\n # row is sample, col is feature\n my_median = np.median(M, 0)\n my_abs = np.abs(M - my_median)\n MAD = np.median(my_abs, 0)\n return MAD\n" ]
[ [ "sklearn.feature_selection.VarianceThreshold", "numpy.random.randn", "numpy.zeros", "numpy.random.seed" ], [ "tensorflow.nn.softmax_cross_entropy_with_logits", "tensorflow.nn.sigmoid", "tensorflow.reduce_mean", "tensorflow.cast", "tensorflow.nn.sigmoid_cross_entropy_with_logits", "tensorflow.argmax", "tensorflow.round" ], [ "torch.nn.functional.nll_loss", "torch.cat", "torch.tensor", "torch.nn.Linear", "torch.cuda.is_available" ], [ "numpy.argsort", "numpy.median", "numpy.abs" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
silent567/examples
[ "e9de12549125ecd93a4924f6b8e2bbf66d7635d9" ]
[ "mnist/my_multi_tune3.py" ]
[ "#!/usr/bin/env python\n# coding=utf-8\n\nfrom my_multi_main3 import main\nimport numpy as np\nimport argparse\nimport time\n\nparser = argparse.ArgumentParser(description='PyTorch MNIST Example')\nparser.add_argument('--batch-size', type=int, default=64, metavar='N',\n help='input batch size for training (default: 64)')\nparser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',\n help='input batch size for testing (default: 1000)')\nparser.add_argument('--epochs', type=int, default=10, metavar='N',\n help='number of epochs to train (default: 10)')\nparser.add_argument('--lr', type=float, default=0.01, metavar='LR',\n help='learning rate (default: 0.01)')\nparser.add_argument('--momentum', type=float, default=0.5, metavar='M',\n help='SGD momentum (default: 0.5)')\nparser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\nparser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\nparser.add_argument('--log-interval', type=int, default=10, metavar='N',\n help='how many batches to wait before logging training status')\nparser.add_argument('--save-model', action='store_true', default=False,\n help='For Saving the current Model')\nparser.add_argument('--norm-flag', type=bool, default=False,\n help='Triggering the Layer Normalization flag for attention scores')\nparser.add_argument('--gamma', type=float, default=None,\n help='Controlling the sparisty of gfusedmax/sparsemax, the smaller, the more sparse')\nparser.add_argument('--lam', type=float, default=1.0,\n help='Lambda: Controlling the smoothness of gfusedmax, the larger, the smoother')\nparser.add_argument('--max-type', type=str, default='softmax',choices=['softmax','sparsemax','gfusedmax'],\n help='mapping function in attention')\nparser.add_argument('--optim-type', type=str, default='SGD',choices=['SGD','Adam'],\n help='mapping function in attention')\nparser.add_argument('--head-cnt', type=int, default=2, metavar='S', choices=[1,2,4,5,10],\n help='Number of heads for attention (default: 1)')\n\nargs = parser.parse_args()\n\nhyperparameter_choices = {\n 'lr':list(10**np.arange(-4,-1,0.5)),\n 'norm_flag': [True,False],\n 'gamma':list(10**np.arange(-1,3,0.5))+[None,],\n 'lam':list(10**np.arange(-2,2,0.5)),\n 'max_type':['softmax','sparsemax','gfusedmax'],\n # 'max_type':['sparsemax'],\n 'optim_type':['SGD','Adam'],\n 'head_cnt':[1,2,4,5,10,20]\n}\n\nparam_num = 25\nrecord = np.zeros([param_num,len(hyperparameter_choices)+1])\nrecord_name = 'record3_multi_%s.csv'%time.strftime('%Y-%m-%d_%H-%M-%S',time.localtime())\nfor n in range(param_num):\n for param_index,(k,v) in enumerate(hyperparameter_choices.items()):\n print(param_index,k)\n value_index = np.random.choice(len(v))\n if isinstance(v[value_index],str) or isinstance(v[value_index],bool) or v[value_index] is None:\n record[n,param_index] = value_index\n else:\n record[n,param_index] = v[value_index]\n setattr(args,k,v[value_index])\n record[n,-1] = main(args)\n np.savetxt(record_name, record, delimiter=',')\n\n\n\n" ]
[ [ "numpy.savetxt", "numpy.arange" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
neonbjb/DL-Art-School
[ "a6f0f854b987ac724e258af8b042ea4459a571bc", "a6f0f854b987ac724e258af8b042ea4459a571bc", "a6f0f854b987ac724e258af8b042ea4459a571bc", "a6f0f854b987ac724e258af8b042ea4459a571bc" ]
[ "codes/data/image_corruptor.py", "codes/models/glean/stylegan2_latent_bank.py", "codes/models/spleeter/util.py", "codes/train.py" ]
[ "import functools\nimport random\nfrom math import cos, pi\n\nimport cv2\nimport kornia\nimport numpy as np\nimport torch\nfrom kornia.augmentation import ColorJitter\n\nfrom data.util import read_img\nfrom PIL import Image\nfrom io import BytesIO\n\n\n# Get a rough visualization of the above distribution. (Y-axis is meaningless, just spreads data)\nfrom utils.util import opt_get\n\n'''\nif __name__ == '__main__':\n import numpy as np\n import matplotlib.pyplot as plt\n data = np.asarray([get_rand() for _ in range(5000)])\n plt.plot(data, np.random.uniform(size=(5000,)), 'x')\n plt.show()\n'''\n\n\ndef kornia_color_jitter_numpy(img, setting):\n if setting * 255 > 1:\n # I'm using Kornia's ColorJitter, which requires pytorch arrays in b,c,h,w format.\n img = torch.from_numpy(img).permute(2,0,1).unsqueeze(0)\n img = ColorJitter(setting, setting, setting, setting)(img)\n img = img.squeeze(0).permute(1,2,0).numpy()\n return img\n\n\n# Performs image corruption on a list of images from a configurable set of corruption\n# options.\nclass ImageCorruptor:\n def __init__(self, opt):\n self.opt = opt\n self.reset_random()\n self.blur_scale = opt['corruption_blur_scale'] if 'corruption_blur_scale' in opt.keys() else 1\n self.fixed_corruptions = opt['fixed_corruptions'] if 'fixed_corruptions' in opt.keys() else []\n self.num_corrupts = opt['num_corrupts_per_image'] if 'num_corrupts_per_image' in opt.keys() else 0\n self.cosine_bias = opt_get(opt, ['cosine_bias'], True)\n if self.num_corrupts == 0:\n return\n else:\n self.random_corruptions = opt['random_corruptions'] if 'random_corruptions' in opt.keys() else []\n\n def reset_random(self):\n if 'random_seed' in self.opt.keys():\n self.rand = random.Random(self.opt['random_seed'])\n else:\n self.rand = random.Random()\n\n # Feeds a random uniform through a cosine distribution to slightly bias corruptions towards \"uncorrupted\".\n # Return is on [0,1] with a bias towards 0.\n def get_rand(self):\n r = self.rand.random()\n if self.cosine_bias:\n return 1 - cos(r * pi / 2)\n else:\n return r\n\n def corrupt_images(self, imgs, return_entropy=False):\n if self.num_corrupts == 0 and not self.fixed_corruptions:\n if return_entropy:\n return imgs, []\n else:\n return imgs\n\n if self.num_corrupts == 0:\n augmentations = []\n else:\n augmentations = random.choices(self.random_corruptions, k=self.num_corrupts)\n\n # Sources of entropy\n corrupted_imgs = []\n entropy = []\n undo_fns = []\n applied_augs = augmentations + self.fixed_corruptions\n for img in imgs:\n for aug in augmentations:\n r = self.get_rand()\n img, undo_fn = self.apply_corruption(img, aug, r, applied_augs)\n if undo_fn is not None:\n undo_fns.append(undo_fn)\n for aug in self.fixed_corruptions:\n r = self.get_rand()\n img, undo_fn = self.apply_corruption(img, aug, r, applied_augs)\n entropy.append(r)\n if undo_fn is not None:\n undo_fns.append(undo_fn)\n # Apply undo_fns after all corruptions are finished, in same order.\n for ufn in undo_fns:\n img = ufn(img)\n corrupted_imgs.append(img)\n\n\n if return_entropy:\n return corrupted_imgs, entropy\n else:\n return corrupted_imgs\n\n def apply_corruption(self, img, aug, rand_val, applied_augmentations):\n undo_fn = None\n if 'color_quantization' in aug:\n # Color quantization\n quant_div = 2 ** (int(rand_val * 10 / 3) + 2)\n img = img * 255\n img = (img // quant_div) * quant_div\n img = img / 255\n elif 'color_jitter' in aug:\n lo_end = 0\n hi_end = .2\n setting = rand_val * (hi_end - lo_end) + lo_end\n img = kornia_color_jitter_numpy(img, 
setting)\n elif 'gaussian_blur' in aug:\n img = cv2.GaussianBlur(img, (0,0), self.blur_scale*rand_val*1.5)\n elif 'motion_blur' in aug:\n # Motion blur\n intensity = self.blur_scale*rand_val * 3 + 1\n angle = random.randint(0,360)\n k = np.zeros((intensity, intensity), dtype=np.float32)\n k[(intensity - 1) // 2, :] = np.ones(intensity, dtype=np.float32)\n k = cv2.warpAffine(k, cv2.getRotationMatrix2D((intensity / 2 - 0.5, intensity / 2 - 0.5), angle, 1.0),\n (intensity, intensity))\n k = k * (1.0 / np.sum(k))\n img = cv2.filter2D(img, -1, k)\n elif 'block_noise' in aug:\n # Large distortion blocks in part of an img, such as is used to mask out a face.\n pass\n elif 'lq_resampling' in aug:\n # Random mode interpolation HR->LR->HR\n if 'lq_resampling4x' == aug:\n scale = 4\n else:\n if rand_val < .3:\n scale = 1\n elif rand_val < .7:\n scale = 2\n else:\n scale = 4\n if scale > 1:\n interpolation_modes = [cv2.INTER_NEAREST, cv2.INTER_CUBIC, cv2.INTER_LINEAR, cv2.INTER_LANCZOS4]\n mode = random.randint(0,4) % len(interpolation_modes)\n # Downsample first, then upsample using the random mode.\n img = cv2.resize(img, dsize=(img.shape[1]//scale, img.shape[0]//scale), interpolation=mode)\n def lq_resampling_undo_fn(scale, img):\n return cv2.resize(img, dsize=(img.shape[1]*scale, img.shape[0]*scale), interpolation=cv2.INTER_LINEAR)\n undo_fn = functools.partial(lq_resampling_undo_fn, scale)\n elif 'color_shift' in aug:\n # Color shift\n pass\n elif 'interlacing' in aug:\n # Interlacing distortion\n pass\n elif 'chromatic_aberration' in aug:\n # Chromatic aberration\n pass\n elif 'noise' in aug:\n # Random noise\n if 'noise-5' == aug:\n noise_intensity = 5 / 255.0\n else:\n noise_intensity = (rand_val*6) / 255.0\n img += np.random.rand(*img.shape) * noise_intensity\n elif 'jpeg' in aug:\n if 'noise' not in applied_augmentations and 'noise-5' not in applied_augmentations:\n if aug == 'jpeg':\n lo=10\n range=20\n elif aug == 'jpeg-low':\n lo=15\n range=10\n elif aug == 'jpeg-medium':\n lo=23\n range=25\n elif aug == 'jpeg-broad':\n lo=15\n range=60\n elif aug == 'jpeg-normal':\n lo=47\n range=35\n else:\n raise NotImplementedError(\"specified jpeg corruption doesn't exist\")\n # JPEG compression\n qf = (int((1-rand_val)*range) + lo)\n # Use PIL to perform a mock compression to a data buffer, then swap back to cv2.\n img = (img * 255).astype(np.uint8)\n img = Image.fromarray(img)\n buffer = BytesIO()\n img.save(buffer, \"JPEG\", quality=qf, optimize=True)\n buffer.seek(0)\n jpeg_img_bytes = np.asarray(bytearray(buffer.read()), dtype=\"uint8\")\n img = read_img(\"buffer\", jpeg_img_bytes, rgb=True)\n elif 'saturation' in aug:\n # Lightening / saturation\n saturation = rand_val * .3\n img = np.clip(img + saturation, a_max=1, a_min=0)\n elif 'greyscale' in aug:\n img = np.tile(np.mean(img, axis=2, keepdims=True), [1,1,3])\n elif 'none' not in aug:\n raise NotImplementedError(\"Augmentation doesn't exist\")\n\n return img, undo_fn\n", "import torch\nimport torch.nn as nn\n\nfrom models.arch_util import ConvGnLelu\nfrom models.stylegan.stylegan2_rosinality import Generator\n\n\nclass Stylegan2LatentBank(nn.Module):\n def __init__(self, pretrained_model_file, encoder_nf=64, encoder_max_nf=512, max_dim=1024, latent_dim=512, encoder_levels=4, decoder_levels=3):\n super().__init__()\n\n # Initialize the bank.\n self.bank = Generator(size=max_dim, style_dim=latent_dim, n_mlp=8, channel_multiplier=2) # Assumed using 'f' generators with mult=2.\n state_dict = torch.load(pretrained_model_file)\n 
self.bank.load_state_dict(state_dict, strict=True)\n\n # Shut off training of the latent bank.\n for p in self.bank.parameters():\n p.requires_grad = False\n p.DO_NOT_TRAIN = True\n\n # These are from `stylegan_rosinality.py`, search for `self.channels = {`.\n stylegan_encoder_dims = [512, 512, 512, 512, 512, 256, 128, 64, 32]\n\n # Initialize the fusion blocks. TODO: Try using the StyledConvs instead of regular ones.\n encoder_output_dims = reversed([min(encoder_nf * 2 ** i, encoder_max_nf) for i in range(encoder_levels)])\n input_dims_by_layer = [eod + sed for eod, sed in zip(encoder_output_dims, stylegan_encoder_dims)]\n self.fusion_blocks = nn.ModuleList([ConvGnLelu(in_filters, out_filters, kernel_size=3, activation=True, norm=False, bias=True)\n for in_filters, out_filters in zip(input_dims_by_layer, stylegan_encoder_dims)])\n\n self.decoder_levels = decoder_levels\n self.decoder_start = encoder_levels - 1\n self.total_levels = encoder_levels + decoder_levels - 1\n\n # This forward mirrors the forward() pass from the rosinality stylegan2 implementation, with the additions called\n # for from the GLEAN paper. GLEAN mods are annotated with comments.\n # Removed stuff:\n # - Support for split latents (we're spoonfeeding them)\n # - Support for fixed noise inputs\n # - RGB computations -> we only care about the latents\n # - Style MLP -> GLEAN computes the Style inputs directly.\n # - Later layers -> GLEAN terminates at 256 resolution.\n def forward(self, convolutional_features, latent_vectors):\n\n out = self.bank.input(latent_vectors[:, 0]) # The input here is only used to fetch the batch size.\n out = self.bank.conv1(out, latent_vectors[:, 0], noise=None)\n\n k = 0\n decoder_outputs = []\n for conv1, conv2 in zip(self.bank.convs[::2], self.bank.convs[1::2]):\n if k < len(self.fusion_blocks):\n out = torch.cat([convolutional_features[-k-1], out], dim=1)\n out = self.fusion_blocks[k](out)\n\n out = conv1(out, latent_vectors[:, k], noise=None)\n out = conv2(out, latent_vectors[:, k], noise=None)\n\n if k >= self.decoder_start:\n decoder_outputs.append(out)\n if k >= self.total_levels:\n break\n\n k += 1\n\n return decoder_outputs\n", "import numpy as np\nimport tensorflow as tf\n\nfrom .unet import UNet\n\n\ndef tf2pytorch(checkpoint_path, num_instrumments):\n tf_vars = {}\n init_vars = tf.train.list_variables(checkpoint_path)\n # print(init_vars)\n for name, shape in init_vars:\n try:\n # print('Loading TF Weight {} with shape {}'.format(name, shape))\n data = tf.train.load_variable(checkpoint_path, name)\n tf_vars[name] = data\n except Exception as e:\n print('Load error')\n conv_idx = 0\n tconv_idx = 0\n bn_idx = 0\n outputs = []\n for i in range(num_instrumments):\n output = {}\n outputs.append(output)\n\n for j in range(1,7):\n if conv_idx == 0:\n conv_suffix = \"\"\n else:\n conv_suffix = \"_\" + str(conv_idx)\n\n if bn_idx == 0:\n bn_suffix = \"\"\n else:\n bn_suffix = \"_\" + str(bn_idx)\n\n output['down{}_conv.weight'.format(j)] = np.transpose(\n tf_vars[\"conv2d{}/kernel\".format(conv_suffix)], (3, 2, 0, 1))\n # print('conv dtype: ',output['down{}.0.weight'.format(j)].dtype)\n output['down{}_conv.bias'.format(\n j)] = tf_vars[\"conv2d{}/bias\".format(conv_suffix)]\n\n output['down{}_act.0.weight'.format(\n j)] = tf_vars[\"batch_normalization{}/gamma\".format(bn_suffix)]\n output['down{}_act.0.bias'.format(\n j)] = tf_vars[\"batch_normalization{}/beta\".format(bn_suffix)]\n output['down{}_act.0.running_mean'.format(\n j)] = 
tf_vars['batch_normalization{}/moving_mean'.format(bn_suffix)]\n output['down{}_act.0.running_var'.format(\n j)] = tf_vars['batch_normalization{}/moving_variance'.format(bn_suffix)]\n\n conv_idx += 1\n bn_idx += 1\n\n # up blocks\n for j in range(1, 7):\n if tconv_idx == 0:\n tconv_suffix = \"\"\n else:\n tconv_suffix = \"_\" + str(tconv_idx)\n\n if bn_idx == 0:\n bn_suffix = \"\"\n else:\n bn_suffix= \"_\" + str(bn_idx)\n\n output['up{}.0.weight'.format(j)] = np.transpose(\n tf_vars[\"conv2d_transpose{}/kernel\".format(tconv_suffix)], (3,2,0, 1))\n output['up{}.0.bias'.format(\n j)] = tf_vars[\"conv2d_transpose{}/bias\".format(tconv_suffix)]\n output['up{}.2.weight'.format(\n j)] = tf_vars[\"batch_normalization{}/gamma\".format(bn_suffix)]\n output['up{}.2.bias'.format(\n j)] = tf_vars[\"batch_normalization{}/beta\".format(bn_suffix)]\n output['up{}.2.running_mean'.format(\n j)] = tf_vars['batch_normalization{}/moving_mean'.format(bn_suffix)]\n output['up{}.2.running_var'.format(\n j)] = tf_vars['batch_normalization{}/moving_variance'.format(bn_suffix)]\n tconv_idx += 1\n bn_idx += 1\n\n if conv_idx == 0:\n suffix = \"\"\n else:\n suffix = \"_\" + str(conv_idx)\n output['up7.0.weight'] = np.transpose(\n tf_vars['conv2d{}/kernel'.format(suffix)], (3, 2, 0, 1))\n output['up7.0.bias'] = tf_vars['conv2d{}/bias'.format(suffix)]\n conv_idx += 1\n\n return outputs", "import os\nimport math\nimport argparse\nimport random\nimport logging\nfrom tqdm import tqdm\n\nimport torch\nfrom data.data_sampler import DistIterSampler\nfrom trainer.eval.evaluator import create_evaluator\n\nfrom utils import util, options as option\nfrom data import create_dataloader, create_dataset\nfrom trainer.ExtensibleTrainer import ExtensibleTrainer\nfrom time import time\n\nfrom utils.util import opt_get\n\n\ndef init_dist(backend, **kwargs):\n # These packages have globals that screw with Windows, so only import them if needed.\n import torch.distributed as dist\n import torch.multiprocessing as mp\n\n \"\"\"initialization for distributed training\"\"\"\n if mp.get_start_method(allow_none=True) != 'spawn':\n mp.set_start_method('spawn')\n rank = int(os.environ['RANK'])\n num_gpus = torch.cuda.device_count()\n torch.cuda.set_device(rank % num_gpus)\n dist.init_process_group(backend=backend, **kwargs)\n\nclass Trainer:\n\n def init(self, opt, launcher, all_networks={}):\n self._profile = False\n self.val_compute_psnr = opt_get(opt, ['eval', 'compute_psnr'], False)\n self.val_compute_fea = opt_get(opt, ['eval', 'compute_fea'], False)\n\n #### loading resume state if exists\n if opt['path'].get('resume_state', None):\n # distributed resuming: all load into default GPU\n device_id = torch.cuda.current_device()\n resume_state = torch.load(opt['path']['resume_state'],\n map_location=lambda storage, loc: storage.cuda(device_id))\n option.check_resume(opt, resume_state['iter']) # check resume options\n else:\n resume_state = None\n\n #### mkdir and loggers\n if self.rank <= 0: # normal training (self.rank -1) OR distributed training (self.rank 0)\n if resume_state is None:\n util.mkdir_and_rename(\n opt['path']['experiments_root']) # rename experiment folder if exists\n util.mkdirs(\n (path for key, path in opt['path'].items() if not key == 'experiments_root' and path is not None\n and 'pretrain_model' not in key and 'resume' not in key))\n\n # config loggers. 
Before it, the log will not work\n util.setup_logger('base', opt['path']['log'], 'train_' + opt['name'], level=logging.INFO,\n screen=True, tofile=True)\n self.logger = logging.getLogger('base')\n self.logger.info(option.dict2str(opt))\n # tensorboard logger\n if opt['use_tb_logger'] and 'debug' not in opt['name']:\n self.tb_logger_path = os.path.join(opt['path']['experiments_root'], 'tb_logger')\n version = float(torch.__version__[0:3])\n if version >= 1.1: # PyTorch 1.1\n from torch.utils.tensorboard import SummaryWriter\n else:\n self.self.logger.info(\n 'You are using PyTorch {}. Tensorboard will use [tensorboardX]'.format(version))\n from tensorboardX import SummaryWriter\n self.tb_logger = SummaryWriter(log_dir=self.tb_logger_path)\n else:\n util.setup_logger('base', opt['path']['log'], 'train', level=logging.INFO, screen=True)\n self.logger = logging.getLogger('base')\n\n # convert to NoneDict, which returns None for missing keys\n opt = option.dict_to_nonedict(opt)\n self.opt = opt\n\n #### wandb init\n if opt['wandb'] and self.rank <= 0:\n import wandb\n os.makedirs(os.path.join(opt['path']['log'], 'wandb'), exist_ok=True)\n wandb.init(project=opt['name'], dir=opt['path']['log'])\n\n #### random seed\n seed = opt['train']['manual_seed']\n if seed is None:\n seed = random.randint(1, 10000)\n if self.rank <= 0:\n self.logger.info('Random seed: {}'.format(seed))\n seed += self.rank # Different multiprocessing instances should behave differently.\n util.set_random_seed(seed)\n\n torch.backends.cudnn.benchmark = opt_get(opt, ['cuda_benchmarking_enabled'], True)\n # torch.backends.cudnn.deterministic = True\n if opt_get(opt, ['anomaly_detection'], False):\n torch.autograd.set_detect_anomaly(True)\n\n # Save the compiled opt dict to the global loaded_options variable.\n util.loaded_options = opt\n\n #### create train and val dataloader\n dataset_ratio = 1 # enlarge the size of each epoch\n for phase, dataset_opt in opt['datasets'].items():\n if phase == 'train':\n self.train_set, collate_fn = create_dataset(dataset_opt, return_collate=True)\n train_size = int(math.ceil(len(self.train_set) / dataset_opt['batch_size']))\n total_iters = int(opt['train']['niter'])\n self.total_epochs = int(math.ceil(total_iters / train_size))\n if opt['dist']:\n self.train_sampler = DistIterSampler(self.train_set, self.world_size, self.rank, dataset_ratio)\n self.total_epochs = int(math.ceil(total_iters / (train_size * dataset_ratio)))\n shuffle = False\n else:\n self.train_sampler = None\n shuffle = True\n self.train_loader = create_dataloader(self.train_set, dataset_opt, opt, self.train_sampler, collate_fn=collate_fn, shuffle=shuffle)\n if self.rank <= 0:\n self.logger.info('Number of train images: {:,d}, iters: {:,d}'.format(\n len(self.train_set), train_size))\n self.logger.info('Total epochs needed: {:d} for iters {:,d}'.format(\n self.total_epochs, total_iters))\n elif phase == 'val':\n self.val_set, collate_fn = create_dataset(dataset_opt, return_collate=True)\n self.val_loader = create_dataloader(self.val_set, dataset_opt, opt, None, collate_fn=collate_fn)\n if self.rank <= 0:\n self.logger.info('Number of val images in [{:s}]: {:d}'.format(\n dataset_opt['name'], len(self.val_set)))\n else:\n raise NotImplementedError('Phase [{:s}] is not recognized.'.format(phase))\n assert self.train_loader is not None\n\n #### create model\n self.model = ExtensibleTrainer(opt, cached_networks=all_networks)\n\n ### Evaluators\n self.evaluators = []\n if 'eval' in opt.keys() and 'evaluators' in 
opt['eval'].keys():\n # In \"pure\" mode, we propagate through the normal training steps, but use validation data instead and average\n # the total loss. A validation dataloader is required.\n if opt_get(opt, ['eval', 'pure'], False):\n assert hasattr(self, 'val_loader')\n\n for ev_key, ev_opt in opt['eval']['evaluators'].items():\n self.evaluators.append(create_evaluator(self.model.networks[ev_opt['for']],\n ev_opt, self.model.env))\n\n #### resume training\n if resume_state:\n self.logger.info('Resuming training from epoch: {}, iter: {}.'.format(\n resume_state['epoch'], resume_state['iter']))\n\n self.start_epoch = resume_state['epoch']\n self.current_step = resume_state['iter']\n self.model.resume_training(resume_state, 'amp_opt_level' in opt.keys()) # handle optimizers and schedulers\n else:\n self.current_step = -1 if 'start_step' not in opt.keys() else opt['start_step']\n self.start_epoch = 0\n if 'force_start_step' in opt.keys():\n self.current_step = opt['force_start_step']\n opt['current_step'] = self.current_step\n\n def do_step(self, train_data):\n if self._profile:\n print(\"Data fetch: %f\" % (time() - _t))\n _t = time()\n\n opt = self.opt\n self.current_step += 1\n #### update learning rate\n self.model.update_learning_rate(self.current_step, warmup_iter=opt['train']['warmup_iter'])\n\n #### training\n if self._profile:\n print(\"Update LR: %f\" % (time() - _t))\n _t = time()\n self.model.feed_data(train_data, self.current_step)\n self.model.optimize_parameters(self.current_step)\n if self._profile:\n print(\"Model feed + step: %f\" % (time() - _t))\n _t = time()\n\n #### log\n if self.current_step % opt['logger']['print_freq'] == 0 and self.rank <= 0:\n logs = self.model.get_current_log(self.current_step)\n message = '[epoch:{:3d}, iter:{:8,d}, lr:('.format(self.epoch, self.current_step)\n for v in self.model.get_current_learning_rate():\n message += '{:.3e},'.format(v)\n message += ')] '\n for k, v in logs.items():\n if 'histogram' in k:\n self.tb_logger.add_histogram(k, v, self.current_step)\n elif isinstance(v, dict):\n self.tb_logger.add_scalars(k, v, self.current_step)\n else:\n message += '{:s}: {:.4e} '.format(k, v)\n # tensorboard logger\n if opt['use_tb_logger'] and 'debug' not in opt['name']:\n self.tb_logger.add_scalar(k, v, self.current_step)\n if opt['wandb'] and self.rank <= 0:\n import wandb\n wandb.log(logs)\n self.logger.info(message)\n\n #### save models and training states\n if self.current_step % opt['logger']['save_checkpoint_freq'] == 0:\n if self.rank <= 0:\n self.logger.info('Saving models and training states.')\n self.model.save(self.current_step)\n self.model.save_training_state(self.epoch, self.current_step)\n if 'alt_path' in opt['path'].keys():\n import shutil\n print(\"Synchronizing tb_logger to alt_path..\")\n alt_tblogger = os.path.join(opt['path']['alt_path'], \"tb_logger\")\n shutil.rmtree(alt_tblogger, ignore_errors=True)\n shutil.copytree(self.tb_logger_path, alt_tblogger)\n\n #### validation\n if opt_get(opt, ['eval', 'pure'], False) and self.current_step % opt['train']['val_freq'] == 0:\n metrics = []\n for val_data in tqdm(self.val_loader):\n self.model.feed_data(val_data, self.current_step, perform_micro_batching=False)\n metrics.append(self.model.test())\n reduced_metrics = {}\n for metric in metrics:\n for k, v in metric.as_dict().items():\n if isinstance(v, torch.Tensor) and len(v.shape) == 0:\n if k in reduced_metrics.keys():\n reduced_metrics[k].append(v)\n else:\n reduced_metrics[k] = [v]\n if self.rank <= 0:\n for k, v in 
reduced_metrics.items():\n val = torch.stack(v).mean().item()\n self.tb_logger.add_scalar(f'val_{k}', val, self.current_step)\n print(f\">>Eval {k}: {val}\")\n if opt['wandb']:\n import wandb\n wandb.log({f'eval_{k}': torch.stack(v).mean().item() for k,v in reduced_metrics.items()})\n\n if len(self.evaluators) != 0 and self.current_step % opt['train']['val_freq'] == 0:\n eval_dict = {}\n for eval in self.evaluators:\n if eval.uses_all_ddp or self.rank <= 0:\n eval_dict.update(eval.perform_eval())\n if self.rank <= 0:\n print(\"Evaluator results: \", eval_dict)\n for ek, ev in eval_dict.items():\n self.tb_logger.add_scalar(ek, ev, self.current_step)\n if opt['wandb']:\n import wandb\n wandb.log(eval_dict)\n\n\n def do_training(self):\n self.logger.info('Start training from epoch: {:d}, iter: {:d}'.format(self.start_epoch, self.current_step))\n for epoch in range(self.start_epoch, self.total_epochs + 1):\n self.epoch = epoch\n if opt['dist']:\n self.train_sampler.set_epoch(epoch)\n tq_ldr = tqdm(self.train_loader)\n\n _t = time()\n for train_data in tq_ldr:\n self.do_step(train_data)\n\n def create_training_generator(self, index):\n self.logger.info('Start training from epoch: {:d}, iter: {:d}'.format(self.start_epoch, self.current_step))\n for epoch in range(self.start_epoch, self.total_epochs + 1):\n self.epoch = epoch\n if self.opt['dist']:\n self.train_sampler.set_epoch(epoch)\n tq_ldr = tqdm(self.train_loader, position=index)\n\n _t = time()\n for train_data in tq_ldr:\n yield self.model\n self.do_step(train_data)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-opt', type=str, help='Path to option YAML file.', default='../options/train_diffusion_vocoder_clips.yml')\n parser.add_argument('--launcher', choices=['none', 'pytorch'], default='none', help='job launcher')\n parser.add_argument('--local_rank', type=int, default=0)\n args = parser.parse_args()\n opt = option.parse(args.opt, is_train=True)\n if args.launcher != 'none':\n # export CUDA_VISIBLE_DEVICES for running in distributed mode.\n if 'gpu_ids' in opt.keys():\n gpu_list = ','.join(str(x) for x in opt['gpu_ids'])\n os.environ['CUDA_VISIBLE_DEVICES'] = gpu_list\n print('export CUDA_VISIBLE_DEVICES=' + gpu_list)\n trainer = Trainer()\n\n #### distributed training settings\n if args.launcher == 'none': # disabled distributed training\n opt['dist'] = False\n trainer.rank = -1\n if len(opt['gpu_ids']) == 1:\n torch.cuda.set_device(opt['gpu_ids'][0])\n print('Disabled distributed training.')\n else:\n opt['dist'] = True\n init_dist('nccl')\n trainer.world_size = torch.distributed.get_world_size()\n trainer.rank = torch.distributed.get_rank()\n\n trainer.init(opt, args.launcher)\n trainer.do_training()\n" ]
[ [ "numpy.clip", "torch.from_numpy", "numpy.ones", "numpy.mean", "numpy.random.rand", "numpy.zeros", "numpy.sum" ], [ "torch.cat", "torch.load" ], [ "tensorflow.train.load_variable", "tensorflow.train.list_variables" ], [ "torch.multiprocessing.set_start_method", "torch.distributed.init_process_group", "torch.cuda.set_device", "torch.cuda.current_device", "torch.autograd.set_detect_anomaly", "torch.multiprocessing.get_start_method", "torch.stack", "torch.distributed.get_rank", "torch.cuda.device_count", "torch.distributed.get_world_size" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
pclucas14/continuum
[ "09034db1371e9646ca660fd4d4df73e61bf77067", "09034db1371e9646ca660fd4d4df73e61bf77067" ]
[ "tests/test_background_swap.py", "continuum/scenarios/base.py" ]
[ "import os\n\nfrom torch.utils.data import DataLoader\nfrom continuum.datasets import CIFAR10, InMemoryDataset\nfrom continuum.datasets import MNIST\nimport torchvision\nfrom continuum.scenarios import TransformationIncremental\nimport pytest\nimport numpy as np\n\nfrom continuum.transforms.bg_swap import BackgroundSwap\n\nDATA_PATH = os.environ.get(\"CONTINUUM_DATA_PATH\")\n\n# Uncomment for debugging via image output\n# import matplotlib.pyplot as plt\n\n\ndef test_bg_swap_fast():\n \"\"\"\n Fast test for background swap.\n \"\"\"\n bg_x = np.ones(shape=[2, 5, 5, 3]) * -1\n bg_y = np.random.rand(2)\n\n fg = np.random.normal(loc=.5, scale=.1, size=[5, 5])\n bg = InMemoryDataset(bg_x, bg_y)\n\n bg_swap = BackgroundSwap(bg, input_dim=(5, 5), normalize_bg=None)\n\n spliced_1_channel = bg_swap(fg)[:, :, 0]\n\n assert np.array_equal((spliced_1_channel <= -1), (fg <= .5))\n\n\[email protected]\ndef test_background_swap_numpy():\n \"\"\"\n Test background swap on a single ndarray input.\n \"\"\"\n mnist = MNIST(DATA_PATH, download=True, train=True)\n cifar = CIFAR10(DATA_PATH, download=True, train=True)\n\n bg_swap = BackgroundSwap(cifar, input_dim=(28, 28))\n\n im = mnist.get_data()[0][0]\n im = bg_swap(im)\n\n # Uncomment for debugging\n # plt.imshow(im, interpolation='nearest')\n # plt.show()\n\n\[email protected]\ndef test_background_swap_torch():\n \"\"\"\n Test background swap on a single tensor input.\n \"\"\"\n cifar = CIFAR10(DATA_PATH, download=True, train=True)\n\n mnist = torchvision.datasets.MNIST(DATA_PATH, train=True, download=True,\n transform=torchvision.transforms.Compose([\n torchvision.transforms.ToTensor()\n ]))\n\n bg_swap = BackgroundSwap(cifar, input_dim=(28, 28))\n im = mnist[0][0]\n\n im = bg_swap(im)\n\n # Uncomment for debugging\n # plt.imshow(im.permute(1, 2, 0), interpolation='nearest')\n # plt.show()\n\n\[email protected]\ndef test_background_tranformation():\n \"\"\"\n Example code using TransformationIncremental to create a setting with 3 tasks.\n \"\"\"\n cifar = CIFAR10(DATA_PATH, train=True)\n mnist = MNIST(DATA_PATH, download=False, train=True)\n nb_task = 3\n list_trsf = []\n for i in range(nb_task):\n list_trsf.append([torchvision.transforms.ToTensor(), BackgroundSwap(cifar, bg_label=i, input_dim=(28, 28)),\n torchvision.transforms.ToPILImage()])\n scenario = TransformationIncremental(mnist, base_transformations=[torchvision.transforms.ToTensor()],\n incremental_transformations=list_trsf)\n folder = \"tests/samples/background_trsf/\"\n if not os.path.exists(folder):\n os.makedirs(folder)\n for task_id, task_data in enumerate(scenario):\n task_data.plot(path=folder, title=f\"background_{task_id}.jpg\", nb_samples=100, shape=[28, 28, 3])\n loader = DataLoader(task_data)\n _, _, _ = next(iter(loader))\n", "import abc\nfrom typing import Callable, List, Union\n\nimport numpy as np\nfrom torchvision import transforms\n\nfrom continuum.datasets import _ContinuumDataset\nfrom continuum.tasks import TaskSet, TaskType\nfrom continuum.transforms.segmentation import Compose as SegmentationCompose\n\n\nclass _BaseScenario(abc.ABC):\n \"\"\"Abstract loader.\n\n DO NOT INSTANTIATE THIS CLASS.\n\n :param cl_dataset: A Continuum dataset.\n :param nb_tasks: The number of tasks to do.\n :param transformations: A list of transformations applied to all tasks. 
If\n it's a list of list, then the transformation will be\n different per task.\n \"\"\"\n\n def __init__(\n self,\n cl_dataset: _ContinuumDataset,\n nb_tasks: int,\n transformations: Union[List[Callable], List[List[Callable]]] = None\n ) -> None:\n\n self.cl_dataset = cl_dataset\n self._nb_tasks = nb_tasks\n self.transformations = transformations\n self._counter = 0\n\n if transformations is None:\n self.transformations = self.cl_dataset.transformations\n if self.cl_dataset.data_type == TaskType.SEGMENTATION:\n composer = SegmentationCompose\n else:\n composer = transforms.Compose\n if self.transformations is not None and isinstance(self.transformations[0], list):\n # We have list of list of callable, where each sublist is dedicated to\n # a task.\n if len(self.transformations) != nb_tasks:\n raise ValueError(\n f\"When using different transformations per task, there must be as as much transformations\"\n f\" ({len(transformations)}) than there are tasks ({nb_tasks})\"\n f\", which is not currently the case.\"\n )\n self.trsf = [composer(trsf) for trsf in self.transformations]\n else:\n self.trsf = composer(self.transformations)\n\n @abc.abstractmethod\n def _setup(self, nb_tasks: int) -> int:\n raise NotImplementedError\n\n @property\n def train(self) -> bool:\n \"\"\"Returns whether we are in training or testing mode.\n\n This property is dependent on the dataset, not the actual scenario.\n \"\"\"\n return self.cl_dataset.train\n\n @property\n def nb_samples(self) -> int:\n \"\"\"Total number of samples in the whole continual setting.\"\"\"\n return len(self.dataset[0]) # type: ignore\n\n @property\n def nb_classes(self) -> int:\n \"\"\"Total number of classes in the whole continual setting.\"\"\"\n return len(np.unique(self.dataset[1])) # type: ignore\n\n @property\n def classes(self) -> List:\n \"\"\"list of classes in the whole continual setting.\"\"\"\n return np.unique(self.dataset[1]) # type: ignore\n\n @property\n def nb_tasks(self) -> int:\n \"\"\"Number of tasks in the whole continual setting.\"\"\"\n return len(self)\n\n def __len__(self) -> int:\n \"\"\"Returns the number of tasks.\n\n :return: Number of tasks.\n \"\"\"\n return self._nb_tasks\n\n def __iter__(self):\n \"\"\"Used for iterating through all tasks with the CLLoader in a for loop.\"\"\"\n self._counter = 0\n return self\n\n def __next__(self) -> TaskSet:\n \"\"\"An iteration/task in the for loop.\"\"\"\n if self._counter >= len(self):\n raise StopIteration\n task = self[self._counter]\n self._counter += 1\n return task\n\n def __getitem__(self, task_index: Union[int, slice]):\n \"\"\"Returns a task by its unique index.\n\n :param task_index: The unique index of a task. 
As for List, you can use\n indexing between [0, len], negative indexing, or\n even slices.\n :return: A train PyTorch's Datasets.\n \"\"\"\n if isinstance(task_index, slice) and isinstance(self.trsf, list):\n raise ValueError(\n f\"You cannot select multiple task ({task_index}) when you have a \"\n \"different set of transformations per task\"\n )\n\n x, y, t, _, data_indexes = self._select_data_by_task(task_index)\n\n return TaskSet(\n x, y, t,\n trsf=self.trsf[task_index] if isinstance(self.trsf, list) else self.trsf,\n data_type=self.cl_dataset.data_type,\n bounding_boxes=self.cl_dataset.bounding_boxes,\n data_indexes=data_indexes\n )\n\n def _select_data_by_task(\n self,\n task_index: Union[int, slice, np.ndarray]\n ) -> Union[np.ndarray, np.ndarray, np.ndarray, Union[int, List[int]]]:\n \"\"\"Selects a subset of the whole data for a given task.\n\n This class returns the \"task_index\" in addition of the x, y, t data.\n This task index is either an integer or a list of integer when the user\n used a slice. We need this variable when in segmentation to disentangle\n samples with multiple task ids.\n\n :param task_index: The unique index of a task. As for List, you can use\n indexing between [0, len], negative indexing, or\n even slices.\n :return: A tuple of numpy array being resp. (1) the data, (2) the targets,\n (3) task ids, and (4) the actual task required by the user.\n \"\"\"\n\n # conversion of task_index into a list\n\n if isinstance(task_index, slice):\n start = task_index.start if task_index.start is not None else 0\n stop = task_index.stop if task_index.stop is not None else len(self) + 1\n step = task_index.step if task_index.step is not None else 1\n task_index = list(range(start, stop, step))\n if len(task_index) == 0:\n raise ValueError(f\"Invalid slicing resulting in no data (start={start}, end={stop}, step={step}).\")\n\n if isinstance(task_index, np.ndarray):\n task_index = list(task_index)\n\n x, y, t = self.dataset # type: ignore\n\n if isinstance(task_index, list):\n task_index = [\n t if t >= 0 else _handle_negative_indexes(t, len(self)) for t in task_index\n ]\n if len(t.shape) == 2:\n data_indexes = np.unique(np.where(t[:, task_index] == 1)[0])\n else:\n data_indexes = np.where(np.isin(t, task_index))[0]\n else:\n if task_index < 0:\n task_index = _handle_negative_indexes(task_index, len(self))\n\n if len(t.shape) == 2:\n data_indexes = np.where(t[:, task_index] == 1)[0]\n else:\n data_indexes = np.where(t == task_index)[0]\n\n if self.cl_dataset.data_type == TaskType.H5:\n # for h5 TaskType, x is just the filename containing all data\n # no need for slicing here\n selected_x = x\n else:\n selected_x = x[data_indexes]\n selected_y = y[data_indexes]\n selected_t = t[data_indexes]\n\n if self.cl_dataset.need_class_remapping: # TODO: to remove with TransformIncremental\n # A remapping of the class ids is done to handle some special cases\n # like PermutedMNIST or RotatedMNIST.\n selected_y = self.cl_dataset.class_remapping(selected_y)\n\n return selected_x, selected_y, selected_t, task_index, data_indexes\n\n\ndef _handle_negative_indexes(index: int, total_len: int) -> int:\n if index < 0:\n index = index % total_len\n return index\n" ]
[ [ "numpy.array_equal", "torch.utils.data.DataLoader", "numpy.ones", "numpy.random.normal", "numpy.random.rand" ], [ "numpy.where", "numpy.isin", "numpy.unique" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
g-nightingale/tox_examples
[ "d7714375c764580b4b8af9db61332ced4e851def" ]
[ "packaging/squarer/ml_squarer.py" ]
[ "import numpy as np\n\n\ndef train_ml_squarer() -> None:\n print(\"Training!\")\n\n\ndef square() -> int:\n \"\"\"Square a number...maybe\"\"\"\n return np.random.randint(1, 100)\n\n\nif __name__ == '__main__':\n train_ml_squarer()" ]
[ [ "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
GOOGLE-M/SGC
[ "78ad8d02b80808302e38559e2d0f430f66a809bd", "78ad8d02b80808302e38559e2d0f430f66a809bd", "78ad8d02b80808302e38559e2d0f430f66a809bd", "78ad8d02b80808302e38559e2d0f430f66a809bd" ]
[ "venv/lib/python3.7/site-packages/torch/utils/benchmark/utils/timer.py", "venv/lib/python3.7/site-packages/caffe2/python/operator_test/locally_connected_op_test.py", "venv/lib/python3.7/site-packages/torch/nn/parallel/distributed.py", "venv/lib/python3.7/site-packages/torch/nn/init.py" ]
[ "\"\"\"Timer class based on the timeit.Timer class, but torch aware.\"\"\"\nimport enum\nimport timeit\nimport textwrap\nfrom typing import Any, Callable, Dict, List, NoReturn, Optional, Type, Union\n\nimport numpy as np\nimport torch\nfrom torch.utils.benchmark.utils import common, cpp_jit\nfrom torch.utils.benchmark.utils._stubs import TimerClass, TimeitModuleType\nfrom torch.utils.benchmark.utils.valgrind_wrapper import timer_interface as valgrind_timer_interface\n\n\n__all__ = [\"Timer\", \"timer\", \"Language\"]\n\n\nif torch.has_cuda and torch.cuda.is_available():\n def timer() -> float:\n torch.cuda.synchronize()\n return timeit.default_timer()\nelse:\n timer = timeit.default_timer\n\n\nclass Language(enum.Enum):\n PYTHON = 0\n CPP = 1\n\n\nclass CPPTimer:\n def __init__(\n self,\n stmt: str,\n setup: str,\n timer: Callable[[], float],\n globals: Dict[str, Any],\n ) -> None:\n if timer is not timeit.default_timer:\n raise NotImplementedError(\n \"PyTorch was built with CUDA and a GPU is present; however \"\n \"Timer does not yet support GPU measurements. If your \"\n \"code is CPU only, pass `timer=timeit.default_timer` to the \"\n \"Timer's constructor to indicate this. (Note that this will \"\n \"produce incorrect results if the GPU is in fact used, as \"\n \"Timer will not synchronize CUDA.)\"\n )\n\n if globals:\n raise ValueError(\"C++ timing does not support globals.\")\n\n self._stmt: str = textwrap.dedent(stmt)\n self._setup: str = textwrap.dedent(setup)\n self._timeit_module: Optional[TimeitModuleType] = None\n\n def timeit(self, number: int) -> float:\n if self._timeit_module is None:\n self._timeit_module = cpp_jit.compile_timeit_template(\n self._stmt,\n self._setup,\n )\n\n return self._timeit_module.timeit(number)\n\n\nclass Timer(object):\n \"\"\"Helper class for measuring execution time of PyTorch statements.\n\n For a full tutorial on how to use this class, see:\n https://pytorch.org/tutorials/recipes/recipes/benchmark.html\n\n The PyTorch Timer is based on `timeit.Timer` (and in fact uses\n `timeit.Timer` internally), but with several key differences:\n\n 1) Runtime aware:\n Timer will perform warmups (important as some elements of PyTorch are\n lazily initialized), set threadpool size so that comparisons are\n apples-to-apples, and synchronize asynchronous CUDA functions when\n necessary.\n\n 2) Focus on replicates:\n When measuring code, and particularly complex kernels / models,\n run-to-run variation is a significant confounding factor. It is\n expected that all measurements should include replicates to quantify\n noise and allow median computation, which is more robust than mean.\n To that effect, this class deviates from the `timeit` API by\n conceptually merging `timeit.Timer.repeat` and `timeit.Timer.autorange`.\n (Exact algorithms are discussed in method docstrings.) The `timeit`\n method is replicated for cases where an adaptive strategy is not\n desired.\n\n 3) Optional metadata:\n When defining a Timer, one can optionally specify `label`, `sub_label`,\n `description`, and `env`. 
(Defined later) These fields are included in\n the representation of result object and by the `Compare` class to group\n and display results for comparison.\n\n 4) Instruction counts\n In addition to wall times, Timer can run a statement under Callgrind\n and report instructions executed.\n\n Directly analogous to `timeit.Timer` constructor arguments:\n\n `stmt`, `setup`, `timer`, `globals`\n\n PyTorch Timer specific constructor arguments:\n\n `label`, `sub_label`, `description`, `env`, `num_threads`\n\n Args:\n stmt: Code snippet to be run in a loop and timed.\n\n setup: Optional setup code. Used to define variables used in `stmt`\n\n timer:\n Callable which returns the current time. If PyTorch was built\n without CUDA or there is no GPU present, this defaults to\n `timeit.default_timer`; otherwise it will synchronize CUDA before\n measuring the time.\n\n globals:\n A dict which defines the global variables when `stmt` is being\n executed. This is the other method for providing variables which\n `stmt` needs.\n\n label:\n String which summarizes `stmt`. For instance, if `stmt` is\n \"torch.nn.functional.relu(torch.add(x, 1, out=out))\"\n one might set label to \"ReLU(x + 1)\" to improve readability.\n\n sub_label:\n Provide supplemental information to disambiguate measurements\n with identical stmt or label. For instance, in our example\n above sub_label might be \"float\" or \"int\", so that it is easy\n to differentiate:\n \"ReLU(x + 1): (float)\"\n\n \"ReLU(x + 1): (int)\"\n when printing Measurements or summarizing using `Compare`.\n\n description:\n String to distinguish measurements with identical label and\n sub_label. The principal use of `description` is to signal to\n `Compare` the columns of data. For instance one might set it\n based on the input size to create a table of the form: ::\n\n | n=1 | n=4 | ...\n ------------- ...\n ReLU(x + 1): (float) | ... | ... | ...\n ReLU(x + 1): (int) | ... | ... | ...\n\n\n using `Compare`. It is also included when printing a Measurement.\n\n env:\n This tag indicates that otherwise identical tasks were run in\n different environments, and are therefore not equivilent, for\n instance when A/B testing a change to a kernel. `Compare` will\n treat Measurements with different `env` specification as distinct\n when merging replicate runs.\n\n num_threads:\n The size of the PyTorch threadpool when executing `stmt`. Single\n threaded performace is important as both a key inference workload\n and a good indicator of intrinsic algorithmic efficiency, so the\n default is set to one. 
This is in contrast to the default PyTorch\n threadpool size which tries to utilize all cores.\n \"\"\"\n\n _timer_cls: Type[TimerClass] = timeit.Timer\n\n def __init__(\n self,\n stmt: str = \"pass\",\n setup: str = \"pass\",\n timer: Callable[[], float] = timer,\n globals: Optional[Dict[str, Any]] = None,\n label: Optional[str] = None,\n sub_label: Optional[str] = None,\n description: Optional[str] = None,\n env: Optional[str] = None,\n num_threads: int = 1,\n language: Union[Language, str] = Language.PYTHON,\n ):\n if not isinstance(stmt, str):\n raise ValueError(\"Currently only a `str` stmt is supported.\")\n\n # We copy `globals` to prevent mutations from leaking.\n # (For instance, `eval` adds the `__builtins__` key)\n self._globals = dict(globals or {})\n if language in (Language.PYTHON, \"py\", \"python\"):\n # Include `torch` if not specified as a convenience feature.\n self._globals.setdefault(\"torch\", torch)\n self._language: Language = Language.PYTHON\n\n elif language in (Language.CPP, \"cpp\", \"c++\"):\n assert self._timer_cls is timeit.Timer, \"_timer_cls has already been swapped.\"\n self._timer_cls = CPPTimer\n setup = (\"\" if setup == \"pass\" else setup)\n self._language = Language.CPP\n\n else:\n raise ValueError(f\"Invalid language `{language}`.\")\n\n # Convenience adjustment so that multi-line code snippets defined in\n # functions do not IndentationError (Python) or look odd (C++). The\n # leading newline removal is for the initial newline that appears when\n # defining block strings. For instance:\n # textwrap.dedent(\"\"\"\n # print(\"This is a stmt\")\n # \"\"\")\n # produces '\\nprint(\"This is a stmt\")\\n'.\n #\n # Stripping this down to 'print(\"This is a stmt\")' doesn't change\n # what gets executed, but it makes __repr__'s nicer.\n stmt = textwrap.dedent(stmt)\n stmt = (stmt[1:] if stmt and stmt[0] == \"\\n\" else stmt).rstrip()\n setup = textwrap.dedent(setup)\n setup = (setup[1:] if setup and setup[0] == \"\\n\" else setup).rstrip()\n\n self._timer = self._timer_cls(\n stmt=stmt,\n setup=setup,\n timer=timer,\n globals=valgrind_timer_interface.CopyIfCallgrind.unwrap_all(self._globals),\n )\n self._task_spec = common.TaskSpec(\n stmt=stmt,\n setup=setup,\n label=label,\n sub_label=sub_label,\n description=description,\n env=env,\n num_threads=num_threads,\n )\n\n def timeit(self, number: int = 1000000) -> common.Measurement:\n \"\"\"Mirrors the semantics of timeit.Timer.timeit().\n\n Execute the main statement (`stmt`) `number` times.\n https://docs.python.org/3/library/timeit.html#timeit.Timer.timeit\n \"\"\"\n with common.set_torch_threads(self._task_spec.num_threads):\n # Warmup\n self._timer.timeit(number=max(int(number // 100), 1))\n\n return common.Measurement(\n number_per_run=number,\n raw_times=[self._timer.timeit(number=number)],\n task_spec=self._task_spec\n )\n\n def repeat(self, repeat: int = -1, number: int = -1) -> None:\n raise NotImplementedError(\"See `Timer.blocked_autorange.`\")\n\n def autorange(self, callback: Optional[Callable[[int, float], NoReturn]] = None) -> None:\n raise NotImplementedError(\"See `Timer.blocked_autorange.`\")\n\n def _threaded_measurement_loop(\n self,\n number: int,\n time_hook: Callable[[], float],\n stop_hook: Callable[[List[float]], bool],\n min_run_time: float,\n max_run_time: Optional[float] = None,\n callback: Optional[Callable[[int, float], NoReturn]] = None\n ) -> List[float]:\n total_time = 0.0\n can_stop = False\n times: List[float] = []\n with 
common.set_torch_threads(self._task_spec.num_threads):\n while (total_time < min_run_time) or (not can_stop):\n time_spent = time_hook()\n times.append(time_spent)\n total_time += time_spent\n if callback:\n callback(number, time_spent)\n can_stop = stop_hook(times)\n if max_run_time and total_time > max_run_time:\n break\n return times\n\n def _estimate_block_size(self, min_run_time: float) -> int:\n with common.set_torch_threads(self._task_spec.num_threads):\n # Estimate the block size needed for measurement to be negligible\n # compared to the inner loop. This also serves as a warmup.\n overhead = np.median([self._timer.timeit(0) for _ in range(5)])\n number = 1\n while True:\n time_taken = self._timer.timeit(number)\n relative_overhead = overhead / time_taken\n if relative_overhead <= 1e-4 and time_taken >= min_run_time / 1000:\n break\n if time_taken > min_run_time:\n break\n number *= 10\n return number\n\n def adaptive_autorange(\n self,\n threshold: float = 0.1,\n *,\n min_run_time: float = 0.01,\n max_run_time: float = 10.0,\n callback: Optional[Callable[[int, float], NoReturn]] = None,\n ) -> common.Measurement:\n number = self._estimate_block_size(min_run_time=0.05)\n\n def time_hook() -> float:\n return self._timer.timeit(number)\n\n def stop_hook(times: List[float]) -> bool:\n if len(times) > 3:\n return common.Measurement(\n number_per_run=number,\n raw_times=times,\n task_spec=self._task_spec\n ).meets_confidence(threshold=threshold)\n return False\n times = self._threaded_measurement_loop(\n number, time_hook, stop_hook, min_run_time, max_run_time, callback=callback)\n\n return common.Measurement(\n number_per_run=number,\n raw_times=times,\n task_spec=self._task_spec\n )\n\n def blocked_autorange(\n self,\n callback: Optional[Callable[[int, float], NoReturn]] = None,\n min_run_time: float = 0.2,\n ) -> common.Measurement:\n \"\"\"Measure many replicates while keeping timer overhead to a minimum.\n\n At a high level, blocked_autorange executes the following pseudo-code::\n\n `setup`\n\n total_time = 0\n while total_time < min_run_time\n start = timer()\n for _ in range(block_size):\n `stmt`\n total_time += (timer() - start)\n\n Note the variable `block_size` in the inner loop. The choice of block\n size is important to measurement quality, and must balance two\n competing objectives:\n\n 1) A small block size results in more replicates and generally\n better statistics.\n\n 2) A large block size better amortizes the cost of `timer`\n invocation, and results in a less biased measurement. This is\n important because CUDA syncronization time is non-trivial\n (order single to low double digit microseconds) and would\n otherwise bias the measurement.\n\n blocked_autorange sets block_size by running a warmup period,\n increasing block size until timer overhead is less than 0.1% of\n the overall computation. 
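        As a minimal usage sketch (hypothetical statement and tensor sizes; this
        assumes a CPU-only run so the default `timeit.default_timer` applies, and
        that `Timer` is imported from the public `torch.utils.benchmark` namespace): ::

            >>> import torch
            >>> from torch.utils.benchmark import Timer
            >>> t = Timer(
            ...     stmt="torch.nn.functional.relu(x)",    # statement timed in a loop
            ...     setup="x = torch.ones((128, 128))",    # executed before the timed loop
            ...     label="ReLU",                          # metadata later used for grouping
            ...     num_threads=1,
            ... )
            >>> m = t.blocked_autorange(min_run_time=0.2)  # returns a Measurement
            >>> print(m)                                   # summarizes the replicate times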
This value is then used for the main\n measurement loop.\n\n Returns:\n A `Measurement` object that contains measured runtimes and\n repetition counts, and can be used to compute statistics.\n (mean, median, etc.)\n \"\"\"\n number = self._estimate_block_size(min_run_time)\n\n def time_hook() -> float:\n return self._timer.timeit(number)\n\n def stop_hook(times: List[float]) -> bool:\n return True\n\n times = self._threaded_measurement_loop(\n number, time_hook, stop_hook,\n min_run_time=min_run_time,\n callback=callback)\n\n return common.Measurement(\n number_per_run=number,\n raw_times=times,\n task_spec=self._task_spec\n )\n\n def collect_callgrind(\n self,\n number: int = 100,\n collect_baseline: bool = True\n ) -> valgrind_timer_interface.CallgrindStats:\n \"\"\"Collect instruction counts using Callgrind.\n\n Unlike wall times, instruction counts are deterministic\n (modulo non-determinism in the program itself and small amounts of\n jitter from the Python interpreter.) This makes them ideal for detailed\n performance analysis. This method runs `stmt` in a separate process\n so that Valgrind can instrument the program. Performance is severely\n degraded due to the instrumentation, howevever this is ameliorated by\n the fact that a small number of iterations is generally sufficient to\n obtain good measurements.\n\n In order to to use this method `valgrind`, `callgrind_control`, and\n `callgrind_annotate` must be installed.\n\n Because there is a process boundary between the caller (this process)\n and the `stmt` execution, `globals` cannot contain arbitrary in-memory\n data structures. (Unlike timing methods) Instead, globals are\n restricted to builtins, `nn.Modules`'s, and TorchScripted functions/modules\n to reduce the surprise factor from serialization and subsequent\n deserialization. The `GlobalsBridge` class provides more detail on this\n subject. Take particular care with nn.Modules: they rely on pickle and\n you may need to add an import to `setup` for them to transfer properly.\n\n By default, a profile for an empty statement will be collected and\n cached to indicate how many instructions are from the Python loop which\n drives `stmt`.\n\n Returns:\n A `CallgrindStats` object which provides instruction counts and\n some basic facilities for analyzing and manipulating results.\n \"\"\"\n if not isinstance(self._task_spec.stmt, str):\n raise ValueError(\"`collect_callgrind` currently only supports string `stmt`\")\n\n # Check that the statement is valid. 
It doesn't guarantee success, but it's much\n # simpler and quicker to raise an exception for a faulty `stmt` or `setup` in\n # the parent process rather than the valgrind subprocess.\n self._timer.timeit(1)\n is_python = (self._language == Language.PYTHON)\n assert is_python or not self._globals\n return valgrind_timer_interface.wrapper_singleton().collect_callgrind(\n task_spec=self._task_spec,\n globals=self._globals,\n number=number,\n collect_baseline=collect_baseline and is_python,\n is_python=is_python)\n", "\n\n\n\nimport numpy as np\nfrom hypothesis import given, settings, assume\nimport hypothesis.strategies as st\n\nfrom caffe2.python import core, utils, workspace\nimport caffe2.python.hypothesis_test_util as hu\nimport caffe2.python.serialized_test.serialized_test_util as serial\n\n\n\nclass TestLocallyConnectedOp(serial.SerializedTestCase):\n @given(N=st.integers(1, 3),\n C=st.integers(1, 3),\n H=st.integers(1, 5),\n W=st.integers(1, 5),\n M=st.integers(1, 3),\n kernel=st.integers(1, 3),\n op_name=st.sampled_from([\"LC\", \"LC2D\"]),\n order=st.sampled_from([\"NCHW\", \"NHWC\"]),\n use_bias=st.booleans(),\n **hu.gcs)\n @settings(deadline=10000)\n def test_lc_2d(\n self, N, C, H, W, M, kernel, op_name, order, use_bias, gc, dc):\n if H < kernel:\n kernel = H\n if W < kernel:\n kernel = W\n\n assume(C == kernel * N)\n\n op = core.CreateOperator(\n op_name,\n [\"X\", \"W\", \"b\"] if use_bias else [\"X\", \"W\"],\n [\"Y\"],\n kernels=[kernel, kernel],\n order=order,\n engine=\"\",\n )\n\n Y_H = H - kernel + 1\n Y_W = W - kernel + 1\n if order == \"NCHW\":\n X = np.random.rand(N, C, H, W).astype(np.float32) - 0.5\n W = np.random.rand(Y_H, Y_W, M, C, kernel,\n kernel).astype(np.float32) - 0.5\n else:\n X = np.random.rand(N, H, W, C).astype(np.float32) - 0.5\n W = np.random.rand(Y_H, Y_W, M, kernel, kernel,\n C).astype(np.float32) - 0.5\n b = np.random.rand(Y_H, Y_W, M).astype(np.float32) - 0.5\n inputs = [X, W, b] if use_bias else [X, W]\n\n def lc_2d_nchw(X, W, b=None):\n N, C, XH, XW = X.shape\n YH, YW, M, _, KH, KW = W.shape\n\n def conv(n, m, yh, yw):\n sum = b[yh, yw, m] if b is not None else 0\n for c in range(C):\n for kh in range(KH):\n for kw in range(KW):\n hh = yh + kh\n ww = yw + kw\n sum += X[n, c, hh, ww] * W[yh, yw, m, c, kh, kw]\n return sum\n\n output = np.zeros((N, M, YH, YW), dtype=np.float32)\n for n in range(N):\n for m in range(M):\n for yh in range(YH):\n for yw in range(YW):\n output[n, m, yh, yw] = conv(n, m, yh, yw)\n return [output]\n\n def lc_2d_nhwc(X, W, b=None):\n XT = utils.NHWC2NCHW(X)\n WT = np.transpose(W, [0, 1, 2, 5, 3, 4])\n output = lc_2d_nchw(XT, WT, b)\n return [utils.NCHW2NHWC(output[0])]\n\n ref_op = lc_2d_nchw if order == \"NCHW\" else lc_2d_nhwc\n\n self.assertReferenceChecks(\n device_option=gc,\n op=op,\n inputs=inputs,\n reference=ref_op,\n )\n self.assertDeviceChecks(dc, op, inputs, [0])\n for i in range(len(inputs)):\n self.assertGradientChecks(gc, op, inputs, i, [0])\n\n @given(N=st.integers(1, 3),\n C=st.integers(1, 3),\n size=st.integers(1, 5),\n M=st.integers(1, 3),\n kernel=st.integers(1, 3),\n op_name=st.sampled_from([\"LC\", \"LC1D\"]),\n use_bias=st.booleans(),\n **hu.gcs)\n @settings(deadline=1000)\n def test_lc_1d(self, N, C, size, M, kernel, op_name, use_bias, gc, dc):\n if workspace.has_hip_support:\n # Skip as test flaky on ROCM with deadline set to 1000\n return\n if size < kernel:\n kernel = size\n\n op = core.CreateOperator(\n op_name,\n [\"X\", \"W\", \"b\"] if use_bias else [\"X\", \"W\"],\n [\"Y\"],\n 
kernels=[kernel],\n order=\"NCHW\",\n engine=\"\",\n )\n\n L = size - kernel + 1\n X = np.random.rand(N, C, size).astype(np.float32) - 0.5\n W = np.random.rand(L, M, C, kernel).astype(np.float32) - 0.5\n b = np.random.rand(L, M).astype(np.float32) - 0.5\n inputs = [X, W, b] if use_bias else [X, W]\n\n def lc_1d_nchw(X, W, b=None):\n N, C, XL = X.shape\n YL, M, _, KL = W.shape\n\n def conv(n, m, yl):\n sum = b[yl, m] if b is not None else 0\n for c in range(C):\n for kl in range(KL):\n ll = yl + kl\n sum += X[n, c, ll] * W[yl, m, c, kl]\n return sum\n\n output = np.zeros((N, M, YL), dtype=np.float32)\n for n in range(N):\n for m in range(M):\n for yl in range(YL):\n output[n, m, yl] = conv(n, m, yl)\n return [output]\n\n self.assertReferenceChecks(\n device_option=gc,\n op=op,\n inputs=inputs,\n reference=lc_1d_nchw,\n )\n self.assertDeviceChecks(dc, op, inputs, [0])\n for i in range(len(inputs)):\n self.assertGradientChecks(gc, op, inputs, i, [0])\n\n @given(N=st.integers(1, 1),\n C=st.integers(1, 1),\n T=st.integers(2, 2),\n H=st.integers(2, 2),\n W=st.integers(2, 2),\n M=st.integers(1, 1),\n kernel=st.integers(2, 2),\n op_name=st.sampled_from([\"LC\", \"LC3D\"]),\n use_bias=st.booleans(),\n **hu.gcs)\n @settings(deadline=1000)\n def test_lc_3d(self, N, C, T, H, W, M, kernel, op_name, use_bias, gc, dc):\n if T < kernel:\n kernel = T\n if H < kernel:\n kernel = H\n if W < kernel:\n kernel = W\n\n op = core.CreateOperator(\n op_name,\n [\"X\", \"W\", \"b\"] if use_bias else [\"X\", \"W\"],\n [\"Y\"],\n kernels=[kernel, kernel, kernel],\n order=\"NCHW\",\n engine=\"\",\n )\n\n Y_T = T - kernel + 1\n Y_H = H - kernel + 1\n Y_W = W - kernel + 1\n X = np.random.rand(N, C, T, H, W).astype(np.float32) - 0.5\n W = np.random.rand(Y_T, Y_H, Y_W, M, C, kernel,\n kernel, kernel).astype(np.float32) - 0.5\n b = np.random.rand(Y_T, Y_H, Y_W, M).astype(np.float32) - 0.5\n inputs = [X, W, b] if use_bias else [X, W]\n\n def lc_3d_nchw(X, W, b=None):\n N, C, XT, XH, XW = X.shape\n YT, YH, YW, M, _, KT, KH, KW = W.shape\n\n def conv(n, m, yt, yh, yw):\n sum = b[yt, yh, yw, m] if b is not None else 0\n for c in range(C):\n for kt in range(KT):\n for kh in range(KH):\n for kw in range(KW):\n tt = yt + kt\n hh = yh + kh\n ww = yw + kw\n sum += X[n, c, tt, hh, ww] * \\\n W[yt, yh, yw, m, c, kt, kh, kw]\n return sum\n\n output = np.zeros((N, M, YT, YH, YW), dtype=np.float32)\n for n in range(N):\n for m in range(M):\n for yt in range(YT):\n for yh in range(YH):\n for yw in range(YW):\n output[n, m, yt, yh, yw] = conv(\n n, m, yt, yh, yw)\n return [output]\n\n self.assertReferenceChecks(\n device_option=gc,\n op=op,\n inputs=inputs,\n reference=lc_3d_nchw,\n )\n self.assertDeviceChecks(dc, op, inputs, [0])\n for i in range(len(inputs)):\n self.assertGradientChecks(gc, op, inputs, i, [0])\n", "from contextlib import contextmanager\nimport copy\nimport itertools\nimport os\nimport inspect\nimport logging\nimport warnings\nfrom typing import NamedTuple\n\nimport torch\n\nfrom . 
import comm\nimport torch.distributed as dist\n\nRPC_AVAILABLE = False\nif dist.is_available():\n from torch.distributed.distributed_c10d import _get_default_group\n from torch.distributed.distributed_c10d import ReduceOp\nif torch.distributed.rpc.is_available():\n RPC_AVAILABLE = True\n from torch.distributed.rpc import RRef\nfrom ..modules import Module\nfrom .replicate import replicate\nfrom .scatter_gather import scatter_kwargs, gather, is_namedtuple\nfrom .parallel_apply import parallel_apply\nfrom torch._utils import _get_device_index, _get_all_device_indices\nfrom ._functions import _get_stream\n\n\ndef _find_tensors(obj):\n r\"\"\"\n Recursively find all tensors contained in the specified object.\n \"\"\"\n if RPC_AVAILABLE and isinstance(obj, RRef):\n # If the current node is the owner of the RRef, unwrap it and try to\n # find Tensors.\n # TODO: Expand to remote RRefs.\n if obj.is_owner():\n return _find_tensors(obj.local_value())\n if isinstance(obj, torch.Tensor):\n return [obj]\n if isinstance(obj, (list, tuple)):\n return itertools.chain(*map(_find_tensors, obj))\n if isinstance(obj, dict):\n return itertools.chain(*map(_find_tensors, obj.values()))\n return []\n\ndef _dump_DDP_relevant_env_vars():\n relevant_env_vars = [\n \"RANK\",\n \"LOCAL_RANK\",\n \"WORLD_SIZE\",\n \"MASTER_PORT\",\n \"MASTER_ADDR\",\n \"CUDA_VISIBLE_DEVICES\",\n \"GLOO_SOCKET_IFNAME\",\n \"GLOO_DEVICE_TRANSPORT\",\n \"NCCL_SOCKET_IFNAME\",\n \"NCCL_BLOCKING_WAIT\",\n \"NCCL_DEBUG\",\n \"NCCL_DEBUG_SUBSYS\",\n \"NCCL_IB_DISABLE\",\n # More NCCL env vars:\n \"NCCL_P2P_DISABLE\",\n \"NCCL_P2P_LEVEL\",\n \"NCCL_SHM_DISABLE\",\n \"NCCL_SOCKET_NTHREADS\",\n \"NCCL_NSOCKS_PERTHREAD\",\n \"NCCL_BUFFSIZE\",\n \"NCCL_NTHREADS\",\n \"NCCL_RINGS\",\n \"NCCL_MAX_NCHANNELS\",\n \"NCCL_MIN_NCHANNELS\",\n \"NCCL_CHECKS_DISABLE\",\n \"NCCL_CHECK_POINTERS\",\n \"NCCL_LAUNCH_MODE\",\n \"NCCL_IB_HCA\",\n \"NCCL_IB_TIMEOUT\",\n \"NCCL_IB_RETRY_CNT\",\n \"NCCL_IB_GID_INDEX\",\n \"NCCL_IB_SL\",\n \"NCCL_IB_TC\",\n \"NCCL_IB_AR_THRESHOLD\",\n \"NCCL_IB_CUDA_SUPPORT\",\n \"NCCL_NET_GDR_LEVEL\",\n \"NCCL_NET_GDR_READ\",\n \"NCCL_SINGLE_RING_THRESHOLD\",\n \"NCCL_LL_THRESHOLD\",\n \"NCCL_TREE_THRESHOLD\",\n \"NCCL_ALGO\",\n \"NCCL_PROTO\",\n \"NCCL_IGNORE_CPU_AFFINITY\",\n \"NCCL_DEBUG_FILE\",\n \"NCCL_COLLNET_ENABLE\",\n \"NCCL_TOPO_FILE\",\n \"NCCL_TOPO_DUMP_FILE\",\n ]\n formatted_output = \"\"\n for var in relevant_env_vars:\n value = os.environ[var] if var in os.environ else \"N/A\"\n formatted_output += \"env:%s=%s\\n\" % (var, value)\n print(formatted_output)\n\n\n\nclass _DDPUnevenInputsConfig(NamedTuple):\n ddp_join_enabled: bool\n ddp_join_divide_by_initial_world_size: bool\n\n\nclass DistributedDataParallel(Module):\n r\"\"\"Implements distributed data parallelism that is based on\n ``torch.distributed`` package at the module level.\n\n This container parallelizes the application of the given module by\n splitting the input across the specified devices by chunking in the batch\n dimension. The module is replicated on each machine and each device, and\n each such replica handles a portion of the input. 
During the backwards\n pass, gradients from each node are averaged.\n\n The batch size should be larger than the number of GPUs used locally.\n\n See also: :ref:`distributed-basics` and :ref:`cuda-nn-ddp-instead`.\n The same constraints on input as in :class:`torch.nn.DataParallel` apply.\n\n Creation of this class requires that ``torch.distributed`` to be already\n initialized, by calling :func:`torch.distributed.init_process_group`.\n\n ``DistributedDataParallel`` is proven to be significantly faster than\n :class:`torch.nn.DataParallel` for single-node multi-GPU data\n parallel training.\n\n To use ``DistributedDataParallel`` on a host with N GPUs, you should spawn\n up ``N`` processes, ensuring that each process exclusively works on a single\n GPU from 0 to N-1. This can be done by either setting\n ``CUDA_VISIBLE_DEVICES`` for every process or by calling:\n\n >>> torch.cuda.set_device(i)\n\n where i is from 0 to N-1. In each process, you should refer the following\n to construct this module:\n\n >>> torch.distributed.init_process_group(\n >>> backend='nccl', world_size=N, init_method='...'\n >>> )\n >>> model = DistributedDataParallel(model, device_ids=[i], output_device=i)\n\n In order to spawn up multiple processes per node, you can use either\n ``torch.distributed.launch`` or ``torch.multiprocessing.spawn``.\n\n .. note ::\n Please refer to `PyTorch Distributed Overview <https://pytorch.org/tutorials/beginner/dist_overview.html>`__\n for a brief introduction to all features related to distributed training.\n\n .. note:: ``nccl`` backend is currently the fastest and highly recommended\n backend when using GPUs. This applies to both single-node and\n multi-node distributed training.\n\n .. note:: This module also supports mixed-precision distributed training.\n This means that your model can have different types of parameters such\n as mixed types of ``fp16`` and ``fp32``, the gradient reduction on these\n mixed types of parameters will just work fine.\n\n .. note:: If you use ``torch.save`` on one process to checkpoint the module,\n and ``torch.load`` on some other processes to recover it, make sure that\n ``map_location`` is configured properly for every process. Without\n ``map_location``, ``torch.load`` would recover the module to devices\n where the module was saved from.\n\n .. note:: When a model is trained on ``M`` nodes with ``batch=N``, the\n gradient will be ``M`` times smaller when compared to the same model\n trained on a single node with ``batch=M*N`` if the loss is summed (NOT\n averaged as usual) across instances in a batch (because the gradients\n between different nodes are averaged). You should take this into\n consideration when you want to obtain a mathematically equivalent\n training process compared to the local training counterpart. But in most\n cases, you can just treat a DistributedDataParallel wrapped model, a\n DataParallel wrapped model and an ordinary model on a single GPU as the\n same (E.g. using the same learning rate for equivalent batch size).\n\n .. note::\n Parameters are never broadcast between processes. The module performs\n an all-reduce step on gradients and assumes that they will be modified\n by the optimizer in all processes in the same way. Buffers\n (e.g. BatchNorm stats) are broadcast from the module in process of rank\n 0, to all other replicas in the system in every iteration.\n\n .. 
note::\n If you are using DistributedDataParallel in conjunction with the\n :ref:`distributed-rpc-framework`, you should always use\n :meth:`torch.distributed.autograd.backward` to compute gradients and\n :class:`torch.distributed.optim.DistributedOptimizer` for optimizing\n parameters.\n\n Example::\n\n >>> import torch.distributed.autograd as dist_autograd\n >>> from torch.nn.parallel import DistributedDataParallel as DDP\n >>> from torch import optim\n >>> from torch.distributed.optim import DistributedOptimizer\n >>> from torch.distributed.rpc import RRef\n >>>\n >>> t1 = torch.rand((3, 3), requires_grad=True)\n >>> t2 = torch.rand((3, 3), requires_grad=True)\n >>> rref = rpc.remote(\"worker1\", torch.add, args=(t1, t2))\n >>> ddp_model = DDP(my_model)\n >>>\n >>> # Setup optimizer\n >>> optimizer_params = [rref]\n >>> for param in ddp_model.parameters():\n >>> optimizer_params.append(RRef(param))\n >>>\n >>> dist_optim = DistributedOptimizer(\n >>> optim.SGD,\n >>> optimizer_params,\n >>> lr=0.05,\n >>> )\n >>>\n >>> with dist_autograd.context() as context_id:\n >>> pred = ddp_model(rref.to_here())\n >>> loss = loss_func(pred, loss)\n >>> dist_autograd.backward(context_id, loss)\n >>> dist_optim.step()\n\n .. warning::\n Constructor, forward method, and differentiation of the output (or a\n function of the output of this module) are distributed synchronization\n points. Take that into account in case different processes might be\n executing different code.\n\n .. warning::\n This module assumes all parameters are registered in the model by the\n time it is created. No parameters should be added nor removed later.\n Same applies to buffers.\n\n .. warning::\n This module assumes all parameters are registered in the model of each\n distributed processes are in the same order. The module itself will\n conduct gradient ``allreduce`` following the reverse order of the\n registered parameters of the model. In other words, it is users'\n responsibility to ensure that each distributed process has the exact\n same model and thus the exact same parameter registration order.\n\n .. warning::\n This module allows parameters with non-rowmajor-contiguous strides.\n For example, your model may contain some parameters whose\n :class:`torch.memory_format` is ``torch.contiguous_format``\n and others whose format is ``torch.channels_last``. However,\n corresponding parameters in different processes must have the\n same strides.\n\n .. warning::\n This module doesn't work with :func:`torch.autograd.grad` (i.e. it will\n only work if gradients are to be accumulated in ``.grad`` attributes of\n parameters).\n\n .. warning::\n If you plan on using this module with a ``nccl`` backend or a ``gloo``\n backend (that uses Infiniband), together with a DataLoader that uses\n multiple workers, please change the multiprocessing start method to\n ``forkserver`` (Python 3 only) or ``spawn``. Unfortunately\n Gloo (that uses Infiniband) and NCCL2 are not fork safe, and you will\n likely experience deadlocks if you don't change this setting.\n\n .. warning::\n Forward and backward hooks defined on :attr:`module` and its submodules\n won't be invoked anymore, unless the hooks are initialized in the\n :meth:`forward` method.\n\n .. warning::\n You should never try to change your model's parameters after wrapping\n up your model with ``DistributedDataParallel``. 
Because, when\n wrapping up your model with ``DistributedDataParallel``, the constructor\n of ``DistributedDataParallel`` will register the additional gradient\n reduction functions on all the parameters of the model itself at the\n time of construction. If you change the model's parameters afterwards,\n gradient redunction functions no longer match the correct set of\n parameters.\n\n .. warning::\n Using ``DistributedDataParallel`` in conjunction with the\n :ref:`distributed-rpc-framework` is experimental and subject to change.\n\n .. warning::\n The ``gradient_as_bucket_view`` mode does not yet work with Automatic\n Mixed Precision (AMP). AMP maintains stashed gradients that are used for\n unscaling gradients. With ``gradient_as_bucket_view=True``, these\n stashed gradients will point to communication buckets in the first\n iteration. In the next iteration, the communication buckets are mutated\n and thus these stashed gradients will be unexpectedly mutated as well,\n which might lead to wrong results.\n\n Args:\n module (Module): module to be parallelized\n device_ids (list of int or torch.device): CUDA devices. This should\n only be provided when the input module resides on a single\n CUDA device. For single-device modules, the i'th\n :attr:`module` replica is placed on ``device_ids[i]``. For\n multi-device modules and CPU modules, ``device_ids`` must be\n ``None`` or an empty list, and input data for the forward\n pass must be placed on the correct device. (default: all\n visible devices for single-device modules)\n output_device (int or torch.device): Device location of output for\n single-device CUDA modules. For multi-device modules and\n CPU modules, it must be ``None``, and the module itself\n dictates the output location. (default: ``device_ids[0]``\n for single-device modules)\n broadcast_buffers (bool): Flag that enables syncing (broadcasting)\n buffers of the module at beginning of the ``forward``\n function. (default: ``True``)\n process_group: The process group to be used for distributed data\n all-reduction. If ``None``, the default process group, which\n is created by :func:`torch.distributed.init_process_group`,\n will be used. (default: ``None``)\n bucket_cap_mb: ``DistributedDataParallel`` will bucket parameters into\n multiple buckets so that gradient reduction of each\n bucket can potentially overlap with backward computation.\n :attr:`bucket_cap_mb` controls the bucket size in\n MegaBytes (MB). (default: 25)\n find_unused_parameters (bool): Traverse the autograd graph from all\n tensors contained in the return value of the\n wrapped module's ``forward`` function. Parameters\n that don't receive gradients as part of this\n graph are preemptively marked as being ready to\n be reduced. Note that all ``forward`` outputs\n that are derived from module parameters must\n participate in calculating loss and later the\n gradient computation. If they don't, this wrapper\n will hang waiting for autograd to produce\n gradients for those parameters. Any outputs\n derived from module parameters that are otherwise\n unused can be detached from the autograd graph\n using ``torch.Tensor.detach``. (default: ``False``)\n check_reduction: This argument is deprecated.\n gradient_as_bucket_view (bool): This is a prototype feature and subject\n to changes. When set to ``True``, gradients will be views\n pointing to different offsets of ``allreduce`` communication\n buckets. This can reduce peak memory usage, where the\n saved memory size will be equal to the total gradients\n size. 
Moreover, it avoids the overhead of copying between\n gradients and ``allreduce`` communication buckets. When\n gradients are views, ``detach_()`` cannot be called on the\n gradients. If hitting such errors, please fix it by\n referring to the :meth:`~torch.optim.Optimizer.zero_grad`\n function in ``torch/optim/optimizer.py`` as a solution.\n\n\n Attributes:\n module (Module): the module to be parallelized.\n\n Example::\n\n >>> torch.distributed.init_process_group(backend='nccl', world_size=4, init_method='...')\n >>> net = torch.nn.parallel.DistributedDataParallel(model, pg)\n \"\"\"\n def __init__(self, module, device_ids=None,\n output_device=None, dim=0, broadcast_buffers=True,\n process_group=None,\n bucket_cap_mb=25,\n find_unused_parameters=False,\n check_reduction=False,\n gradient_as_bucket_view=False):\n\n super(DistributedDataParallel, self).__init__()\n\n assert any((p.requires_grad for p in module.parameters())), (\n \"DistributedDataParallel is not needed when a module \"\n \"doesn't have any parameter that requires a gradient.\"\n )\n\n self.is_multi_device_module = len({p.device for p in module.parameters()}) > 1\n distinct_device_types = {p.device.type for p in module.parameters()}\n assert len(distinct_device_types) == 1, (\n \"DistributedDataParallel's input module must be on \"\n \"the same type of devices, but input module parameters locate in {}.\"\n ).format(distinct_device_types)\n self.device_type = list(distinct_device_types)[0]\n\n if self.device_type == \"cpu\" or self.is_multi_device_module:\n assert not device_ids and not output_device, (\n \"DistributedDataParallel device_ids and output_device arguments \"\n \"only work with single-device GPU modules, but got \"\n \"device_ids {}, output_device {}, and module parameters {}.\"\n ).format(device_ids, output_device, {p.device for p in module.parameters()})\n\n self.device_ids = None\n self.output_device = None\n else:\n # Use all devices by default for single-device GPU modules\n if device_ids is None:\n device_ids = _get_all_device_indices()\n\n self.device_ids = [_get_device_index(x, True) for x in device_ids]\n\n if output_device is None:\n output_device = device_ids[0]\n\n self.output_device = _get_device_index(output_device, True)\n\n if process_group is None:\n self.process_group = _get_default_group()\n else:\n self.process_group = process_group\n\n self.dim = dim\n self.module = module\n self.device = list(self.module.parameters())[0].device\n self.broadcast_buffers = broadcast_buffers\n self.find_unused_parameters = find_unused_parameters\n self.require_backward_grad_sync = True\n self.require_forward_param_sync = True\n self.ddp_uneven_inputs_config = _DDPUnevenInputsConfig(\n ddp_join_enabled=False, ddp_join_divide_by_initial_world_size=False\n )\n self.gradient_as_bucket_view = gradient_as_bucket_view\n if hasattr(module, '_ddp_params_and_buffers_to_ignore'):\n self.parameters_to_ignore = module._ddp_params_and_buffers_to_ignore\n else:\n self.parameters_to_ignore = []\n\n if check_reduction:\n # This argument is no longer used since the reducer\n # will ensure reduction completes even if some parameters\n # do not receive gradients.\n warnings.warn(\n \"The `check_reduction` argument in `DistributedDataParallel` \"\n \"module is deprecated. 
Please avoid using it.\"\n )\n pass\n\n # Check that a module does not have Uninitialized parameters\n for param in module.parameters():\n if isinstance(param, torch.nn.parameter.UninitializedParameter):\n raise RuntimeError(\n 'Modules with uninitialized parameters can\\'t be used with `DistributedDataParallel`. '\n 'Run a dummy forward pass to correctly initialize the modules')\n # used for intra-node param sync and inter-node sync as wel\n self.broadcast_bucket_size = int(250 * 1024 * 1024)\n\n # reduction bucket size\n self.bucket_bytes_cap = int(bucket_cap_mb * 1024 * 1024)\n # Whether to perform input tensor CPU to GPU copies on a side-stream\n self.use_side_stream_for_tensor_copies = os.environ.get(\"PYTORCH_DDP_USE_SIDE_STREAM\", \"1\") == \"1\"\n\n # Sync params and buffers\n self._sync_params_and_buffers(authoritative_rank=0)\n\n self._ddp_init_helper()\n\n def _sync_params_and_buffers(self, authoritative_rank=0):\n module_states = []\n for name, param in self.module.state_dict().items():\n if name not in self.parameters_to_ignore:\n module_states.append(param)\n\n if len(module_states) > 0:\n self._distributed_broadcast_coalesced(\n module_states,\n self.broadcast_bucket_size,\n authoritative_rank)\n\n def _ddp_init_helper(self):\n \"\"\"\n Initialization helper function that does the following:\n\n (1) replicating the module from device[0] to the other devices\n (2) bucketing the parameters for reductions\n (3) resetting the bucketing states\n (4) registering the grad hooks\n (5) passing a handle of DDP to SyncBatchNorm Layer\n \"\"\"\n\n def parameters(m, recurse=True):\n def model_parameters(m):\n ps = m._former_parameters.values() \\\n if hasattr(m, \"_former_parameters\") \\\n else m.parameters(recurse=False)\n for p in ps:\n yield p\n\n for m in m.modules() if recurse else [m]:\n for p in model_parameters(m):\n yield p\n\n if self.device_ids and len(self.device_ids) > 1:\n\n warnings.warn(\n \"Single-Process Multi-GPU is not the recommended mode for \"\n \"DDP. In this mode, each DDP instance operates on multiple \"\n \"devices and creates multiple module replicas within one \"\n \"process. The overhead of scatter/gather and GIL contention \"\n \"in every forward pass can slow down training. \"\n \"Please consider using one DDP instance per device or per \"\n \"module replica by explicitly setting device_ids or \"\n \"CUDA_VISIBLE_DEVICES. \"\n )\n\n # only create replicas for single-device CUDA modules\n #\n # TODO: we don't need to replicate params in here. 
they're always going to\n # be broadcasted using larger blocks in broadcast_coalesced, so it might be\n # better to not pollute the caches with these small blocks\n self._module_copies = replicate(self.module, self.device_ids, detach=True)\n self._module_copies[0] = self.module\n\n for module_copy in self._module_copies[1:]:\n for param, copy_param in zip(self.module.parameters(), parameters(module_copy)):\n # Reducer requires param copies have the same strides across replicas.\n # Fixes up copy_param strides in case replicate didn't match param strides.\n if param.layout is torch.strided and param.stride() != copy_param.stride():\n with torch.no_grad():\n copy_param.set_(copy_param.clone()\n .as_strided(param.size(), param.stride())\n .copy_(copy_param))\n copy_param.requires_grad = param.requires_grad\n\n else:\n self._module_copies = [self.module]\n\n self.modules_params = [list(parameters(m)) for m in self._module_copies]\n # Collect buffers for modules, filtering out buffers that should be ignored.\n named_module_buffers = [\n [(buffer, buffer_name) for buffer_name, buffer in m.named_buffers()]\n for m in self._module_copies\n ]\n self.modules_buffers = [\n [\n buffer\n for (buffer, buffer_name) in module_buffers\n if buffer_name not in self.parameters_to_ignore\n ]\n for module_buffers in named_module_buffers\n ]\n # Build tuple of (module, parameter) for all parameters that require grads.\n if self.device_ids and len(self.device_ids) > 1:\n # Single-process multi-device mode,does not support self.parameters_to_ignore.\n if self.parameters_to_ignore:\n raise ValueError(\n \"Single-Process multi-device mode does not \"\n \"support ignoring parameters upfront. Please consider \"\n \"using one DDP instance per device.\"\n )\n\n modules_and_parameters = [\n [\n (module, parameter)\n for module in replica.modules()\n for parameter in filter(\n lambda parameter: parameter.requires_grad,\n parameters(module, recurse=False))\n ] for replica in self._module_copies]\n else:\n modules_and_parameters = [\n [\n (module, parameter)\n for module_name, module in replica.named_modules()\n for parameter in [\n param\n # Note that we access module.named_parameters instead of\n # parameters(module). parameters(module) is only needed in the\n # single-process multi device case, where it accesses replicated\n # parameters through _former_parameters.\n for param_name, param in module.named_parameters(recurse=False)\n if param.requires_grad\n and f\"{module_name}.{param_name}\" not in self.parameters_to_ignore\n ]\n ]\n for replica in self._module_copies\n ]\n\n # Build list of parameters.\n parameters = [\n list(parameter for _, parameter in replica)\n for replica in modules_and_parameters]\n\n # Checks if a module will produce a sparse gradient.\n def produces_sparse_gradient(module):\n if isinstance(module, torch.nn.Embedding):\n return module.sparse\n if isinstance(module, torch.nn.EmbeddingBag):\n return module.sparse\n return False\n\n # Build list of booleans indicating whether or not to expect sparse\n # gradients for the corresponding parameters.\n expect_sparse_gradient = [\n list(produces_sparse_gradient(module) for module, _ in replica)\n for replica in modules_and_parameters]\n\n # The bucket size limit is specified in the constructor.\n # Additionally, we allow for a single small bucket for parameters\n # that are defined first, such that their gradients don't spill into\n # a much larger bucket, adding unnecessary latency after gradient\n # computation finishes. 
Experiments showed 1MB is a reasonable value.\n bucket_indices = dist._compute_bucket_assignment_by_size(\n parameters[0],\n [dist._DEFAULT_FIRST_BUCKET_BYTES, self.bucket_bytes_cap],\n expect_sparse_gradient[0])\n\n # Note: reverse list of buckets because we want to approximate the\n # order in which their gradients are produced, and assume they\n # are used in the forward pass in the order they are defined.\n self.reducer = dist.Reducer(\n parameters,\n list(reversed(bucket_indices)),\n self.process_group,\n expect_sparse_gradient,\n self.bucket_bytes_cap,\n self.find_unused_parameters,\n self.gradient_as_bucket_view)\n\n # Set logging data that can be got during construction time.\n dist._set_construction_logging_data(\n self.reducer,\n self.module.__class__.__name__,\n [] if self.device_ids is None else self.device_ids,\n -1 if self.output_device is None else self.output_device,\n self.broadcast_buffers)\n\n # passing a handle to torch.nn.SyncBatchNorm layer\n self._passing_sync_batchnorm_handle(self._module_copies)\n\n def __getstate__(self):\n self._check_default_group()\n attrs = copy.copy(self.__dict__)\n del attrs['process_group']\n del attrs['reducer']\n return attrs\n\n def __setstate__(self, state):\n # If serializable, then the process group should be the default one\n self.process_group = _get_default_group()\n super(DistributedDataParallel, self).__setstate__(state)\n self.__dict__.setdefault('require_forward_param_sync', True)\n self.__dict__.setdefault('require_backward_grad_sync', True)\n self._ddp_init_helper()\n\n def _check_default_group(self):\n pickle_not_supported = False\n try:\n if self.process_group != _get_default_group():\n pickle_not_supported = True\n except RuntimeError:\n pickle_not_supported = True\n\n if pickle_not_supported:\n raise RuntimeError(\"DDP Pickling/Unpickling are only supported \"\n \"when using DDP with the default process \"\n \"group. That is, when you have called \"\n \"init_process_group and have not passed \"\n \"process_group argument to DDP constructor\")\n\n @contextmanager\n def no_sync(self):\n r\"\"\"\n A context manager to disable gradient synchronizations across DDP\n processes. Within this context, gradients will be accumulated on module\n variables, which will later be synchronized in the first\n forward-backward pass exiting the context.\n\n Example::\n\n >>> ddp = torch.nn.parallel.DistributedDataParallel(model, pg)\n >>> with ddp.no_sync():\n >>> for input in inputs:\n >>> ddp(input).backward() # no synchronization, accumulate grads\n >>> ddp(another_input).backward() # synchronize grads\n \"\"\"\n old_require_backward_grad_sync = self.require_backward_grad_sync\n self.require_backward_grad_sync = False\n try:\n yield\n finally:\n self.require_backward_grad_sync = old_require_backward_grad_sync\n\n def forward(self, *inputs, **kwargs):\n if self.ddp_uneven_inputs_config.ddp_join_enabled:\n ones = torch.ones(\n 1, device=self.device\n )\n work = dist.all_reduce(ones, group=self.process_group, async_op=True)\n self.reducer._set_forward_pass_work_handle(\n work, self.ddp_uneven_inputs_config.ddp_join_divide_by_initial_world_size\n )\n\n # Calling _rebuild_buckets before forward compuation,\n # It may allocate new buckets before deallocating old buckets\n # inside _rebuild_buckets. 
To save peak memory usage,\n # call _rebuild_buckets before the peak memory usage increases\n # during forward computation.\n # This should be called only once during whole training period.\n if self.reducer._rebuild_buckets():\n logging.info(\"Reducer buckets have been rebuilt in this iteration.\")\n\n if self.require_forward_param_sync:\n self._sync_params()\n\n if self.ddp_uneven_inputs_config.ddp_join_enabled:\n # Notify joined ranks whether they should sync in backwards pass or not.\n self._check_global_requires_backward_grad_sync(is_joined_rank=False)\n\n if self.device_ids:\n if len(self.device_ids) == 1:\n inputs, kwargs = self.to_kwargs(inputs, kwargs, self.device_ids[0])\n output = self.module(*inputs[0], **kwargs[0])\n else:\n inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)\n outputs = self.parallel_apply(self._module_copies[:len(inputs)], inputs, kwargs)\n output = self.gather(outputs, self.output_device)\n else:\n output = self.module(*inputs, **kwargs)\n\n if torch.is_grad_enabled() and self.require_backward_grad_sync:\n self.require_forward_param_sync = True\n # We'll return the output object verbatim since it is a freeform\n # object. We need to find any tensors in this object, though,\n # because we need to figure out which parameters were used during\n # this forward pass, to ensure we short circuit reduction for any\n # unused parameters. Only if `find_unused_parameters` is set.\n if self.find_unused_parameters:\n self.reducer.prepare_for_backward(list(_find_tensors(output)))\n else:\n self.reducer.prepare_for_backward([])\n else:\n self.require_forward_param_sync = False\n\n return output\n\n def scatter(self, inputs, kwargs, device_ids):\n return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)\n\n def _recursive_to(self, inputs, target_gpu):\n r\"\"\"\n Recursively moves input to the target_gpu.\n \"\"\"\n def to_map(obj):\n if isinstance(obj, torch.Tensor):\n if not self.use_side_stream_for_tensor_copies:\n return (obj.to(target_gpu), )\n else:\n # Perform CPU -> GPU copies in a background stream. 
This code is\n # motivated from similar logic in torch/nn/parallel/_functions.py\n stream = _get_stream(target_gpu)\n with torch.cuda.stream(stream):\n output = obj.to(target_gpu)\n # synchronize with the copy stream\n with torch.cuda.device(target_gpu):\n current_stream = torch.cuda.current_stream()\n # Sync the current stream with the copy stream\n current_stream.wait_stream(stream)\n # Ensure tensor memory is not reused until work on\n # main stream is complete\n output.record_stream(current_stream)\n return (output, )\n if is_namedtuple(obj):\n return [type(obj)(*args) for args in zip(*map(to_map, obj))]\n if isinstance(obj, tuple) and len(obj) > 0:\n return list(zip(*map(to_map, obj)))\n if isinstance(obj, list) and len(obj) > 0:\n return [list(i) for i in zip(*map(to_map, obj))]\n if isinstance(obj, dict) and len(obj) > 0:\n return [type(obj)(i) for i in zip(*map(to_map, obj.items()))]\n return [obj]\n\n # Avoid reference cycle\n try:\n res = to_map(inputs)\n finally:\n to_map = None\n return res\n\n def to_kwargs(self, inputs, kwargs, device_id):\n inputs = self._recursive_to(inputs, device_id) if inputs else []\n kwargs = self._recursive_to(kwargs, device_id) if kwargs else []\n if len(inputs) < len(kwargs):\n inputs.extend([() for _ in range(len(kwargs) - len(inputs))])\n elif len(kwargs) < len(inputs):\n kwargs.extend([{} for _ in range(len(inputs) - len(kwargs))])\n inputs = tuple(inputs)\n kwargs = tuple(kwargs)\n return inputs, kwargs\n\n def parallel_apply(self, replicas, inputs, kwargs):\n return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])\n\n def gather(self, outputs, output_device):\n return gather(outputs, output_device, dim=self.dim)\n\n def train(self, mode=True):\n super(DistributedDataParallel, self).train(mode)\n for module in self._module_copies[1:]:\n module.train(mode)\n return self\n\n def get_ddp_logging_data(self):\n return dist._get_ddp_logging_data(self.reducer)\n\n # When running in join mode, schedules an allreduce to match the one in the\n # forward pass to determine the no. 
of currently active processes and whether\n # all processes have joined.\n def _schedule_shadow_all_reduce_for_fwd_pass(self):\n all_active_procs = torch.zeros(\n 1, device=self.device\n )\n dist.all_reduce(all_active_procs, group=self.process_group)\n return all_active_procs.item()\n\n # When running in join mode, schedules an allreduce to notify joined ranks\n # of whether backwards pass synchronization will run this iteraton or not.\n def _check_global_requires_backward_grad_sync(self, is_joined_rank):\n if not is_joined_rank and self.require_backward_grad_sync:\n requires_sync_tensor = torch.ones(1, device=self.device)\n else:\n requires_sync_tensor = torch.zeros(1, device=self.device)\n\n work = dist.all_reduce(\n requires_sync_tensor, group=self.process_group, async_op=True\n )\n return work, requires_sync_tensor\n\n # When running in join mode, checks and performs sync of module buffers if\n # the models have buffers that should be synchronized in the forward pass.\n def _check_and_sync_module_buffers(self):\n if self.will_sync_module_buffers():\n my_rank = dist.get_rank(self.process_group)\n authoritative_rank = self._find_common_rank(my_rank, False)\n self._distributed_broadcast_coalesced(\n self.modules_buffers[0], self.broadcast_bucket_size, authoritative_rank\n )\n\n # When running in join model, agrees upon a common rank and broadcast model\n # parameters to all other ranks.\n def _sync_final_model(self, is_last_joiner):\n # Agree upon the process that will be the authoritative model copy.\n # The current rank is a candidate for being the authoritative copy if\n # is_last_joiner=True. We break ties via picking the larger rank.\n my_rank = dist.get_rank(self.process_group)\n self._authoritative_rank = self._find_common_rank(my_rank, is_last_joiner)\n self._sync_params_and_buffers(authoritative_rank=self._authoritative_rank)\n\n # Schedule allreduce ops to match those scheduled in the reducer's backward\n # pass.\n def _match_all_reduce_for_bwd_pass(self):\n allreduce_work = []\n # Schedule allreduce in the same order as Reducer schedules them, i.e.\n # the order of the buckets. Retrieving the bucket order from the reducer\n # ensures that we keep the same order in join mode, such as when bucket\n # order is rebuilt dynamically.\n all_bucket_tensors = self.reducer.get_bucket_tensors()\n for bucket_tensors in all_bucket_tensors:\n # Joined processes contribute zero gradient. In the case that\n # divide_by_initial_world_size=True, we divide grads by the static\n # world size, if not, the dividing factor is reduced by the number\n # of joined processes.\n zero_tensors = [\n torch.zeros_like(t) for t in bucket_tensors\n ]\n work = self.process_group.allreduce(zero_tensors)\n allreduce_work.append(work)\n for work in allreduce_work:\n work.wait()\n\n # Allreduces the used parameter mapping across ranks.\n def _match_unused_params_allreduce(self):\n locally_used_param_maps = self.reducer._get_local_used_maps()\n self.process_group.allreduce(locally_used_param_maps)\n\n @contextmanager\n def join(self, divide_by_initial_world_size=True, enable=True):\n r\"\"\"\n A context manager to be used in conjunction with an instance of\n :class:`torch.nn.parallel.DistributedDataParallel` to be\n able to train with uneven inputs across participating processes.\n\n This context manager will keep track of already-joined DDP processes,\n and \"shadow\" the forward and backward passes by inserting collective\n communication operations to match with the ones created by non-joined\n DDP processes. 
This will ensure each collective call has a corresponding\n call by already-joined DDP processes, preventing hangs or errors that\n would otherwise happen when training with uneven inputs across\n processes.\n\n Once all DDP processes have joined, the context manager will broadcast\n the model corresponding to the last joined process to all processes to\n ensure the model is the same across all processes\n (which is guaranteed by DDP).\n\n To use this to enable training with uneven inputs across processes,\n simply wrap this context manager around your training loop. No further\n modifications to the model or data loading is required.\n\n .. warning::\n This module works only with the multi-process, single-device usage\n of :class:`torch.nn.parallel.DistributedDataParallel`,\n which means that a single process works on a single GPU.\n\n .. warning::\n This module currently does not support custom distributed collective\n operations in the forward pass, such as ``SyncBatchNorm`` or other\n custom defined collectives in the model's forward pass.\n\n Args:\n divide_by_initial_world_size (bool): If ``True``, will divide\n gradients by the initial ``world_size`` DDP training was launched\n with. If ``False``, will compute the effective world size\n (number of ranks that have not depleted their inputs yet) and\n divide gradients by that during allreduce. Set\n ``divide_by_initial_world_size=True`` to ensure every input\n sample including the uneven inputs have equal weight in terms of\n how much they contribute to the global gradient. This is\n achieved by always dividing the gradient by the initial\n ``world_size`` even when we encounter uneven inputs. If you set\n this to ``False``, we divide the gradient by the remaining\n number of nodes. This ensures parity with training on a smaller\n ``world_size`` although it also means the uneven inputs would\n contribute more towards the global gradient. Typically, you\n would want to set this to ``True`` for cases where the last few\n inputs of your training job are uneven. In extreme cases, where\n there is a large discrepancy in the number of inputs, setting\n this to ``False`` might provide better results.\n enable (bool): Whether to enable uneven input detection or not. Pass\n in ``enable=False`` to disable in cases where you know that\n inputs are even across participating processes. Default is\n ``True``.\n\n\n Example::\n\n >>> import torch\n >>> import torch.distributed as dist\n >>> import os\n >>> import torch.multiprocessing as mp\n >>> import torch.nn as nn\n >>> # On each spawned worker\n >>> def worker(rank):\n >>> dist.init_process_group(\"nccl\", rank=rank, world_size=2)\n >>> torch.cuda.set_device(rank)\n >>> model = nn.Linear(1, 1, bias=False).to(rank)\n >>> model = torch.nn.parallel.DistributedDataParallel(\n >>> model, device_ids=[rank], output_device=rank\n >>> )\n >>> # Rank 1 gets one more input than rank 0.\n >>> inputs = [torch.tensor([1]).float() for _ in range(10 + rank)]\n >>> with model.join():\n >>> for _ in range(5):\n >>> for inp in inputs:\n >>> loss = model(inp).sum()\n >>> loss.backward()\n >>> # Without the join() API, the below synchronization will hang\n >>> # blocking for rank 1's allreduce to complete.\n >>> torch.cuda.synchronize(device=rank)\n \"\"\"\n try:\n if self.device_ids and len(self.device_ids) > 1:\n raise ValueError(\n \"\"\"DDP join() API does not support Single-Process Multi-GPU\n mode training. 
The recommended approach for DDP training is\n to spawn a single process that works on a single GPU.\"\"\"\n )\n has_error = False\n self.ddp_uneven_inputs_config = _DDPUnevenInputsConfig(\n ddp_join_enabled=enable,\n ddp_join_divide_by_initial_world_size=divide_by_initial_world_size,\n )\n yield\n except Exception as e:\n # Set to skip any processing in the finally block.\n has_error = True\n raise e\n finally:\n # Skip any processing to let the exception immediately be raised if\n # there was one.\n if enable and not has_error:\n all_procs_joined = False\n is_last_joiner = True\n i = 0\n WARN_THRESHOLD = 1000\n warnings.simplefilter(\"once\")\n while not all_procs_joined:\n if i > WARN_THRESHOLD:\n my_rank = dist.get_rank(self.process_group)\n warnings.warn(\n \"Detected uneven input skew of greater \"\n f\"than {WARN_THRESHOLD}. This means that rank {my_rank} \"\n f\"has at least {WARN_THRESHOLD} fewer inputs than \"\n \"other currently active ranks. This level of skew could \"\n \"lead to performance degradation during training.\"\n )\n # Schedules allreduce to match fwd pass allreduce in non-joined procs\n num_active_procs = self._schedule_shadow_all_reduce_for_fwd_pass()\n if num_active_procs == 0:\n all_procs_joined = True\n else:\n # Some DDP process still needs to be joined.\n if is_last_joiner:\n is_last_joiner = False\n # It will rebuild buckets only once during training period\n self.reducer._rebuild_buckets()\n # Schedule a corresponding broadcast if we are syncing module\n # buffers in the forward pass.\n self._check_and_sync_module_buffers()\n\n (\n work,\n should_sync_backwards_tensor,\n ) = self._check_global_requires_backward_grad_sync(\n is_joined_rank=True\n )\n work.wait()\n # If nonzero, then we should sync in the bwd pass.\n should_sync_backwards = should_sync_backwards_tensor.item() != 0\n # Forward param sync is disabled in the next iteration\n # if we are skipping grad sync this iteration. Hence, we\n # set require_forward_param_sync appropriately here.\n self.require_forward_param_sync = should_sync_backwards\n if not should_sync_backwards:\n continue\n # Schedules one allreduce per gradient bucket to match\n # the backwards pass allreduce.\n self._match_all_reduce_for_bwd_pass()\n # Check if we need to allreduce locally unused params.\n if self.find_unused_parameters:\n self._match_unused_params_allreduce()\n # It will push rebuilt params only once during training period\n self.reducer._push_all_rebuilt_params()\n i += 1\n\n # All procs joined. Agree on authoritative rank and broadcast the model.\n self._sync_final_model(is_last_joiner)\n\n def register_comm_hook(self, state: object, hook: callable):\n r\"\"\"\n Registers a communication hook which is an enhancement that provides a\n flexible hook to users where they can specify how DDP aggregates gradients\n across multiple workers.\n\n This hook would be very useful for researchers to try out new ideas. 
For\n example, this hook can be used to implement several algorithms like GossipGrad\n and gradient compression which involve different communication strategies for\n parameter syncs while running Distributed DataParallel training.\n\n Args:\n state (object): Passed to the hook to maintain any state information during the training process.\n Examples include error feedback in gradient compression,\n peers to communicate with next in GossipGrad, etc.\n\n It is locally stored by each worker\n and shared by all the gradient tensors on the worker.\n hook (callable): Averages gradient tensors across workers and defined as:\n ``hook(state: object, bucket: dist._GradBucket) -> torch.futures.Future``:\n\n This function is called once the bucket is ready. The\n hook can perform whatever processing is needed and return\n a Future indicating completion of any async work (ex: allreduce).\n If the hook doesn't perform any communication, it can also\n just return a completed Future. The Future should hold the\n new value of grad bucket's tensors. Once a bucket is ready,\n c10d reducer would call this hook and use the tensors returned\n by the Future and copy grads to individual parameters.\n\n We also provide an API called ``get_future`` to retrieve a\n Future associated with the completion of ``c10d.ProcessGroup.work``.\n\n .. warning ::\n Grad bucket's tensors will not be predivided by world_size. User is responsible\n to divide by the world_size in case of operations like allreduce.\n\n .. warning ::\n DDP communication hook can only be registered once and should be registered\n before calling backward.\n\n .. warning ::\n The Future object that hook returns should contain a result that has the same\n shape with the tensors inside grad bucket.\n\n .. warning ::\n DDP communication hook does not support single-process multiple-device mode.\n Gradbucket tensors should consist of only a single tensor.\n\n .. warning ::\n ``get_future`` API supports only NCCL backend and will return a ``torch._C.Future``\n which is an internal type and should be used with caution. It can still be used by\n ``register_comm_hook`` API, but it is subject to some subtle differences compared\n to ``torch.futures.Future``.\n\n .. 
warning ::\n DDP communication hook is experimental and subject to change.\n\n Example::\n Below is an example of a noop hook that returns the same tensors.\n\n >>> def noop(state: object, bucket: dist._GradBucket): -> torch.futures.Future\n >>> fut = torch.futures.Future()\n >>> fut.set_result(bucket.get_tensors())\n >>> return fut\n\n >>> ddp.register_comm_hook(state = None, hook = noop)\n\n Example::\n Below is an example of a Parallel SGD algorithm where gradients are encoded before\n allreduce, and then decoded after allreduce.\n\n >>> def encode_and_decode(state: object, bucket: dist._GradBucket): -> torch.futures.Future\n >>> tensors = [t / process_group.world_size for t in bucket.get_tensors()]\n >>> encoded_tensors = encode(tensors) # encode gradients\n >>> fut = process_group.allreduce(encoded_tensors).get_future()\n >>> # Define the then callback to decode.\n >>> def decode(fut):\n >>> decoded_tensors = decode(fut.value()) # decode gradients\n >>> return decoded_tensors\n >>> return fut.then(decode)\n\n >>> ddp.register_comm_hook(state = None, hook = encode_and_decode)\n \"\"\"\n self._check_comm_hook(hook)\n dist._register_comm_hook(self.reducer, state, hook)\n\n def _register_builtin_comm_hook(\n self, comm_hook_type\n ):\n r\"\"\"\n Registers a built-in communication hook that specifies how DDP\n aggregates gradients across multiple workers.\n The built-in hooks aim to provide efficient C++ implementations for certain hooks,\n which might not be as efficient if implemented in Python using a Python communication hook.\n\n Args:\n comm_hook_type (dist.BuiltinCommHookType): type of communication hook, such as\n ALLREDUCE, FP16_COMPRESS, etc.\n\n .. warning ::\n DDP communication hook can only be registered once and should be registered\n before calling backward.\n\n .. warning ::\n DDP communication hook does not support single-process multiple-device mode.\n Gradbucket tensors should consist of only a single tensor.\n\n .. warning ::\n DDP communication hook is experimental and subject to change.\n\n Example::\n Below is an example of a FP16 compression where gradients are\n compressed into 16-bit floating-point numbers before allreduce, and\n then decompressed after allreduce.\n\n >>> ddp._register_builtin_comm_hook(dist.BuiltinCommHookType.FP16_COMPRESS)\n\n \"\"\"\n dist._register_builtin_comm_hook(self.reducer, comm_hook_type)\n\n def _distributed_broadcast_coalesced(\n self, tensors, buffer_size, authoritative_rank=0\n ):\n dist._broadcast_coalesced(\n self.process_group, tensors, buffer_size, authoritative_rank\n )\n\n def will_sync_module_buffers(self):\n return (\n self.require_forward_param_sync\n and self.broadcast_buffers\n and len(self.modules_buffers[0]) > 0\n )\n\n def _find_common_rank(self, input_rank, rank_cond):\n # -1 indicates that this rank is not under consideration to be the\n # common_rank\n rank_to_use = torch.tensor(\n [input_rank if rank_cond else -1],\n device=self.device,\n )\n dist.all_reduce(rank_to_use, op=ReduceOp.MAX, group=self.process_group)\n if rank_to_use.item() == -1:\n raise ValueError(\n \"BUG! 
Expected rank_cond to be true for at least one process.\"\n )\n return rank_to_use.item()\n\n def _sync_params(self):\n with torch.no_grad():\n # only do intra-node parameters sync for replicated single-device\n # CUDA modules\n if self.device_ids and len(self.device_ids) > 1:\n # intra-node parameter sync\n result = comm.broadcast_coalesced(\n self.modules_params[0],\n self.device_ids,\n self.broadcast_bucket_size)\n for tensors, module_params in zip(result[1:],\n self.modules_params[1:]):\n for tensor, param in zip(tensors, module_params):\n # Formerly, this spot used param.set_(tensor) to steal tensor's\n # data without a deep copy. Unfortunately, that wiped out the\n # allreduce hook attached to param's AccumulateGrad function,\n # likely causing https://github.com/pytorch/pytorch/issues/37079.\n # TODO: If set_ becomes safe to use here, use set_.\n # Otherwise, find another way to steal tensor's data.\n param.copy_(tensor)\n # Assume we have just run the optimizer and zeroed the\n # grads of the parameters on the root model. We need\n # to zero the grads on all model replicas as well.\n # This snippet is copied from torch.optim.Optimizer.\n if param.grad is not None:\n if param.grad.grad_fn is not None:\n param.grad.detach_()\n else:\n param.grad.requires_grad_(False)\n param.grad.zero_()\n\n # module buffer sync\n if self.will_sync_module_buffers():\n # Synchronize buffers across processes.\n # If we are running DDP with the join manager, we have to agree\n # upon a rank to sync module buffers from, since rank 0 may\n # already have been joined and have stale module buffers.\n if self.ddp_uneven_inputs_config.ddp_join_enabled:\n authoritative_rank = self._find_common_rank(dist.get_rank(), True)\n else:\n # The process with rank 0 is considered the authoritative copy.\n authoritative_rank = 0\n self._distributed_broadcast_coalesced(\n self.modules_buffers[0],\n self.broadcast_bucket_size,\n authoritative_rank,\n )\n # only do intra-node buffer sync for replicated single-device\n # CUDA modules\n if self.device_ids and len(self.device_ids) > 1:\n # intra-node buffer sync\n result = comm.broadcast_coalesced(\n self.modules_buffers[0],\n self.device_ids,\n self.broadcast_bucket_size)\n for tensors, module_buffers in zip(result[1:],\n self.modules_buffers[1:]):\n for tensor, buffer in zip(tensors, module_buffers):\n buffer.set_(tensor)\n\n def _passing_sync_batchnorm_handle(self, module_copies):\n for dev_idx, module in enumerate(module_copies):\n for layer in module.modules():\n if isinstance(layer, torch.nn.modules.SyncBatchNorm):\n assert self.device_type != 'cpu', \"SyncBatchNorm layers only work with GPU modules\"\n layer._specify_ddp_gpu_num(\n len(self.device_ids) if self.device_ids else 1)\n\n def _check_comm_hook(self, hook):\n if not callable(hook):\n raise TypeError(\"Communication hook must be callable.\")\n\n sig = inspect.signature(hook)\n if (\n sig.parameters[\"bucket\"].annotation != inspect._empty\n and sig.parameters[\"bucket\"].annotation != dist._GradBucket\n ):\n raise ValueError(\n \"Communication hook: bucket annotation should be dist._GradBucket.\"\n )\n\n if sig.return_annotation != inspect._empty and (\n sig.return_annotation != torch.futures.Future\n and sig.return_annotation != torch._C.Future\n ):\n raise ValueError(\n \"Communication hook: return annotation should be torch.futures.Future or torch._C.Future.\"\n )\n\n @staticmethod\n def _set_params_and_buffers_to_ignore_for_model(\n module, params_and_buffers_to_ignore\n ):\n # This is a workaround to set 
parameters and buffers DDP should ignore\n # during synchronization. It will be removed when the API is finalized\n # as part of addressing https://github.com/pytorch/pytorch/issues/43690.\n module._ddp_params_and_buffers_to_ignore = params_and_buffers_to_ignore\n", "import math\nimport warnings\n\nfrom torch import Tensor\nimport torch\n\n\n# These no_grad_* functions are necessary as wrappers around the parts of these\n# functions that use `with torch.no_grad()`. The JIT doesn't support context\n# managers, so these need to be implemented as builtins. Using these wrappers\n# lets us keep those builtins small and re-usable.\ndef _no_grad_uniform_(tensor, a, b):\n with torch.no_grad():\n return tensor.uniform_(a, b)\n\n\ndef _no_grad_normal_(tensor, mean, std):\n with torch.no_grad():\n return tensor.normal_(mean, std)\n\n\ndef _no_grad_trunc_normal_(tensor, mean, std, a, b):\n # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf\n def norm_cdf(x):\n # Computes standard normal cumulative distribution function\n return (1. + math.erf(x / math.sqrt(2.))) / 2.\n\n if (mean < a - 2 * std) or (mean > b + 2 * std):\n warnings.warn(\"mean is more than 2 std from [a, b] in nn.init.trunc_normal_. \"\n \"The distribution of values may be incorrect.\",\n stacklevel=2)\n\n with torch.no_grad():\n # Values are generated by using a truncated uniform distribution and\n # then using the inverse CDF for the normal distribution.\n # Get upper and lower cdf values\n l = norm_cdf((a - mean) / std)\n u = norm_cdf((b - mean) / std)\n\n # Uniformly fill tensor with values from [l, u], then translate to\n # [2l-1, 2u-1].\n tensor.uniform_(2 * l - 1, 2 * u - 1)\n\n # Use inverse cdf transform for normal distribution to get truncated\n # standard normal\n tensor.erfinv_()\n\n # Transform to proper mean, std\n tensor.mul_(std * math.sqrt(2.))\n tensor.add_(mean)\n\n # Clamp to ensure it's in the proper range\n tensor.clamp_(min=a, max=b)\n return tensor\n\n\ndef _no_grad_fill_(tensor, val):\n with torch.no_grad():\n return tensor.fill_(val)\n\n\ndef _no_grad_zero_(tensor):\n with torch.no_grad():\n return tensor.zero_()\n\n\ndef calculate_gain(nonlinearity, param=None):\n r\"\"\"Return the recommended gain value for the given nonlinearity function.\n The values are as follows:\n\n ================= ====================================================\n nonlinearity gain\n ================= ====================================================\n Linear / Identity :math:`1`\n Conv{1,2,3}D :math:`1`\n Sigmoid :math:`1`\n Tanh :math:`\\frac{5}{3}`\n ReLU :math:`\\sqrt{2}`\n Leaky Relu :math:`\\sqrt{\\frac{2}{1 + \\text{negative\\_slope}^2}}`\n SELU :math:`\\frac{3}{4}`\n ================= ====================================================\n\n Args:\n nonlinearity: the non-linear function (`nn.functional` name)\n param: optional parameter for the non-linear function\n\n Examples:\n >>> gain = nn.init.calculate_gain('leaky_relu', 0.2) # leaky_relu with negative_slope=0.2\n \"\"\"\n linear_fns = ['linear', 'conv1d', 'conv2d', 'conv3d', 'conv_transpose1d', 'conv_transpose2d', 'conv_transpose3d']\n if nonlinearity in linear_fns or nonlinearity == 'sigmoid':\n return 1\n elif nonlinearity == 'tanh':\n return 5.0 / 3\n elif nonlinearity == 'relu':\n return math.sqrt(2.0)\n elif nonlinearity == 'leaky_relu':\n if param is None:\n negative_slope = 0.01\n elif not isinstance(param, bool) and isinstance(param, int) or isinstance(param, float):\n # True/False are instances of int, hence check 
above\n negative_slope = param\n else:\n raise ValueError(\"negative_slope {} not a valid number\".format(param))\n return math.sqrt(2.0 / (1 + negative_slope ** 2))\n elif nonlinearity == 'selu':\n return 3.0 / 4 # Value found empirically (https://github.com/pytorch/pytorch/pull/50664)\n else:\n raise ValueError(\"Unsupported nonlinearity {}\".format(nonlinearity))\n\n\ndef uniform_(tensor: Tensor, a: float = 0., b: float = 1.) -> Tensor:\n r\"\"\"Fills the input Tensor with values drawn from the uniform\n distribution :math:`\\mathcal{U}(a, b)`.\n\n Args:\n tensor: an n-dimensional `torch.Tensor`\n a: the lower bound of the uniform distribution\n b: the upper bound of the uniform distribution\n\n Examples:\n >>> w = torch.empty(3, 5)\n >>> nn.init.uniform_(w)\n \"\"\"\n return _no_grad_uniform_(tensor, a, b)\n\n\ndef normal_(tensor: Tensor, mean: float = 0., std: float = 1.) -> Tensor:\n r\"\"\"Fills the input Tensor with values drawn from the normal\n distribution :math:`\\mathcal{N}(\\text{mean}, \\text{std}^2)`.\n\n Args:\n tensor: an n-dimensional `torch.Tensor`\n mean: the mean of the normal distribution\n std: the standard deviation of the normal distribution\n\n Examples:\n >>> w = torch.empty(3, 5)\n >>> nn.init.normal_(w)\n \"\"\"\n return _no_grad_normal_(tensor, mean, std)\n\ndef trunc_normal_(tensor: Tensor, mean: float = 0., std: float = 1., a: float = -2., b: float = 2.) -> Tensor:\n r\"\"\"Fills the input Tensor with values drawn from a truncated\n normal distribution. The values are effectively drawn from the\n normal distribution :math:`\\mathcal{N}(\\text{mean}, \\text{std}^2)`\n with values outside :math:`[a, b]` redrawn until they are within\n the bounds. The method used for generating the random values works\n best when :math:`a \\leq \\text{mean} \\leq b`.\n\n Args:\n tensor: an n-dimensional `torch.Tensor`\n mean: the mean of the normal distribution\n std: the standard deviation of the normal distribution\n a: the minimum cutoff value\n b: the maximum cutoff value\n\n Examples:\n >>> w = torch.empty(3, 5)\n >>> nn.init.trunc_normal_(w)\n \"\"\"\n return _no_grad_trunc_normal_(tensor, mean, std, a, b)\n\n\ndef constant_(tensor: Tensor, val: float) -> Tensor:\n r\"\"\"Fills the input Tensor with the value :math:`\\text{val}`.\n\n Args:\n tensor: an n-dimensional `torch.Tensor`\n val: the value to fill the tensor with\n\n Examples:\n >>> w = torch.empty(3, 5)\n >>> nn.init.constant_(w, 0.3)\n \"\"\"\n return _no_grad_fill_(tensor, val)\n\n\ndef ones_(tensor: Tensor) -> Tensor:\n r\"\"\"Fills the input Tensor with the scalar value `1`.\n\n Args:\n tensor: an n-dimensional `torch.Tensor`\n\n Examples:\n >>> w = torch.empty(3, 5)\n >>> nn.init.ones_(w)\n \"\"\"\n return _no_grad_fill_(tensor, 1.)\n\n\ndef zeros_(tensor: Tensor) -> Tensor:\n r\"\"\"Fills the input Tensor with the scalar value `0`.\n\n Args:\n tensor: an n-dimensional `torch.Tensor`\n\n Examples:\n >>> w = torch.empty(3, 5)\n >>> nn.init.zeros_(w)\n \"\"\"\n return _no_grad_zero_(tensor)\n\n\ndef eye_(tensor):\n r\"\"\"Fills the 2-dimensional input `Tensor` with the identity\n matrix. 
Preserves the identity of the inputs in `Linear` layers, where as\n many inputs are preserved as possible.\n\n Args:\n tensor: a 2-dimensional `torch.Tensor`\n\n Examples:\n >>> w = torch.empty(3, 5)\n >>> nn.init.eye_(w)\n \"\"\"\n if tensor.ndimension() != 2:\n raise ValueError(\"Only tensors with 2 dimensions are supported\")\n\n with torch.no_grad():\n torch.eye(*tensor.shape, out=tensor, requires_grad=tensor.requires_grad)\n return tensor\n\n\ndef dirac_(tensor, groups=1):\n r\"\"\"Fills the {3, 4, 5}-dimensional input `Tensor` with the Dirac\n delta function. Preserves the identity of the inputs in `Convolutional`\n layers, where as many input channels are preserved as possible. In case\n of groups>1, each group of channels preserves identity\n\n Args:\n tensor: a {3, 4, 5}-dimensional `torch.Tensor`\n groups (optional): number of groups in the conv layer (default: 1)\n Examples:\n >>> w = torch.empty(3, 16, 5, 5)\n >>> nn.init.dirac_(w)\n >>> w = torch.empty(3, 24, 5, 5)\n >>> nn.init.dirac_(w, 3)\n \"\"\"\n dimensions = tensor.ndimension()\n if dimensions not in [3, 4, 5]:\n raise ValueError(\"Only tensors with 3, 4, or 5 dimensions are supported\")\n\n sizes = tensor.size()\n\n if sizes[0] % groups != 0:\n raise ValueError('dim 0 must be divisible by groups')\n\n out_chans_per_grp = sizes[0] // groups\n min_dim = min(out_chans_per_grp, sizes[1])\n\n with torch.no_grad():\n tensor.zero_()\n\n for g in range(groups):\n for d in range(min_dim):\n if dimensions == 3: # Temporal convolution\n tensor[g * out_chans_per_grp + d, d, tensor.size(2) // 2] = 1\n elif dimensions == 4: # Spatial convolution\n tensor[g * out_chans_per_grp + d, d, tensor.size(2) // 2,\n tensor.size(3) // 2] = 1\n else: # Volumetric convolution\n tensor[g * out_chans_per_grp + d, d, tensor.size(2) // 2,\n tensor.size(3) // 2, tensor.size(4) // 2] = 1\n return tensor\n\n\ndef _calculate_fan_in_and_fan_out(tensor):\n dimensions = tensor.dim()\n if dimensions < 2:\n raise ValueError(\"Fan in and fan out can not be computed for tensor with fewer than 2 dimensions\")\n\n num_input_fmaps = tensor.size(1)\n num_output_fmaps = tensor.size(0)\n receptive_field_size = 1\n if tensor.dim() > 2:\n receptive_field_size = tensor[0][0].numel()\n fan_in = num_input_fmaps * receptive_field_size\n fan_out = num_output_fmaps * receptive_field_size\n\n return fan_in, fan_out\n\n\ndef xavier_uniform_(tensor: Tensor, gain: float = 1.) -> Tensor:\n r\"\"\"Fills the input `Tensor` with values according to the method\n described in `Understanding the difficulty of training deep feedforward\n neural networks` - Glorot, X. & Bengio, Y. (2010), using a uniform\n distribution. The resulting tensor will have values sampled from\n :math:`\\mathcal{U}(-a, a)` where\n\n .. math::\n a = \\text{gain} \\times \\sqrt{\\frac{6}{\\text{fan\\_in} + \\text{fan\\_out}}}\n\n Also known as Glorot initialization.\n\n Args:\n tensor: an n-dimensional `torch.Tensor`\n gain: an optional scaling factor\n\n Examples:\n >>> w = torch.empty(3, 5)\n >>> nn.init.xavier_uniform_(w, gain=nn.init.calculate_gain('relu'))\n \"\"\"\n fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)\n std = gain * math.sqrt(2.0 / float(fan_in + fan_out))\n a = math.sqrt(3.0) * std # Calculate uniform bounds from standard deviation\n\n return _no_grad_uniform_(tensor, -a, a)\n\n\ndef xavier_normal_(tensor: Tensor, gain: float = 1.) 
-> Tensor:\n r\"\"\"Fills the input `Tensor` with values according to the method\n described in `Understanding the difficulty of training deep feedforward\n neural networks` - Glorot, X. & Bengio, Y. (2010), using a normal\n distribution. The resulting tensor will have values sampled from\n :math:`\\mathcal{N}(0, \\text{std}^2)` where\n\n .. math::\n \\text{std} = \\text{gain} \\times \\sqrt{\\frac{2}{\\text{fan\\_in} + \\text{fan\\_out}}}\n\n Also known as Glorot initialization.\n\n Args:\n tensor: an n-dimensional `torch.Tensor`\n gain: an optional scaling factor\n\n Examples:\n >>> w = torch.empty(3, 5)\n >>> nn.init.xavier_normal_(w)\n \"\"\"\n fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)\n std = gain * math.sqrt(2.0 / float(fan_in + fan_out))\n\n return _no_grad_normal_(tensor, 0., std)\n\n\ndef _calculate_correct_fan(tensor, mode):\n mode = mode.lower()\n valid_modes = ['fan_in', 'fan_out']\n if mode not in valid_modes:\n raise ValueError(\"Mode {} not supported, please use one of {}\".format(mode, valid_modes))\n\n fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)\n return fan_in if mode == 'fan_in' else fan_out\n\n\ndef kaiming_uniform_(tensor, a=0, mode='fan_in', nonlinearity='leaky_relu'):\n r\"\"\"Fills the input `Tensor` with values according to the method\n described in `Delving deep into rectifiers: Surpassing human-level\n performance on ImageNet classification` - He, K. et al. (2015), using a\n uniform distribution. The resulting tensor will have values sampled from\n :math:`\\mathcal{U}(-\\text{bound}, \\text{bound})` where\n\n .. math::\n \\text{bound} = \\text{gain} \\times \\sqrt{\\frac{3}{\\text{fan\\_mode}}}\n\n Also known as He initialization.\n\n Args:\n tensor: an n-dimensional `torch.Tensor`\n a: the negative slope of the rectifier used after this layer (only\n used with ``'leaky_relu'``)\n mode: either ``'fan_in'`` (default) or ``'fan_out'``. Choosing ``'fan_in'``\n preserves the magnitude of the variance of the weights in the\n forward pass. Choosing ``'fan_out'`` preserves the magnitudes in the\n backwards pass.\n nonlinearity: the non-linear function (`nn.functional` name),\n recommended to use only with ``'relu'`` or ``'leaky_relu'`` (default).\n\n Examples:\n >>> w = torch.empty(3, 5)\n >>> nn.init.kaiming_uniform_(w, mode='fan_in', nonlinearity='relu')\n \"\"\"\n fan = _calculate_correct_fan(tensor, mode)\n gain = calculate_gain(nonlinearity, a)\n std = gain / math.sqrt(fan)\n bound = math.sqrt(3.0) * std # Calculate uniform bounds from standard deviation\n with torch.no_grad():\n return tensor.uniform_(-bound, bound)\n\n\ndef kaiming_normal_(tensor, a=0, mode='fan_in', nonlinearity='leaky_relu'):\n r\"\"\"Fills the input `Tensor` with values according to the method\n described in `Delving deep into rectifiers: Surpassing human-level\n performance on ImageNet classification` - He, K. et al. (2015), using a\n normal distribution. The resulting tensor will have values sampled from\n :math:`\\mathcal{N}(0, \\text{std}^2)` where\n\n .. math::\n \\text{std} = \\frac{\\text{gain}}{\\sqrt{\\text{fan\\_mode}}}\n\n Also known as He initialization.\n\n Args:\n tensor: an n-dimensional `torch.Tensor`\n a: the negative slope of the rectifier used after this layer (only\n used with ``'leaky_relu'``)\n mode: either ``'fan_in'`` (default) or ``'fan_out'``. Choosing ``'fan_in'``\n preserves the magnitude of the variance of the weights in the\n forward pass. 
Choosing ``'fan_out'`` preserves the magnitudes in the\n backwards pass.\n nonlinearity: the non-linear function (`nn.functional` name),\n recommended to use only with ``'relu'`` or ``'leaky_relu'`` (default).\n\n Examples:\n >>> w = torch.empty(3, 5)\n >>> nn.init.kaiming_normal_(w, mode='fan_out', nonlinearity='relu')\n \"\"\"\n fan = _calculate_correct_fan(tensor, mode)\n gain = calculate_gain(nonlinearity, a)\n std = gain / math.sqrt(fan)\n with torch.no_grad():\n return tensor.normal_(0, std)\n\n\ndef orthogonal_(tensor, gain=1):\n r\"\"\"Fills the input `Tensor` with a (semi) orthogonal matrix, as\n described in `Exact solutions to the nonlinear dynamics of learning in deep\n linear neural networks` - Saxe, A. et al. (2013). The input tensor must have\n at least 2 dimensions, and for tensors with more than 2 dimensions the\n trailing dimensions are flattened.\n\n Args:\n tensor: an n-dimensional `torch.Tensor`, where :math:`n \\geq 2`\n gain: optional scaling factor\n\n Examples:\n >>> w = torch.empty(3, 5)\n >>> nn.init.orthogonal_(w)\n \"\"\"\n if tensor.ndimension() < 2:\n raise ValueError(\"Only tensors with 2 or more dimensions are supported\")\n\n rows = tensor.size(0)\n cols = tensor.numel() // rows\n flattened = tensor.new(rows, cols).normal_(0, 1)\n\n if rows < cols:\n flattened.t_()\n\n # Compute the qr factorization\n q, r = torch.qr(flattened)\n # Make Q uniform according to https://arxiv.org/pdf/math-ph/0609050.pdf\n d = torch.diag(r, 0)\n ph = d.sign()\n q *= ph\n\n if rows < cols:\n q.t_()\n\n with torch.no_grad():\n tensor.view_as(q).copy_(q)\n tensor.mul_(gain)\n return tensor\n\n\ndef sparse_(tensor, sparsity, std=0.01):\n r\"\"\"Fills the 2D input `Tensor` as a sparse matrix, where the\n non-zero elements will be drawn from the normal distribution\n :math:`\\mathcal{N}(0, 0.01)`, as described in `Deep learning via\n Hessian-free optimization` - Martens, J. (2010).\n\n Args:\n tensor: an n-dimensional `torch.Tensor`\n sparsity: The fraction of elements in each column to be set to zero\n std: the standard deviation of the normal distribution used to generate\n the non-zero values\n\n Examples:\n >>> w = torch.empty(3, 5)\n >>> nn.init.sparse_(w, sparsity=0.1)\n \"\"\"\n if tensor.ndimension() != 2:\n raise ValueError(\"Only tensors with 2 dimensions are supported\")\n\n rows, cols = tensor.shape\n num_zeros = int(math.ceil(sparsity * rows))\n\n with torch.no_grad():\n tensor.normal_(0, std)\n for col_idx in range(cols):\n row_indices = torch.randperm(rows)\n zero_indices = row_indices[:num_zeros]\n tensor[zero_indices, col_idx] = 0\n return tensor\n\n\n# for backward compatibility\ndef _make_deprecate(meth):\n new_name = meth.__name__\n old_name = new_name[:-1]\n\n def deprecated_init(*args, **kwargs):\n warnings.warn(\"nn.init.{} is now deprecated in favor of nn.init.{}.\"\n .format(old_name, new_name), stacklevel=2)\n return meth(*args, **kwargs)\n\n deprecated_init.__doc__ = r\"\"\"\n {old_name}(...)\n\n .. 
warning::\n This method is now deprecated in favor of :func:`torch.nn.init.{new_name}`.\n\n See :func:`~torch.nn.init.{new_name}` for details.\"\"\".format(\n old_name=old_name, new_name=new_name)\n deprecated_init.__name__ = old_name\n return deprecated_init\n\n\nuniform = _make_deprecate(uniform_)\nnormal = _make_deprecate(normal_)\nconstant = _make_deprecate(constant_)\neye = _make_deprecate(eye_)\ndirac = _make_deprecate(dirac_)\nxavier_uniform = _make_deprecate(xavier_uniform_)\nxavier_normal = _make_deprecate(xavier_normal_)\nkaiming_uniform = _make_deprecate(kaiming_uniform_)\nkaiming_normal = _make_deprecate(kaiming_normal_)\northogonal = _make_deprecate(orthogonal_)\nsparse = _make_deprecate(sparse_)\n" ]
[ [ "torch.utils.benchmark.utils.cpp_jit.compile_timeit_template", "torch.utils.benchmark.utils.valgrind_wrapper.timer_interface.wrapper_singleton", "torch.cuda.synchronize", "torch.utils.benchmark.utils.common.set_torch_threads", "torch.utils.benchmark.utils.common.Measurement", "torch.utils.benchmark.utils.common.TaskSpec", "torch.cuda.is_available", "torch.utils.benchmark.utils.valgrind_wrapper.timer_interface.CopyIfCallgrind.unwrap_all" ], [ "numpy.zeros", "numpy.random.rand", "numpy.transpose" ], [ "torch.distributed._register_comm_hook", "torch.zeros", "torch.distributed.distributed_c10d._get_default_group", "torch.distributed._set_construction_logging_data", "torch.no_grad", "torch.cuda.stream", "torch.distributed.get_rank", "torch.is_grad_enabled", "torch.distributed._broadcast_coalesced", "torch.distributed.rpc.is_available", "torch.ones", "torch.tensor", "torch.distributed._compute_bucket_assignment_by_size", "torch.distributed._register_builtin_comm_hook", "torch.distributed._get_ddp_logging_data", "torch.cuda.current_stream", "torch.zeros_like", "torch.distributed.is_available", "torch._utils._get_all_device_indices", "torch.cuda.device", "torch._utils._get_device_index", "torch.distributed.all_reduce" ], [ "torch.randperm", "torch.eye", "torch.qr", "torch.no_grad", "torch.diag" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mohammedshariqnawaz/Pedestron
[ "9785feb94f00e07ae24a662525b4678f12d0fdc8" ]
[ "mmdet/models/detectors/csp.py" ]
[ "\nfrom .single_stage import SingleStageDetector\nfrom ..registry import DETECTORS\nfrom mmdet.core import bbox2result\nimport torch.nn as nn\nimport torch\nfrom .. import builder\nimport numpy as np\nimport cv2\nfrom mmdet.core import bbox2roi, bbox2result, build_assigner, build_sampler\n\[email protected]_module\nclass CSP(SingleStageDetector):\n\n def __init__(self,\n backbone,\n neck,\n bbox_head,\n refine_roi_extractor=None,\n refine_head=None,\n train_cfg=None,\n test_cfg=None,\n pretrained=None,\n detached=True,\n return_feature_maps=False):\n super(CSP, self).__init__(backbone, neck, bbox_head, train_cfg,\n test_cfg, pretrained)\n if refine_head is not None:\n self.refine_roi_extractor = builder.build_roi_extractor(\n refine_roi_extractor)\n self.refine_head = builder.build_head(refine_head)\n self.return_feature_maps = return_feature_maps\n self.train_cfg = train_cfg\n self.test_cfg = test_cfg\n self.detached = detached\n\n def show_input_debug(self, img, classification_maps, scale_maps, offset_maps):\n img_numpy = img.cpu().numpy().copy()[0]\n # img_numpy = np.transpose(img_numpy, [1, 2, 0]) * [58.395, 57.12, 57.375] + [123.675, 116.28, 103.53]\n img_numpy = np.transpose(img_numpy, [1, 2, 0]) + [102.9801, 115.9465, 122.7717]\n img_numpy = img_numpy[:, :, ::-1]\n img_numpy = img_numpy.astype(np.uint8)\n strides = [8, 16, 32, 64, 128]\n img_nows = []\n for i, stride in enumerate(strides):\n img_now = img_numpy.copy()\n # cls_numpy = classification_maps[0][i].cpu().numpy().copy()[0][2]\n cls_numpy = classification_maps[0][i].cpu().numpy().copy()[0][:80]\n scale_numpy = scale_maps[0][i].cpu().numpy().copy()[0][0] * stride\n offset_numpy = offset_maps[0][i].cpu().numpy().copy()[0][:2]\n cs, ys, xs = cls_numpy.nonzero()\n print(len(ys))\n for c, x, y in zip(cs, xs, ys):\n cv2.imshow(str(c), classification_maps[0][i].cpu().numpy().copy()[0][80+c])\n realx = x\n realy = y\n height = scale_numpy[y, x]\n realy = realy + 0.5 + offset_numpy[0][y, x]\n realx = realx + 0.5 + offset_numpy[1][y, x]\n realy = realy * stride\n realx = realx * stride\n top_y = int(realy - height/2)\n top_x = int(realx)\n down_y = int(realy + height/2)\n down_x = int(realx)\n top_left = (int(top_x - height * 0.1), int(top_y))\n down_right = (int(down_x + height * 0.1), down_y)\n cv2.rectangle(img_now, top_left, down_right, (255, 255, 5*int(c)), 2)\n img_nows.append(img_now)\n cv2.imshow(str(i) +'img', img_now)\n cv2.waitKey(0)\n\n def show_input_debug_caltech(self, img, classification_maps, scale_maps, offset_maps):\n for j in range(img.shape[0]):\n img_numpy = img.cpu().numpy().copy()[j]\n img_numpy = np.transpose(img_numpy, [1, 2, 0]) * [58.395, 57.12, 57.375] + [123.675, 116.28, 103.53]\n img_numpy = img_numpy[:, :, ::-1]\n img_numpy = img_numpy.astype(np.uint8)\n strides = [4]\n img_nows = []\n for i, stride in enumerate(strides):\n img_now = img_numpy.copy()\n cls_numpy = classification_maps[j][i].cpu().numpy().copy()[0][2]\n ignore_numpy = classification_maps[j][i].cpu().numpy().copy()[0][1]\n cv2.imshow('ignore', ignore_numpy)\n scale_numpy = scale_maps[j][i].cpu().numpy().copy()[0][0] * stride\n offset_numpy = offset_maps[j][i].cpu().numpy().copy()[0][:2]\n ys, xs = cls_numpy.nonzero()\n print(len(ys))\n for x, y in zip(xs, ys):\n # cv2.imshow(str(c), classification_maps[j][i].cpu().numpy().copy()[0][c])\n realx = x\n realy = y\n height = scale_numpy[y, x]\n realy = realy + 0.5 + offset_numpy[0][y, x]\n realx = realx + 0.5 + offset_numpy[1][y, x]\n realy = realy * stride\n realx = realx * stride\n top_y = 
int(realy - height/2)\n top_x = int(realx)\n down_y = int(realy + height/2)\n down_x = int(realx)\n top_left = (int(top_x - height * 0.1), int(top_y))\n down_right = (int(down_x + height * 0.1), down_y)\n cv2.rectangle(img_now, top_left, down_right, (255, 255, 125), 2)\n img_nows.append(img_now)\n cv2.imshow(str(i) +'img', img_now)\n cv2.waitKey(0)\n\n def show_input_debug_head(self, img, classification_maps, scale_maps, offset_maps):\n for j in range(img.shape[0]):\n img_numpy = img.cpu().numpy().copy()[j]\n img_numpy = np.transpose(img_numpy, [1, 2, 0]) * [58.395, 57.12, 57.375] + [123.675, 116.28, 103.53]\n img_numpy = img_numpy[:, :, ::-1]\n img_numpy = img_numpy.astype(np.uint8)\n strides = [4]\n img_nows = []\n for i, stride in enumerate(strides):\n img_now = img_numpy.copy()\n cls_numpy = classification_maps[j][i].cpu().numpy().copy()[0][2]\n ignore_numpy = classification_maps[j][i].cpu().numpy().copy()[0][1]\n cv2.imshow('ignore', ignore_numpy)\n scale_numpy = scale_maps[j][i].exp().cpu().numpy().copy()[0][0] * stride\n offset_numpy = offset_maps[j][i].cpu().numpy().copy()[0][:2]\n ys, xs = cls_numpy.nonzero()\n for x, y in zip(xs, ys):\n # cv2.imshow(str(c), classification_maps[j][i].cpu().numpy().copy()[0][c])\n realx = x\n realy = y\n height = scale_numpy[y, x]\n realy = realy + 0.5 + offset_numpy[0][y, x]\n realx = realx + 0.5 + offset_numpy[1][y, x]\n realy = realy * stride\n realx = realx * stride\n top_y = int(realy)\n top_x = int(realx)\n down_y = int(realy + height)\n down_x = int(realx)\n top_left = (int(top_x - height * 0.41/2), int(top_y))\n down_right = (int(down_x + height * 0.41/2), down_y)\n cv2.rectangle(img_now, top_left, down_right, (255, 255, 125), 2)\n img_nows.append(img_now)\n cv2.imshow(str(i) +'img', img_now)\n cv2.waitKey(0)\n\n def show_mot_input_debug(self, img, classification_maps, scale_maps, offset_maps):\n for j in range(img.shape[0]):\n img_numpy = img.cpu().numpy().copy()[j]\n img_numpy = np.transpose(img_numpy, [1, 2, 0]) * [58.395, 57.12, 57.375] + [123.675, 116.28, 103.53]\n # img_numpy = np.transpose(img_numpy, [1, 2, 0]) + [102.9801, 115.9465, 122.7717]\n img_numpy = img_numpy[:, :, ::-1]\n img_numpy = img_numpy.astype(np.uint8)\n strides = [4]\n img_nows = []\n for i, stride in enumerate(strides):\n img_now = img_numpy.copy()\n # cls_numpy = classification_maps[0][i].cpu().numpy().copy()[0][2]\n cls_numpy = classification_maps[j][i].cpu().numpy().copy()[0][2]\n instance_numpy = classification_maps[j][i].cpu().numpy().copy()[0][3]\n scale_numpy = scale_maps[j][i].cpu().numpy().copy()[0][0] * stride\n offset_numpy = offset_maps[j][i].cpu().numpy().copy()[0][:2]\n ys, xs = cls_numpy.nonzero()\n for x, y in zip(xs, ys):\n c=0\n cv2.imshow(str(c), classification_maps[j][i].cpu().numpy().copy()[0][2])\n realx = x\n realy = y\n height = scale_numpy[y, x]\n realy = realy + 0.5 + offset_numpy[0][y, x]\n realx = realx + 0.5 + offset_numpy[1][y, x]\n realy = realy * stride\n realx = realx * stride\n top_y = int(realy - height/2)\n top_x = int(realx)\n down_y = int(realy + height/2)\n down_x = int(realx)\n top_left = (int(top_x - height * 0.1), int(top_y))\n down_right = (int(down_x + height * 0.1), down_y)\n cv2.rectangle(img_now, top_left, down_right, (255, 255, 5*int(c)), 2)\n instance = instance_numpy[y, x]\n cv2.putText(img_now, str(instance), top_left, cv2.FONT_HERSHEY_COMPLEX, 1, 255)\n img_nows.append(img_now)\n cv2.imshow(str(i) +'img', img_now)\n cv2.waitKey(0)\n\n @property\n def refine(self):\n return hasattr(self, 'refine_head') and 
self.refine_head is not None\n\n def forward_train(self,\n img,\n img_metas,\n gt_bboxes,\n gt_labels,\n gt_bboxes_ignore=None,\n classification_maps=None,\n scale_maps=None,\n offset_maps=None):\n # for tracking data which batch is produced by dataset instead of data loader\n if type(img) == list:\n img=img[0]\n img_metas=img_metas[0]\n gt_bboxes=gt_bboxes[0]\n gt_labels=gt_labels[0]\n gt_bboxes_ignore = gt_bboxes_ignore[0]\n classification_maps = classification_maps[0]\n scale_maps = scale_maps[0]\n offset_maps = offset_maps[0]\n\n losses = dict()\n x = self.extract_feat(img)\n # self.show_input_debug(img, classification_maps, scale_maps, offset_maps)\n # self.show_input_debug_caltech(img, classification_maps, scale_maps, offset_maps)\n # self.show_mot_input_debug(img, classification_maps, scale_maps, offset_maps)\n # self.show_input_debug_head(img, classification_maps, scale_maps, offset_maps)\n\n outs = self.bbox_head(x)\n loss_inputs = outs + (gt_bboxes, gt_labels, classification_maps, scale_maps, offset_maps, img_metas, self.train_cfg.csp_head if self.refine else self.train_cfg)\n losses_bbox = self.bbox_head.loss(\n *loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)\n losses.update(losses_bbox)\n \n if self.refine:\n if self.detached:\n x = tuple([i.detach() for i in x])\n bbox_inputs = outs + (img_metas, self.train_cfg.csp_head, False)\n bbox_list = self.bbox_head.get_bboxes(*bbox_inputs, no_strides=False) # no_strides to not upscale yet\n \n bbox_list = [\n bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)[0]\n for det_bboxes, det_labels in bbox_list\n ]\n\n bbox_assigner = build_assigner(self.train_cfg.rcnn.assigner)\n bbox_sampler = build_sampler(\n self.train_cfg.rcnn.sampler, context=self)\n num_imgs = img.size(0)\n if gt_bboxes_ignore is None:\n gt_bboxes_ignore = [None for _ in range(num_imgs)]\n sampling_results = []\n \n for i in range(num_imgs):\n if bbox_list[i].shape[0] == 0 or gt_bboxes[i].shape[0] == 0:\n continue\n bbox = torch.tensor(bbox_list[i]).float().cuda()\n assign_result = bbox_assigner.assign(\n bbox, gt_bboxes[i], gt_bboxes_ignore[i],\n gt_labels[i])\n sampling_result = bbox_sampler.sample(\n assign_result,\n bbox,\n gt_bboxes[i],\n gt_labels[i])\n sampling_results.append(sampling_result)\n\n samp_list = [res.bboxes for res in sampling_results]\n if len(samp_list) == 0:\n losses.update(dict(loss_refine_cls=torch.tensor(0).float().cuda(), acc=torch.tensor(0).float().cuda()))\n return losses\n rois = bbox2roi(samp_list).float()\n if self.refine_head.loss_opinion is not None:\n pred_scores = torch.cat([torch.tensor(bbox[:, 4]).float().cuda() for bbox in bbox_list], dim=0)\n pred_rois = bbox2roi([torch.tensor(bbox).float().cuda() for bbox in bbox_list])\n pred_feats = self.refine_roi_extractor(\n x, pred_rois)\n pred_scores_refine = self.refine_head(pred_feats)\n loss_opinion = self.refine_head.compute_opinion_loss(pred_scores, pred_scores_refine)\n losses.update(loss_opinion)\n bbox_feats = self.refine_roi_extractor(\n x, rois)\n cls_score = self.refine_head(bbox_feats)\n bbox_targets = self.refine_head.get_target(\n sampling_results, gt_bboxes, gt_labels, self.train_cfg.rcnn)\n loss_refine = self.refine_head.loss(cls_score,\n *bbox_targets[:2])\n losses.update(dict(loss_refine_cls=loss_refine[\"loss_cls\"], distL1=loss_refine[\"dist\"]))\n\n return losses\n\n def simple_test_accuracy(self, img, img_meta):\n gts = img_meta[0][\"gts\"]\n x = self.extract_feat(img)\n if self.detached:\n x = (x[0].detach(),)\n\n rois = bbox2roi(gts)\n if rois.shape[0] 
== 0:\n return 0, 0\n\n roi_feats = self.refine_roi_extractor(\n x, rois)\n cls_score = self.refine_head.get_scores(roi_feats)\n\n return (cls_score > 0.5).float().sum(), rois.size(0)\n\n def simple_test(self, img, img_meta, rescale=False, return_id=False):\n x = self.extract_feat(img)\n outs = self.bbox_head(x)\n bbox_inputs = outs + (img_meta, self.test_cfg.csp_head if self.refine else self.test_cfg, False) # TODO://Handle rescalling\n if self.return_feature_maps:\n return self.bbox_head.get_bboxes_features(*bbox_inputs)\n bbox_list = self.bbox_head.get_bboxes(*bbox_inputs, no_strides=False)\n im_scale = img_meta[0][\"scale_factor\"]\n if \"id\" in img_meta[0]:\n img_id = img_meta[0][\"id\"]\n else:\n img_id = 0\n if self.refine:\n if self.detached:\n x = (x[0].detach(),)\n bbox_list = [\n bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)[0]\n for det_bboxes, det_labels in bbox_list\n ]\n refine_cfg = self.test_cfg.get('rcnn', None)\n bbox_list = [torch.tensor(bbox).float().cuda() for bbox in bbox_list]\n rois = bbox2roi(bbox_list)\n bbox_list = [bbox/im_scale for bbox in bbox_list]\n if rois.shape[0] == 0:\n cls_score = None\n else:\n roi_feats = self.refine_roi_extractor(\n x, rois)\n cls_score = self.refine_head.get_scores(roi_feats)\n\n res_buffer = []\n if cls_score is not None:\n if refine_cfg is not None:\n res_buffer = self.refine_head.suppress_boxes(rois, cls_score, img_meta, cfg=refine_cfg)\n else:\n res_buffer = self.refine_head.combine_scores(bbox_list, cls_score)\n if return_id:\n return res_buffer, img_id\n return res_buffer\n\n bbox_results = [\n bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)\n for det_bboxes, det_labels in bbox_list\n ]\n if return_id:\n return bbox_results[0], img_id\n return bbox_results[0]\n\n def foward_features(self, features):\n bbox_list = self.bbox_head.get_bboxes(*features)\n bbox_results = [\n bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)\n for det_bboxes, det_labels in bbox_list\n ]\n return bbox_results[0]\n" ]
[ [ "torch.tensor", "numpy.transpose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
MichaelAllen1966/stroke_outcome_algorithm
[ "99050bf4e0b19c38c8973fe10234fee4f230a172" ]
[ "clinical_outcome.py" ]
[ "\"\"\"\nClass to hold clinical outcome model.\nPredicts probability of good outcome of patient(s) or group(s) of patients.\n\nCall `calculate_outcome_for_all(args)` from outside of the object\n\nInputs\n======\n\nAll inputs take np arrays (for multiple groups of patients).\n\nmimic: proportion of patients with stroke mimic\n\nich: proportion of patients with intracerebral haemorrhage (ICH). \nOr probability of a patient having an ICH, when using for a single patient.\n\nnlvo: proportion of patients with non-large vessel occlusions (nLVO). \nOr probability of a patient having an NLVO, when using for a single patient.\n\nlvo: proportion of patients with large vessel occlusions (LVO). \nOr probability of a patient having a LVO, when using for a single patient.\n\nonset_to_needle: minutes from onset to thrombolysis\n\nonset_to_ouncture: minutes from onset to thrombectomy\n\nnlvo_eligible_for_treatment: proportion of patients with NLVO suitable for \ntreatment with thrombolysis. Or probability of a patient with NVLO being \neligible for treatment.\n\nlvo_eligible_for_treatment: proportion of patients with LVO suitable for \ntreatment with thrombolysis and/or thrombectomy. Or probability of a patient \nwith LVO being eligible for treatment.\n\nReturns\n=======\n\nProbability of good outcome: The probability of having a good outcome (modified\nRankin Scale 0-1) for the patient or group of patients (np array).\n\n\nReferences for decay of effect of thrombolysis and thrombectomy\n===============================================================\n\nDecay of effect of thrombolysis without image selection of patients taken from:\nEmberson, Jonathan, Kennedy R. Lees, Patrick Lyden, Lisa Blackwell, \nGregory Albers, Erich Bluhmki, Thomas Brott, et al (2014). “Effect of Treatment \nDelay, Age, and Stroke Severity on the Effects of Intravenous Thrombolysis with\nAlteplase for Acute Ischaemic Stroke: A Meta-Analysis of Individual Patient\nData from Randomised Trials.” The Lancet 384: 1929–1935.\nhttps://doi.org/10.1016/S0140-6736(14)60584-5.\n\n* Time to no effect = 6.3hrs\n\nDecay of effect of thrombectomy without image selection of patients taken from:\nFransen, Puck S. S., Olvert A. Berkhemer, Hester F. Lingsma, Debbie Beumer, \nLucie A. van den Berg, Albert J. Yoo, Wouter J. Schonewille, et al. (2016)\n“Time to Reperfusion and Treatment Effect for Acute Ischemic Stroke: A \nRandomized Clinical Trial.” JAMA Neurology 73: 190–96. \nhttps://doi.org/10.1001/jamaneurol.2015.3886.\n\n* Time to no effect = 8hrs\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\n\n\nclass Clinical_outcome:\n def __init__(self):\n \"\"\"Constructor for clinical outcome model\n \"\"\"\n self.name = \"Clinical outcome model\"\n self.thrombectomy_time_no_effect = 8 * 60\n self.thrombolysis_time_no_effect = 6.3 * 60\n self.maximum_permitted_time_to_thrombectomy = 360\n self.maximum_permitted_time_to_thrombolysis = 270\n\n def calculate_outcome_for_all(self,\n mimic,\n ich,\n nlvo,\n lvo,\n onset_to_needle,\n onset_to_puncture,\n nlvo_eligible_for_treatment,\n lvo_eligible_for_treatment,\n prop_thrombolysed_lvo_receiving_thrombectomy):\n \"\"\"\n Calculates the probability of good outcome for all patients admitted\n with acute stroke. \n\n Based on:\n Holodinsky JK, Williamson TS, Demchuk AM, et al. Modeling Stroke Patient\n Transport for All Patients With Suspected Large-Vessel Occlusion. JAMA \n Neurol. 2018;75(12):1477-1486. 
doi:10.1001/jamaneurol.2018.2424\n \n Sums outcomes for:\n\n 1) mimics\n 2) ICH\n 3) non-LVO\n 4) LVO treated with thrombolysis\n 5) LVO treated with thrombectomy (if thrombolysis not successful in a\n drip and ship configuration)\n\n arguments\n ---------\n\n np arrays (each row is a given geographic area with different \n characteristics)\n\n mimic: proportion of patients with stroke mimic\n ich: proportion of patients with ICH\n nlvo: proportion of patients with non-lvo\n lvo: proportion of patients with lvo\n onset_to_needle: minutes from onset to thrombolysis\n onset_to_ounctureL minutes from onset to thrombectomy\n nlvo_eligible_for_treatment: proportion of nlvo suitable for treatment\n lvo_eligible_for_treatment: proportion of lvo suitable for treatment\n\n returns\n -------\n\n probability of good outcome for all (np array)\n \"\"\"\n \n # Get outcomes\n # ------------\n \n outcomes = pd.DataFrame()\n\n # Calculate good outcomes for mimics\n outcomes['mimic'] = self._calculate_outcome_for_stroke_mimics(\n mimic.shape)\n\n # Calculate good outcomes for ich \n outcomes['ich'] = self._calculate_outcome_for_ICH(mimic.shape)\n\n # Calculate good outcomes for nlvo without treatment\n outcomes['nlvo_base'] = \\\n np.full(nlvo.shape, 0.4622)\n \n # Calculate good outcomes for nlvo with thrombolysis\n outcomes['nlvo_add_ivt'] = \\\n self._calculate_thrombolysis_outcome_for_nlvo(onset_to_needle)\n\n # Calculate good outcomes for lvo without treatment\n outcomes['lvo_base'] = \\\n np.full(nlvo.shape, 0.1328)\n \n # Calculate good outcomes for lvo with thrombolysis\n outcomes['lvo_add_ivt'] = \\\n self._calculate_thrombolysis_outcome_for_lvo(onset_to_needle)\n\n # Calculate good outcomes for lvo with thrombolysis\n outcomes['lvo_add_et'] = \\\n self._calculate_thrombectomy_outcome_for_lvo(onset_to_puncture)\n\n \n # Weight outcome results by proportion of patients\n # ------------------------------------------------\n \n # 'Results' are good outcomes\n results = pd.DataFrame()\n \n # Results for mimic\n results['mimic'] = outcomes['mimic'] * mimic\n \n # Results for ich\n results['ich'] = outcomes['ich'] * ich\n \n # Results for nlvo\n results['nlvo_base'] = nlvo * outcomes['nlvo_base']\n \n results['nlvo_ivt'] = \\\n nlvo * outcomes['nlvo_add_ivt'] * nlvo_eligible_for_treatment\n \n # Results for lvo\n results['lvo_base'] = lvo * outcomes['lvo_base']\n \n results['lvo_ivt'] = \\\n lvo * outcomes['lvo_add_ivt'] * lvo_eligible_for_treatment\n \n # Adjust thrombectomy/thrombolysis ratio for LVO \n # Reduce thrombectomy treatment by LVO responding to IVT\n lvo_receiving_et = ((lvo * lvo_eligible_for_treatment * \n prop_thrombolysed_lvo_receiving_thrombectomy) - \n results['lvo_ivt'])\n\n results['lvo_et'] = lvo_receiving_et * outcomes['lvo_add_et']\n\n p_good = results.sum(axis=1).values\n\n return p_good\n\n @staticmethod\n def _calculate_outcome_for_ICH(array_shape):\n \"\"\"\n Calculates the probability of good outcome for patients with intra-\n cranial haemorrhage (ICH).\n\n Sets all values to 0.24 \n\n Based on Holodinsky et al. (2018) Drip-and-Ship vs. Mothership: \n Modelling Stroke Patient Transport for All Suspected Large Vessel\n Occlusion Patients. 
JAMA Neuro (in press)\n\n arguments\n ---------\n\n array size\n\n returns\n -------\n\n probability of good outcome for ICH (np array)\n \"\"\"\n\n # Create an array of required length and set all values to 0.24\n p_good = np.zeros(array_shape)\n p_good[:] = 0.24\n\n return p_good \n\n @staticmethod\n def _calculate_outcome_for_stroke_mimics(array_shape):\n \"\"\"\n Calculates the probability of good outcome for patients with stroke\n mimic\n\n Sets all values to 1\n\n Based on Holodinsky et al. (2018) Drip-and-Ship vs. Mothership: \n Modelling Stroke Patient Transport for All Suspected Large Vessel\n Occlusion Patients. JAMA Neuro (in press)\n\n arguments\n ---------\n\n array size\n\n returns\n -------\n\n probability of good outcome for stroke mimiccs (np array)\n \"\"\"\n\n # Create an array of required length and set all values to 0.9\n p_good = np.zeros(array_shape)\n p_good[:] = 1\n\n return p_good\n \n def _calculate_thrombectomy_outcome_for_lvo(self, onset_to_puncture):\n \"\"\"\n Calculates the probability of additional good outcome for LVO patients\n receiving thrombectomy.\n\n arguments\n ---------\n\n onset_to_puncture : np array in minutes\n\n returns\n -------\n\n probability of additional good outcome if given thrombectomy (np array)\n \"\"\"\n\n p_good_max = 0.5208\n p_good_min = 0.1328\n \n # Convert probability to odds\n odds_good_max = p_good_max / (1 - p_good_max)\n odds_good_min = p_good_min / (1 - p_good_min)\n \n # Calculate fraction of effective time used\n fraction_max_effect_time_used = \\\n onset_to_puncture / self.thrombectomy_time_no_effect\n \n # Calculate odds of good outcome with treatment\n odds_good = np.exp(np.log(odds_good_max) - \n ((np.log(odds_good_max) - np.log(odds_good_min)) \n * fraction_max_effect_time_used))\n \n # Convert odds to probability\n prob_good = odds_good / (1 + odds_good)\n prob_good[prob_good < p_good_min] = p_good_min\n \n # Calculate probability of additional good outcome\n p_good_add = prob_good - p_good_min\n \n # Set additional good outcomes to zero if past permitted treatment time\n mask = onset_to_puncture > self.maximum_permitted_time_to_thrombectomy\n p_good_add[mask] = 0 \n \n # Ensure no negative outcomes\n mask = p_good_add < 0\n p_good_add[mask] = 0 \n\n return p_good_add \n\n def _calculate_thrombolysis_outcome_for_lvo(self, onset_to_needle):\n \"\"\"\n Calculates the probability of additional good outcome for LVO patients\n receiving thrombolysis. 
Does not include baseline untreated good\n comes.\n\n arguments\n ---------\n \n onset_to_needle : np array in minutes\n\n\n returns\n -------\n\n probability of additional good outcome if given thrombolysis \n (np array)\n \"\"\"\n \n p_good_max = 0.2441\n p_good_min = 0.1328\n \n # Convert probability to odds\n odds_good_max = p_good_max / (1 - p_good_max)\n odds_good_min = p_good_min / (1 - p_good_min)\n \n # Calculate fraction of effective time used \n fraction_max_effect_time_used = \\\n onset_to_needle / self.thrombolysis_time_no_effect\n\n # Calculate odds of good outcome with treatment\n odds_good = np.exp(np.log(odds_good_max) - \n ((np.log(odds_good_max) - np.log(odds_good_min)) \n * fraction_max_effect_time_used))\n\n # Convert odds to probability\n prob_good = odds_good / (1 + odds_good)\n prob_good[prob_good < p_good_min] = p_good_min\n \n # Calculate probability of additional good outcome\n p_good_add = prob_good - p_good_min\n \n # Set additional good outcomes to zero if past permitted treatment time\n mask = onset_to_needle> self.maximum_permitted_time_to_thrombolysis\n p_good_add[mask] = 0 \n \n # Ensure no negative outcomes\n mask = p_good_add < 0\n p_good_add[mask] = 0 \n\n # return outcome and proportion of treated who respond\n return p_good_add\n\n def _calculate_thrombolysis_outcome_for_nlvo(self, onset_to_needle):\n \"\"\"\n Calculates the probability of good outcome for non-LVO patients\n receiving thrombolysis.\n\n arguments\n ---------\n\n onset_to_needle : np array in minutes\n\n returns\n -------\n\n probability of good outcome if given thrombolysis (np array)\n \"\"\"\n\n p_good_max = 0.6444\n p_good_min = 0.4622\n \n # Convert probability to odds\n odds_good_max = p_good_max / (1 - p_good_max)\n odds_good_min = p_good_min / (1 - p_good_min)\n \n # Calculate fraction of effective time used \n fraction_max_effect_time_used = (onset_to_needle / \n self.thrombolysis_time_no_effect)\n \n # Calculate odds of good outcome with treatment\n odds_good = np.exp(np.log(odds_good_max) - \n ((np.log(odds_good_max) - np.log(odds_good_min)) \n * fraction_max_effect_time_used))\n \n # Convert odds to probability\n prob_good = odds_good / (1 + odds_good)\n prob_good[prob_good < p_good_min] = p_good_min\n \n # Calculate probability of additional good outcome\n p_good_add = prob_good - p_good_min\n \n mask = onset_to_needle> self.maximum_permitted_time_to_thrombolysis\n p_good_add[mask] = 0 \n \n # Ensure no negative outcomes\n mask = p_good_add < 0\n p_good_add[mask] = 0 \n\n # return outcome and proportion of treated who respond\n return p_good_add\n" ]
[ [ "numpy.log", "numpy.zeros", "pandas.DataFrame", "numpy.full" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
brettelliot/event-study
[ "cffc6a80dbc4b33e68e863488428996af51cc991" ]
[ "examples/earnings_surprises/earnings-converter.py" ]
[ "import pandas as pd\nfrom pandas.compat import StringIO\nimport numpy\nnumpy.set_printoptions(threshold=numpy.nan)\n\n\ndef main():\n df = pd.read_csv(StringIO(earnings), sep=\",\", header=None,\n names=['symbol', 'exchange', 'eps_pct_diff_surp', 'asof_date'])\n df = df.sort_values(by=['asof_date'])\n print(df.head())\n print(len(df))\n df.to_csv('../../data/events/nyse_earnings_surprises_2013.csv', index=False)\n\n myString = ', '.join('\"{0}\"'.format(s) for s in df.symbol.unique())\n myString = myString.replace(\" \", \"\")\n print(myString)\n\n#earnings = 'CFN, NYSE, -21.82, 2013-02-09\\nNDZ, NYSE, 30.77, 2013-01-29\\nAZZ, NYSE, -1.64, 2013-01-10'\nearnings = 'CFN, NYSE, -21.82, 2013-02-09\\n NDZ, NYSE, 30.77, 2013-01-29\\n AZZ, NYSE, -1.64, 2013-01-10\\n CLC, NYSE, 2.86, 2013-01-17\\n CMC, NYSE, 64.71, 2013-01-08\\n FC, NYSE, 15.38, 2013-01-04\\n FDO, NYSE, -6.76, 2013-01-04\\n FUL, NYSE, 14.29, 2013-01-17\\n LEN, NYSE, 30.23, 2013-01-16\\n LNN, NYSE, 53.33, 2013-01-09\\n MKC, NYSE, -3.48, 2013-01-25\\n RT, NYSE, 0.00, 2013-01-10\\n MSM, NYSE, 1.00, 2013-01-11\\n RPM, NYSE, -4.76, 2013-01-09\\n SVU, NYSE, -50.00, 2013-01-11\\n TISI, NYSE, 10.00, 2013-01-08\\n TXI, NYSE, -5.88, 2013-01-10\\n UNF, NYSE, 15.79, 2013-01-04\\n WOR, NYSE, 12.20, 2013-01-04\\n GBX, NYSE, 12.90, 2013-01-10\\n SJR, NYSE, 11.11, 2013-01-10\\n OMN, NYSE, -50.00, 2013-01-23\\n MON, NYSE, 67.57, 2013-01-09\\n GPN, NYSE, 6.90, 2013-01-09\\n AYI, NYSE, -13.75, 2013-01-09\\n STZ, NYSE, 14.55, 2013-01-10\\n SNX, NYSE, 11.54, 2013-01-11\\n TAL, NYSE, 600.00, 2013-01-23\\n IHS, NYSE, 12.35, 2013-01-09\\n EDU, NYSE, -150.00, 2013-01-30\\n SAR, NYSE, 28.57, 2013-01-15\\n ZEP, NYSE, 11.11, 2013-01-08\\n MG, NYSE, 0.00, 2013-01-09\\n MOS, NYSE, 7.14, 2013-01-04\\n ABT, NYSE, 1.33, 2013-01-24\\n ABX, NYSE, 1.83, 2013-02-15\\n AB, NYSE, 21.21, 2013-02-13\\n TAP, NYSE, 7.81, 2013-02-15\\n ACO, NYSE, -15.91, 2013-01-26\\n ADM, NYSE, -26.83, 2013-02-05\\n AEM, NYSE, -13.33, 2013-02-14\\n AEP, NYSE, 11.11, 2013-02-16\\n AES, NYSE, 6.67, 2013-02-28\\n AET, NYSE, -2.08, 2013-02-01\\n AFL, NYSE, 0.00, 2013-02-06\\n AGCO, NYSE, 1.02, 2013-02-06\\n HES, NYSE, -2.44, 2013-01-31\\n AIG, NYSE, 322.22, 2013-02-22\\n AIN, NYSE, -9.68, 2013-02-07\\n AJG, NYSE, 2.63, 2013-01-30\\n ALU, NYSE, 0.00, 2013-02-08\\n MATX, NYSE, 24.14, 2013-02-08\\n ALK, NYSE, -4.11, 2013-01-25\\n ALX, NYSE, -11.52, 2013-02-27\\n BEAM, NYSE, 0.00, 2013-02-02\\n AME, NYSE, 2.08, 2013-01-25\\n TWX, NYSE, 6.36, 2013-02-07\\n AVD, NYSE, 11.43, 2013-03-01\\n AMN, NYSE, 36.36, 2013-02-22\\n AN, NYSE, 3.08, 2013-02-01\\n AON, NYSE, 1.60, 2013-02-02\\n AP, NYSE, 77.78, 2013-02-05\\n APA, NYSE, -1.30, 2013-02-15\\n APC, NYSE, 30.00, 2013-02-05\\n APD, NYSE, 0.78, 2013-01-24\\n APH, NYSE, 4.44, 2013-01-18\\n ARG, NYSE, -3.70, 2013-01-25\\n AAN, NYSE, -4.00, 2013-02-08\\n ARW, NYSE, 13.89, 2013-02-08\\n ASGN, NYSE, -25.00, 2013-02-15\\n ASH, NYSE, -17.65, 2013-01-30\\n ASR, NYSE, 56.88, 2013-02-26\\n GAS, NYSE, -9.90, 2013-02-07\\n ATO, NYSE, -5.13, 2013-02-07\\n ATW, NYSE, 17.02, 2013-01-31\\n AU, NYSE, -67.44, 2013-02-21\\n AVP, NYSE, 37.04, 2013-02-13\\n AVT, NYSE, 21.69, 2013-01-25\\n AVY, NYSE, 10.20, 2013-01-31\\n AXP, NYSE, 0.00, 2013-01-18\\n B, NYSE, 7.84, 2013-02-23\\n BA, NYSE, 7.56, 2013-01-31\\n BAC, NYSE, 50.00, 2013-01-18\\n BAX, NYSE, 0.00, 2013-01-25\\n BC, NYSE, 122.22, 2013-01-25\\n OMX, NYSE, 6.67, 2013-02-21\\n BCE, NYSE, -2.99, 2013-02-08\\n BCR, NYSE, 1.80, 2013-02-01\\n BCS, NYSE, 40.74, 2013-02-13\\n BDX, NYSE, 9.76, 2013-02-06\\n BEN, NYSE, 1.68, 
2013-02-02\\n BGG, NYSE, 250.00, 2013-01-25\\n BHE, NYSE, 10.00, 2013-02-05\\n BHI, NYSE, 1.64, 2013-01-24\\n BID, NYSE, 0.92, 2013-03-01\\n BIO, NYSE, 15.67, 2013-02-27\\n BK, NYSE, 0.00, 2013-01-16\\n BKH, NYSE, 9.68, 2013-02-01\\n WRB, NYSE, 28.00, 2013-01-29\\n BLC, NYSE, 5.71, 2013-02-09\\n BLL, NYSE, -3.03, 2013-02-01\\n BLX, NYSE, 20.75, 2013-02-08\\n BMI, NYSE, -11.36, 2013-02-07\\n BMS, NYSE, 4.00, 2013-02-01\\n BMY, NYSE, 9.30, 2013-01-25\\n BOH, NYSE, 1.12, 2013-01-31\\n BXS, NYSE, -25.00, 2013-01-24\\n BPL, NYSE, 18.52, 2013-02-09\\nBRK.A, NYSE, 175.73, 2013-03-02\\n BRO, NYSE, 7.41, 2013-02-02\\n BSX, NYSE, 63.64, 2013-01-30\\n BT, NYSE, -89.22, 2013-02-02\\n MTRN, NYSE, 17.14, 2013-03-01\\n CACI, NYSE, 3.66, 2013-01-31\\n CAT, NYSE, -13.10, 2013-01-29\\n CB, NYSE, 10.00, 2013-01-30\\n CBI, NYSE, 9.64, 2013-02-28\\n CBM, NYSE, 100.00, 2013-02-07\\n CBU, NYSE, -3.70, 2013-01-23\\n CBT, NYSE, -28.57, 2013-01-31\\n CCC, NYSE, 35.71, 2013-02-22\\n CCE, NYSE, 4.65, 2013-02-08\\n C, NYSE, -20.69, 2013-01-18\\n CCK, NYSE, -7.27, 2013-01-31\\n CCU, NYSE, -12.21, 2013-02-01\\n CDE, NYSE, -15.15, 2013-02-22\\n CDI, NYSE, 8.70, 2013-02-27\\n CAH, NYSE, 9.41, 2013-02-06\\n CFR, NYSE, 5.38, 2013-01-31\\n CHD, NYSE, 0.00, 2013-02-06\\n CKP, NYSE, -50.00, 2013-03-06\\n CPK, NYSE, 18.60, 2013-03-08\\n CI, NYSE, 6.08, 2013-02-08\\n CIA, NYSE, -100.00, 2013-03-12\\n CKH, NYSE, -93.55, 2013-02-28\\n CL, NYSE, 0.71, 2013-02-01\\n CLF, NYSE, -25.45, 2013-02-13\\n CLH, NYSE, -25.00, 2013-02-21\\n CLX, NYSE, 11.11, 2013-02-05\\n CMA, NYSE, 7.81, 2013-01-17\\n CMO, NYSE, -6.06, 2013-01-31\\n CRK, NYSE, -77.42, 2013-02-12\\n CMS, NYSE, 4.17, 2013-02-22\\n CNA, NYSE, -150.00, 2013-02-12\\n CNW, NYSE, -10.34, 2013-02-07\\n CHG, NYSE, -4.12, 2013-02-27\\n CNL, NYSE, 12.50, 2013-02-20\\n COG, NYSE, 14.29, 2013-02-22\\n COT, NYSE, -66.67, 2013-02-16\\n CP, NYSE, -0.78, 2013-01-30\\n CPF, NYSE, 11.54, 2013-02-01\\n CQB, NYSE, -17.65, 2013-03-12\\n CR, NYSE, -5.15, 2013-01-29\\nCRD.B, NYSE, 52.38, 2013-02-14\\n CRS, NYSE, 1.64, 2013-02-01\\n CSC, NYSE, 22.22, 2013-02-06\\n CSL, NYSE, 6.49, 2013-02-09\\n CTB, NYSE, 35.29, 2013-02-26\\n CTL, NYSE, -1.47, 2013-02-14\\n CTS, NYSE, -21.74, 2013-01-29\\n CUB, NYSE, -32.86, 2013-02-12\\n CMI, NYSE, 14.94, 2013-02-07\\n CUZ, NYSE, 40.00, 2013-02-14\\n CVC, NYSE, -400.00, 2013-03-01\\n CVH, NYSE, 35.82, 2013-02-07\\n CW, NYSE, 4.40, 2013-02-21\\n CWT, NYSE, 33.33, 2013-02-28\\n CX, NYSE, -258.33, 2013-02-08\\n CYN, NYSE, -13.00, 2013-01-25\\n D, NYSE, 1.47, 2013-02-01\\n DBD, NYSE, -8.16, 2013-02-13\\n DCO, NYSE, -23.81, 2013-03-05\\n DD, NYSE, 22.22, 2013-01-23\\n CVA, NYSE, -13.04, 2013-02-07\\n DHR, NYSE, 0.00, 2013-01-30\\n DIS, NYSE, 2.60, 2013-02-06\\n DLX, NYSE, 11.76, 2013-01-25\\n DNB, NYSE, -1.24, 2013-02-12\\n RRD, NYSE, 16.22, 2013-02-27\\n DOV, NYSE, 1.87, 2013-01-25\\n DOW, NYSE, -2.94, 2013-02-01\\n DRE, NYSE, 0.00, 2013-01-31\\n DHI, NYSE, 42.86, 2013-01-30\\n UFS, NYSE, -7.09, 2013-02-02\\n DTE, NYSE, 0.00, 2013-02-21\\n DUK, NYSE, 7.69, 2013-02-14\\n DVN, NYSE, 2.63, 2013-02-21\\n DV, NYSE, 55.36, 2013-02-07\\n EAT, NYSE, 0.00, 2013-01-23\\n ECL, NYSE, 0.00, 2013-02-27\\n ED, NYSE, -6.85, 2013-02-01\\n EDE, NYSE, 27.78, 2013-02-15\\n EFX, NYSE, 4.00, 2013-02-07\\n EGN, NYSE, -15.58, 2013-01-24\\n EGP, NYSE, 0.00, 2013-02-13\\n ELY, NYSE, 2.00, 2013-01-31\\n EMC, NYSE, 6.98, 2013-01-30\\n EMR, NYSE, 0.00, 2013-02-06\\n EOG, NYSE, 19.26, 2013-02-14\\n EQT, NYSE, 14.29, 2013-01-25\\n ESE, NYSE, -44.44, 2013-02-08\\n ESV, NYSE, 7.87, 2013-02-21\\n ETN, 
NYSE, -10.87, 2013-02-06\\n ETR, NYSE, 21.99, 2013-02-09\\n EXAR, NYSE, -14.29, 2013-01-24\\n F, NYSE, 19.23, 2013-01-30\\n OPY, NYSE, 115.79, 2013-02-02\\n CLGX, NYSE, -3.12, 2013-02-22\\n FNB, NYSE, 4.55, 2013-01-24\\n FCF, NYSE, -18.18, 2013-01-31\\n FBP, NYSE, -30.00, 2013-02-06\\n FICO, NYSE, 6.94, 2013-01-31\\n FLO, NYSE, 12.00, 2013-02-08\\n FMC, NYSE, 0.00, 2013-02-07\\n FOE, NYSE, -250.00, 2013-03-06\\n S, NYSE, 4.35, 2013-02-08\\n NEE, NYSE, 9.57, 2013-01-30\\n FRT, NYSE, 0.91, 2013-02-13\\n FRX, NYSE, -61.54, 2013-01-16\\n FUN, NYSE, -433.33, 2013-02-20\\n FUR, NYSE, -48.15, 2013-03-08\\n GBL, NYSE, -28.72, 2013-02-06\\n GVA, NYSE, -29.03, 2013-03-01\\n BGC, NYSE, -3.45, 2013-02-26\\n GD, NYSE, -26.84, 2013-01-24\\n GE, NYSE, 2.33, 2013-01-19\\n RHP, NYSE, -50.00, 2013-02-13\\n AXLL, NYSE, 95.08, 2013-02-13\\n GGG, NYSE, 13.33, 2013-01-29\\n GHM, NYSE, -22.22, 2013-02-02\\n GIB, NYSE, -4.35, 2013-01-31\\n GLT, NYSE, -25.71, 2013-02-08\\n GLW, NYSE, 3.03, 2013-01-30\\n GSK, NYSE, 8.33, 2013-02-07\\n GLF, NYSE, -160.71, 2013-02-26\\n GNI, NYSE, -14.44, 2013-01-30\\n GPC, NYSE, 0.00, 2013-02-20\\n GRA, NYSE, 4.72, 2013-02-07\\n GTY, NYSE, -10.34, 2013-03-01\\n GWW, NYSE, -7.28, 2013-01-25\\n HAE, NYSE, 4.17, 2013-01-31\\n HAL, NYSE, 3.28, 2013-01-26\\n HAR, NYSE, -32.95, 2013-02-01\\n HVT, NYSE, 30.43, 2013-02-26\\n HRC, NYSE, 6.82, 2013-01-24\\n HCC, NYSE, 43.75, 2013-02-13\\n HCN, NYSE, 1.19, 2013-02-26\\n HCP, NYSE, 1.41, 2013-02-13\\n HOG, NYSE, 0.00, 2013-01-30\\n HE, NYSE, 21.88, 2013-02-16\\n HL, NYSE, -25.00, 2013-02-26\\n HMA, NYSE, -5.00, 2013-02-15\\n HMC, NYSE, -29.58, 2013-02-01\\n HMN, NYSE, 91.43, 2013-02-06\\n HFC, NYSE, -8.97, 2013-02-27\\n HOT, NYSE, 7.69, 2013-02-08\\n HP, NYSE, 8.53, 2013-02-01\\n HLS, NYSE, 40.63, 2013-02-19\\n HRS, NYSE, 4.17, 2013-01-30\\n HSC, NYSE, -3.23, 2013-02-15\\n HSY, NYSE, -1.33, 2013-02-01\\n HUBB, NYSE, 0.00, 2013-01-25\\n HUM, NYSE, 11.21, 2013-02-05\\n HXL, NYSE, -5.26, 2013-01-24\\n IBM, NYSE, 2.67, 2013-01-23\\n IDA, NYSE, 10.00, 2013-02-22\\n IEX, NYSE, 2.99, 2013-02-05\\n IFF, NYSE, -1.19, 2013-02-08\\n DIN, NYSE, 1.22, 2013-02-28\\n INT, NYSE, 0.00, 2013-02-22\\n IP, NYSE, 6.15, 2013-01-30\\n IPG, NYSE, 3.70, 2013-02-23\\n IO, NYSE, 30.77, 2013-02-14\\n IR, NYSE, 8.57, 2013-02-02\\n IRF, NYSE, 6.38, 2013-01-29\\n ITW, NYSE, -1.11, 2013-01-30\\n IVC, NYSE, -56.00, 2013-02-09\\n JEC, NYSE, 0.00, 2013-01-24\\n JNJ, NYSE, 1.71, 2013-01-23\\n JNY, NYSE, 75.00, 2013-02-14\\n K, NYSE, 3.08, 2013-02-06\\n KAMN, NYSE, 0.00, 2013-02-26\\n KDN, NYSE, 0.00, 2013-02-22\\n KEX, NYSE, 9.30, 2013-01-31\\n KEY, NYSE, -4.55, 2013-01-25\\n KIM, NYSE, 6.45, 2013-02-06\\n KMB, NYSE, 0.74, 2013-01-26\\n KEM, NYSE, 53.33, 2013-02-01\\n KMT, NYSE, -21.88, 2013-01-25\\n KO, NYSE, 2.27, 2013-02-13\\n KSU, NYSE, 10.98, 2013-01-23\\n LDL, NYSE, -10.53, 2013-02-27\\n LDR, NYSE, 10.42, 2013-02-12\\n LEE, NYSE, 25.00, 2013-01-23\\n LEG, NYSE, 10.34, 2013-02-05\\n LLY, NYSE, 8.97, 2013-01-30\\n LM, NYSE, 29.63, 2013-02-02\\n LNC, NYSE, 3.77, 2013-02-07\\n LPX, NYSE, -10.00, 2013-02-09\\n LXU, NYSE, 145.00, 2013-03-01\\n LTC, NYSE, -1.72, 2013-02-22\\n L, NYSE, -37.93, 2013-02-12\\n LUK, NYSE, 210.17, 2013-02-26\\n LUV, NYSE, 28.57, 2013-01-25\\n LUX, NYSE, 4.35, 2013-03-01\\n MKL, NYSE, 314.07, 2013-02-05\\n MAN, NYSE, 18.18, 2013-01-31\\n MTW, NYSE, 12.50, 2013-02-01\\n SM, NYSE, 95.65, 2013-02-21\\n MAS, NYSE, 500.00, 2013-02-12\\n MTZ, NYSE, 2.22, 2013-03-01\\n MCD, NYSE, 3.76, 2013-01-24\\n MDC, NYSE, 40.48, 2013-02-01\\n MDP, NYSE, 1.14, 2013-01-25\\n 
MDR, NYSE, 13.04, 2013-03-01\\n MDU, NYSE, 2.56, 2013-02-05\\n MED, NYSE, 12.00, 2013-03-08\\n CVS, NYSE, 2.73, 2013-02-07\\n MFC, NYSE, -12.50, 2013-02-08\\n MGA, NYSE, 36.84, 2013-03-02\\n MGM, NYSE, 0.00, 2013-02-21\\n MLR, NYSE, -11.76, 2013-03-07\\n MLI, NYSE, 14.29, 2013-02-06\\n MMC, NYSE, 0.00, 2013-02-13\\n MMM, NYSE, 0.00, 2013-01-25\\n MSA, NYSE, 3.64, 2013-02-14\\n MNR, NYSE, 38.46, 2013-02-08\\n MO, NYSE, 1.85, 2013-02-01\\n MOD, NYSE, -75.00, 2013-02-02\\nMOG.A, NYSE, -8.54, 2013-01-26\\n MHK, NYSE, 7.45, 2013-02-22\\n MSI, NYSE, 7.61, 2013-01-24\\n MCY, NYSE, -168.00, 2013-02-05\\n MRK, NYSE, 2.47, 2013-02-02\\n MRO, NYSE, -19.12, 2013-02-07\\n POWR, NYSE, 18.18, 2013-03-08\\n MTG, NYSE, -37.87, 2013-03-01\\n MTB, NYSE, 2.76, 2013-01-17\\n MTX, NYSE, 6.38, 2013-02-01\\n MUR, NYSE, 59.23, 2013-01-31\\n MYE, NYSE, -7.14, 2013-02-14\\n NBL, NYSE, 54.21, 2013-02-08\\n NBR, NYSE, 3.45, 2013-02-20\\n NE, NYSE, -19.35, 2013-01-24\\n NEM, NYSE, 13.27, 2013-02-22\\n NFG, NYSE, 6.58, 2013-02-08\\n NHI, NYSE, 1.20, 2013-02-15\\n NI, NYSE, 0.00, 2013-02-20\\n NJR, NYSE, -17.48, 2013-02-08\\n THC, NYSE, -24.64, 2013-02-27\\n NNN, NYSE, 4.55, 2013-02-08\\n NOC, NYSE, 18.39, 2013-01-31\\n NPK, NYSE, -11.23, 2013-02-16\\n NR, NYSE, 0.00, 2013-02-15\\n NSC, NYSE, 9.24, 2013-01-23\\n NUE, NYSE, 55.17, 2013-01-30\\n NVR, NYSE, 8.22, 2013-01-25\\n NWL, NYSE, 2.38, 2013-02-02\\n NWN, NYSE, -4.55, 2013-03-02\\n NYT, NYSE, 3.23, 2013-02-08\\n OCR, NYSE, 1.18, 2013-02-20\\n OGE, NYSE, 14.71, 2013-02-28\\n OHI, NYSE, 3.57, 2013-02-12\\n OI, NYSE, 8.11, 2013-01-31\\n OII, NYSE, 2.78, 2013-02-14\\n OKE, NYSE, 17.78, 2013-02-26\\n OLN, NYSE, 2.94, 2013-01-29\\n BRS, NYSE, 32.95, 2013-02-05\\n OLP, NYSE, 0.00, 2013-03-15\\n OMC, NYSE, 3.67, 2013-02-13\\n OMI, NYSE, -12.77, 2013-02-12\\n ORB, NYSE, 31.82, 2013-02-15\\n ORI, NYSE, -28.57, 2013-01-25\\n OSK, NYSE, 93.55, 2013-01-26\\n OXY, NYSE, 10.24, 2013-02-01\\n PHX, NYSE, -18.75, 2013-02-08\\n FCFS, NYSE, 2.20, 2013-01-24\\n PBI, NYSE, 7.69, 2013-02-01\\n PCG, NYSE, 3.51, 2013-02-22\\n PCL, NYSE, 68.97, 2013-01-29\\n PCP, NYSE, -3.23, 2013-01-25\\n TPC, NYSE, 0.00, 2013-02-22\\n PDS, NYSE, 250.00, 2013-02-15\\n PEG, NYSE, 5.13, 2013-02-22\\n PEI, NYSE, 0.00, 2013-02-26\\n PEP, NYSE, 3.81, 2013-02-15\\n PFE, NYSE, 6.82, 2013-01-30\\n PG, NYSE, 9.91, 2013-01-26\\n PGR, NYSE, 0.00, 2013-01-19\\n PH, NYSE, 6.25, 2013-01-19\\n PHG, NYSE, -4.17, 2013-01-30\\n PHM, NYSE, 9.68, 2013-02-01\\n PKD, NYSE, -150.00, 2013-02-22\\n PKY, NYSE, 17.39, 2013-02-12\\n PNC, NYSE, 24.82, 2013-01-18\\n PNM, NYSE, 18.18, 2013-03-02\\n PNR, NYSE, 6.82, 2013-01-30\\n PNW, NYSE, 41.18, 2013-02-23\\n POM, NYSE, -5.00, 2013-03-02\\n POT, NYSE, -11.86, 2013-02-01\\n PPG, NYSE, -0.65, 2013-01-15\\n PPL, NYSE, 6.52, 2013-02-15\\n PRGO, NYSE, 3.82, 2013-02-02\\n PL, NYSE, 11.36, 2013-02-07\\n PSB, NYSE, 5.04, 2013-02-20\\n CSH, NYSE, 12.61, 2013-01-25\\n PWR, NYSE, 36.11, 2013-02-22\\n PX, NYSE, 0.00, 2013-01-24\\n KWR, NYSE, 26.32, 2013-03-07\\n R, NYSE, 6.36, 2013-02-01\\n RBC, NYSE, 2.70, 2013-02-05\\n RDC, NYSE, 28.57, 2013-03-01\\n HTSI, NYSE, -20.69, 2013-02-01\\n RES, NYSE, 8.33, 2013-01-24\\n RGS, NYSE, -76.92, 2013-02-01\\n RGR, NYSE, 36.99, 2013-02-28\\n RHI, NYSE, 2.44, 2013-01-30\\n RJF, NYSE, 0.00, 2013-01-24\\n RLI, NYSE, 102.27, 2013-01-24\\n ROG, NYSE, -8.62, 2013-02-20\\n ROK, NYSE, -2.38, 2013-01-31\\n ROL, NYSE, -5.88, 2013-01-24\\n ROP, NYSE, 1.37, 2013-01-29\\n RTI, NYSE, 25.00, 2013-02-07\\n RTN, NYSE, 23.08, 2013-01-25\\n RYL, NYSE, 12.00, 2013-01-30\\n BSAC, 
NYSE, -1.96, 2013-02-05\\n T, NYSE, -6.38, 2013-01-25\\n SCG, NYSE, 0.00, 2013-02-22\\n SCHW, NYSE, 0.00, 2013-01-17\\n SCL, NYSE, -5.56, 2013-02-20\\n SMG, NYSE, 0.88, 2013-02-07\\n SEE, NYSE, 17.24, 2013-02-20\\n SF, NYSE, 5.17, 2013-02-26\\n SFE, NYSE, -121.74, 2013-03-08\\n SHW, NYSE, -0.87, 2013-02-01\\n STC, NYSE, 29.27, 2013-02-15\\n SJI, NYSE, -6.67, 2013-03-01\\n JOE, NYSE, -1000.00, 2013-03-01\\n SJW, NYSE, 72.22, 2013-02-20\\n SLB, NYSE, 0.00, 2013-01-19\\n HSH, NYSE, 29.17, 2013-02-01\\n AOS, NYSE, 12.35, 2013-01-25\\n SNA, NYSE, 4.38, 2013-02-08\\n PII, NYSE, 0.81, 2013-01-30\\n SNV, NYSE, 0.00, 2013-01-23\\n SO, NYSE, 12.82, 2013-01-31\\n SON, NYSE, 3.70, 2013-02-14\\n SPA, NYSE, 30.00, 2013-02-06\\n TRV, NYSE, 500.00, 2013-01-23\\n SR, NYSE, 14.68, 2013-02-06\\n NVE, NYSE, 0.00, 2013-02-23\\n SCI, NYSE, 10.00, 2013-02-13\\n SSP, NYSE, -3.85, 2013-02-27\\n STT, NYSE, 11.00, 2013-01-19\\n STI, NYSE, 6.56, 2013-01-19\\n STJ, NYSE, 2.22, 2013-01-24\\n STL, NYSE, 14.29, 2013-01-24\\n STR, NYSE, 8.57, 2013-02-21\\n STE, NYSE, 3.57, 2013-02-07\\n SYK, NYSE, 0.88, 2013-01-24\\n SUN, NYSE, -4.88, 2013-03-30\\n SUP, NYSE, -61.54, 2013-03-02\\n SWK, NYSE, 3.01, 2013-01-25\\n SWN, NYSE, 2.33, 2013-02-21\\n SWS, NYSE, 0.00, 2013-02-07\\n SWX, NYSE, -2.44, 2013-02-27\\n SWY, NYSE, 23.68, 2013-02-22\\n SXI, NYSE, 1.10, 2013-02-02\\n SYY, NYSE, 19.51, 2013-02-05\\n TNC, NYSE, 6.90, 2013-02-20\\n TCB, NYSE, -16.67, 2013-01-31\\n TCO, NYSE, 5.15, 2013-02-14\\n TDS, NYSE, -725.00, 2013-02-27\\n TDW, NYSE, 38.64, 2013-02-02\\n TDY, NYSE, 8.33, 2013-01-25\\n TE, NYSE, 0.00, 2013-02-06\\n TER, NYSE, 600.00, 2013-01-24\\n TEVA, NYSE, -0.75, 2013-02-08\\n TEX, NYSE, -51.28, 2013-02-20\\n TFX, NYSE, 1.79, 2013-02-22\\n TEN, NYSE, -2.94, 2013-02-01\\n TKR, NYSE, 25.00, 2013-01-25\\n TMK, NYSE, 1.53, 2013-02-05\\n TMO, NYSE, 6.25, 2013-02-01\\n TOT, NYSE, -1.12, 2013-02-14\\n TM, NYSE, -44.72, 2013-02-06\\n TR, NYSE, 37.50, 2013-02-14\\n TRN, NYSE, 7.14, 2013-02-21\\n TRP, NYSE, -15.09, 2013-02-13\\n TRR, NYSE, 566.67, 2013-02-07\\n TSO, NYSE, -2.90, 2013-02-07\\n TSS, NYSE, -3.03, 2013-01-23\\n TTI, NYSE, -21.05, 2013-03-01\\n TXT, NYSE, -1.75, 2013-01-24\\n TYL, NYSE, 10.71, 2013-02-07\\n TSN, NYSE, 23.08, 2013-02-02\\n UDR, NYSE, 2.94, 2013-02-06\\n UFI, NYSE, -42.86, 2013-01-23\\n UGI, NYSE, -15.89, 2013-02-01\\n UAM, NYSE, 45.45, 2013-02-20\\n UHS, NYSE, 9.89, 2013-03-01\\n UHT, NYSE, 268.42, 2013-02-28\\n UIL, NYSE, -9.68, 2013-02-22\\n UNH, NYSE, 0.00, 2013-01-18\\n KMPR, NYSE, -250.00, 2013-02-08\\n UNM, NYSE, 5.13, 2013-02-06\\n UNP, NYSE, 1.39, 2013-01-25\\n UNT, NYSE, 2.06, 2013-02-20\\n URS, NYSE, -1.04, 2013-02-26\\n USG, NYSE, -67.86, 2013-02-07\\n MUX, NYSE, -600.00, 2013-03-09\\n USM, NYSE, -1100.00, 2013-02-27\\n USPH, NYSE, 3.03, 2013-03-08\\n UTL, NYSE, 3.13, 2013-01-31\\n UTX, NYSE, 26.47, 2013-01-24\\n VMI, NYSE, 8.48, 2013-02-13\\n VAR, NYSE, 3.49, 2013-01-24\\n VFC, NYSE, 1.32, 2013-02-16\\n CBS, NYSE, -8.57, 2013-02-15\\n VLO, NYSE, 57.98, 2013-01-30\\n VMC, NYSE, -81.82, 2013-02-15\\n VLY, NYSE, 0.00, 2013-01-31\\n VNO, NYSE, 6.09, 2013-02-27\\n VSH, NYSE, 37.50, 2013-02-06\\n WTS, NYSE, 5.17, 2013-02-20\\n WBS, NYSE, 6.12, 2013-01-19\\n WEC, NYSE, 4.88, 2013-01-31\\n WFC, NYSE, 3.41, 2013-01-14\\n WG, NYSE, 57.14, 2013-03-07\\n WGL, NYSE, 9.62, 2013-02-07\\n WHR, NYSE, 3.15, 2013-02-01\\n WMB, NYSE, -3.85, 2013-02-21\\n WMK, NYSE, 20.29, 2013-03-06\\n WNC, NYSE, 3.23, 2013-02-06\\n TEG, NYSE, -5.32, 2013-03-01\\n WR, NYSE, 80.00, 2013-03-01\\n WRE, NYSE, 2.17, 2013-02-14\\n 
WRI, NYSE, 4.44, 2013-02-15\\n WPP, NYSE, -175.00, 2013-02-12\\n WSO, NYSE, -12.77, 2013-02-15\\n WST, NYSE, 8.93, 2013-02-22\\n WWW, NYSE, 200.00, 2013-02-20\\n WY, NYSE, 36.84, 2013-01-26\\n X, NYSE, 45.33, 2013-01-30\\n XL, NYSE, 138.24, 2013-02-08\\n XOM, NYSE, 10.00, 2013-02-02\\n XRX, NYSE, 7.14, 2013-01-25\\n Y, NYSE, 54.64, 2013-02-22\\n HRG, NYSE, -50.00, 2013-02-09\\n CRY, NYSE, 33.33, 2013-02-15\\n CHK, NYSE, 85.71, 2013-02-22\\n DDR, NYSE, 0.00, 2013-02-13\\n ELS, NYSE, 0.00, 2013-01-29\\n ALG, NYSE, 37.93, 2013-03-07\\n ETH, NYSE, 5.41, 2013-01-23\\n ATR, NYSE, 0.00, 2013-02-08\\n GGP, NYSE, 6.90, 2013-02-05\\n MSL, NYSE, -10.00, 2013-01-30\\n RCL, NYSE, 66.67, 2013-02-05\\n CWEI, NYSE, -34.04, 2013-02-22\\n HR, NYSE, 0.00, 2013-02-21\\n RGA, NYSE, 35.56, 2013-02-01\\n RIG, NYSE, 12.35, 2013-03-02\\n SKT, NYSE, 2.22, 2013-02-13\\n TWI, NYSE, -80.85, 2013-02-26\\n BDN, NYSE, 17.86, 2013-02-07\\n KGC, NYSE, -4.55, 2013-02-14\\n YPF, NYSE, 26.67, 2013-03-13\\n CPT, NYSE, 1.04, 2013-02-01\\n SGY, NYSE, 67.27, 2013-02-26\\n BFS, NYSE, -11.48, 2013-03-08\\n BWA, NYSE, 3.57, 2013-02-15\\n EQR, NYSE, 0.00, 2013-02-06\\n CLP, NYSE, -81.25, 2013-02-08\\n KOF, NYSE, -7.78, 2013-02-28\\n OKS, NYSE, 3.13, 2013-02-26\\n SQM, NYSE, -15.63, 2013-03-06\\n BYD, NYSE, -138.46, 2013-03-05\\n CBL, NYSE, 8.77, 2013-02-06\\n DECK, NYSE, 7.36, 2013-03-01\\n IT, NYSE, 6.78, 2013-02-08\\n GFI, NYSE, -36.36, 2013-02-15\\n HST, NYSE, 8.11, 2013-02-22\\n LXP, NYSE, 0.00, 2013-02-22\\n OMG, NYSE, -533.33, 2013-02-20\\n REG, NYSE, 8.62, 2013-01-31\\n TUC, NYSE, -5.56, 2013-03-08\\n AF, NYSE, 7.14, 2013-01-24\\n BFR, NYSE, 13.33, 2013-02-09\\n HHS, NYSE, 26.32, 2013-02-01\\n MHO, NYSE, -3.45, 2013-02-01\\n NFX, NYSE, -36.36, 2013-02-20\\n SPG, NYSE, 13.93, 2013-02-05\\n SU, NYSE, -14.20, 2013-02-06\\n SUI, NYSE, -2.44, 2013-02-22\\n TV, NYSE, 5.13, 2013-02-26\\n CGI, NYSE, 0.00, 2013-01-24\\n CYT, NYSE, 77.42, 2013-02-01\\n EMN, NYSE, 0.00, 2013-02-01\\n GRT, NYSE, 0.00, 2013-02-15\\n MAA, NYSE, -1.74, 2013-02-07\\n PLT, NYSE, 0.00, 2013-01-30\\n BZH, NYSE, 24.27, 2013-01-29\\n ELX, NYSE, 0.00, 2013-02-01\\n AGM, NYSE, -5.41, 2013-03-19\\n MLM, NYSE, -13.21, 2013-02-13\\n AKS, NYSE, 14.29, 2013-01-30\\n ALB, NYSE, 18.18, 2013-01-23\\n VRX, NYSE, -4.00, 2013-03-01\\n CBR, NYSE, 140.00, 2013-02-22\\n MAC, NYSE, 3.45, 2013-02-07\\n RKT, NYSE, 5.47, 2013-01-23\\n RYN, NYSE, 3.51, 2013-01-25\\n ADC, NYSE, 1.96, 2013-02-28\\nBRK.B, NYSE, 0.88, 2013-03-02\\n EXP, NYSE, 0.00, 2013-02-07\\n GGB, NYSE, -66.67, 2013-02-22\\n SSD, NYSE, -100.00, 2013-02-08\\n ESS, NYSE, 4.02, 2013-02-01\\n FR, NYSE, 0.00, 2013-02-21\\n HIW, NYSE, 0.00, 2013-02-13\\n IMAX, NYSE, 58.33, 2013-02-22\\n AIV, NYSE, 4.00, 2013-02-08\\n FCH, NYSE, 50.00, 2013-02-20\\n ITGR, NYSE, 6.00, 2013-02-26\\n GEO, NYSE, 7.32, 2013-02-22\\n CLI, NYSE, 4.76, 2013-02-08\\n DAR, NYSE, -20.00, 2013-02-28\\n RS, NYSE, 9.28, 2013-02-22\\n CPE, NYSE, -66.67, 2013-03-15\\n KNX, NYSE, 4.76, 2013-01-31\\n O, NYSE, 3.70, 2013-02-15\\n PKX, NYSE, -15.35, 2013-03-02\\n COF, NYSE, -12.35, 2013-01-18\\n CYD, NYSE, -23.14, 2013-02-28\\n IRS, NYSE, 57.50, 2013-02-20\\n MCK, NYSE, -13.50, 2013-02-01\\n SWC, NYSE, 116.67, 2013-02-28\\n STM, NYSE, -22.22, 2013-01-31\\n TEO, NYSE, 28.36, 2013-03-01\\n TRK, NYSE, 400.00, 2013-03-07\\n GFF, NYSE, 300.00, 2013-01-31\\n LMT, NYSE, -0.56, 2013-01-25\\n APU, NYSE, -13.89, 2013-02-01\\n AGU, NYSE, 6.93, 2013-02-22\\n LH, NYSE, -4.35, 2013-02-09\\n DDD, NYSE, 0.00, 2013-02-26\\n WEX, NYSE, 0.94, 2013-02-07\\n AFG, NYSE, 3.08, 
2013-02-12\\n RMD, NYSE, 3.92, 2013-01-25\\n WAB, NYSE, 2.29, 2013-02-20\\n CIB, NYSE, 20.39, 2013-03-05\\n CAM, NYSE, -1.04, 2013-02-01\\n FCX, NYSE, 5.41, 2013-01-23\\n RNR, NYSE, 70.27, 2013-02-06\\n AVX, NYSE, -20.00, 2013-01-25\\n RWT, NYSE, 85.19, 2013-02-22\\n AXE, NYSE, 0.76, 2013-01-30\\n CLB, NYSE, 3.54, 2013-01-31\\n MD, NYSE, 1.54, 2013-02-01\\n THG, NYSE, 6.25, 2013-02-07\\n BAP, NYSE, 3.72, 2013-02-06\\n DO, NYSE, 28.18, 2013-02-06\\n RE, NYSE, 175.86, 2013-02-07\\n DST, NYSE, 17.82, 2013-02-01\\n EL, NYSE, 11.54, 2013-02-06\\n ESC, NYSE, -34.88, 2013-03-01\\n MIG, NYSE, -100.00, 2013-02-13\\n WAT, NYSE, 0.63, 2013-01-23\\n EME, NYSE, 11.48, 2013-02-27\\n HIG, NYSE, 80.00, 2013-02-05\\n ITT, NYSE, 2.63, 2013-02-28\\n SPN, NYSE, 4.26, 2013-02-27\\n SWM, NYSE, -9.18, 2013-02-07\\n SCCO, NYSE, 0.00, 2013-02-02\\n RCI, NYSE, 20.55, 2013-02-15\\n EIX, NYSE, 66.04, 2013-02-27\\n IRM, NYSE, -20.00, 2013-03-01\\n REV, NYSE, -19.18, 2013-02-06\\n SPH, NYSE, -17.46, 2013-02-08\\n CCJ, NYSE, 46.34, 2013-02-09\\n PGI, NYSE, -6.67, 2013-02-14\\n CRR, NYSE, 2.30, 2013-02-01\\n BVN, NYSE, -26.67, 2013-03-01\\n FCN, NYSE, 11.67, 2013-03-01\\n RPT, NYSE, 8.00, 2013-02-13\\n TUP, NYSE, 1.79, 2013-01-30\\n ASB, NYSE, 0.00, 2013-01-18\\n GWR, NYSE, -2.47, 2013-02-13\\n TBI, NYSE, 35.71, 2013-02-07\\n FFG, NYSE, 24.00, 2013-02-08\\n USNA, NYSE, 4.96, 2013-02-06\\n CSV, NYSE, 4.35, 2013-02-26\\n LVB, NYSE, 12.77, 2013-03-07\\n ALR, NYSE, 6.25, 2013-02-16\\n OCN, NYSE, -7.84, 2013-03-01\\n PAA, NYSE, 42.03, 2013-02-07\\n DNR, NYSE, 24.14, 2013-02-22\\n HMY, NYSE, 50.00, 2013-02-05\\n TGI, NYSE, 5.80, 2013-01-31\\n PAG, NYSE, 7.55, 2013-02-07\\n GEL, NYSE, -2.86, 2013-02-15\\n IM, NYSE, 23.73, 2013-02-14\\n LIN, NYSE, -21.92, 2013-03-01\\n NUS, NYSE, 2.11, 2013-02-07\\n CNI, NYSE, -0.70, 2013-01-23\\n LAD, NYSE, 10.45, 2013-02-21\\n NSP, NYSE, 4.44, 2013-02-09\\n DEL, NYSE, -29.63, 2013-02-28\\n DGX, NYSE, -3.81, 2013-01-24\\n KRC, NYSE, 3.23, 2013-01-31\\n MTH, NYSE, 50.00, 2013-02-01\\n NCR, NYSE, 4.35, 2013-02-08\\n OFG, NYSE, -50.00, 2013-02-08\\n IVZ, NYSE, -4.26, 2013-02-01\\n DX, NYSE, 9.68, 2013-02-21\\n FBC, NYSE, 38.27, 2013-02-09\\n ALV, NYSE, 9.85, 2013-02-01\\n ARE, NYSE, 0.87, 2013-02-08\\n BBT, NYSE, 2.86, 2013-01-18\\n CGG, NYSE, -59.32, 2013-03-02\\n BXP, NYSE, 2.42, 2013-01-30\\n MS, NYSE, 73.08, 2013-01-19\\n SRT, NYSE, 200.00, 2013-02-28\\n HLX, NYSE, 162.86, 2013-02-21\\n FLS, NYSE, 0.35, 2013-02-22\\n MT, NYSE, -880.00, 2013-02-07\\n PXD, NYSE, -2.35, 2013-02-14\\n SLG, NYSE, 0.87, 2013-01-31\\n NAT, NYSE, 0.00, 2013-02-12\\n CSU, NYSE, -22.22, 2013-03-07\\n DRQ, NYSE, 2.70, 2013-03-01\\n FDP, NYSE, -100.00, 2013-02-20\\n NLY, NYSE, 35.29, 2013-02-07\\n TLM, NYSE, -300.00, 2013-02-18\\n TSM, NYSE, 0.00, 2013-01-18\\n YUM, NYSE, 2.47, 2013-02-05\\n AMG, NYSE, 4.94, 2013-01-30\\n EPR, NYSE, -4.40, 2013-02-27\\n FE, NYSE, 1.27, 2013-02-26\\n LFL, NYSE, -80.00, 2013-05-01\\n MTD, NYSE, 8.44, 2013-02-07\\n SID, NYSE, 57.14, 2013-03-29\\n IN, NYSE, -18.18, 2013-03-12\\n AI, NYSE, 9.91, 2013-02-07\\n URI, NYSE, 23.30, 2013-01-24\\n INGR, NYSE, 4.26, 2013-02-08\\n RAS, NYSE, 153.85, 2013-02-14\\n UNS, NYSE, 12.50, 2013-02-27\\n ASI, NYSE, -17.95, 2013-03-07\\n ANH, NYSE, 7.14, 2013-02-08\\n OFC, NYSE, 4.08, 2013-02-09\\n GPX, NYSE, 6.67, 2013-02-27\\n WAC, NYSE, 11.32, 2013-03-19\\n RBA, NYSE, -12.50, 2013-02-27\\n WDR, NYSE, 5.17, 2013-01-30\\n LHO, NYSE, 4.44, 2013-02-21\\n LNT, NYSE, -1.72, 2013-02-15\\n LVLT, NYSE, 11.11, 2013-02-13\\n MFA, NYSE, 0.00, 2013-03-07\\n OME, NYSE, 
33.33, 2013-03-06\\n EQY, NYSE, 7.14, 2013-02-21\\n FII, NYSE, 10.00, 2013-01-25\\n FMX, NYSE, 39.60, 2013-02-28\\n LLL, NYSE, 6.13, 2013-01-31\\n VTR, NYSE, 2.06, 2013-02-16\\n WCN, NYSE, -7.69, 2013-02-15\\n AVB, NYSE, -0.71, 2013-01-31\\n GIL, NYSE, 6.67, 2013-02-07\\n HZO, NYSE, 10.00, 2013-01-30\\n AWR, NYSE, 43.24, 2013-03-01\\n CLS, NYSE, 46.67, 2013-01-23\\n EPD, NYSE, 7.58, 2013-02-01\\n RSG, NYSE, -13.95, 2013-02-08\\n WM, NYSE, -5.00, 2013-02-15\\n AKR, NYSE, 3.57, 2013-02-06\\n CVG, NYSE, 4.17, 2013-02-08\\n RRC, NYSE, 228.57, 2013-02-27\\n SAP, NYSE, -2.38, 2013-01-24\\n CCI, NYSE, 57.14, 2013-01-24\\n PQ, NYSE, -20.00, 2013-03-01\\n WFT, NYSE, -94.44, 2013-02-27\\n CAA, NYSE, 14.29, 2013-02-01\\n ENB, NYSE, -6.67, 2013-02-16\\n GMK, NYSE, -8.33, 2013-02-28\\n MMR, NYSE, 75.00, 2013-01-19\\n PB, NYSE, 1.19, 2013-01-26\\n VIV, NYSE, -7.25, 2013-02-26\\n AXL, NYSE, -111.76, 2013-02-09\\n BP, NYSE, 19.05, 2013-02-06\\n ETM, NYSE, 13.04, 2013-02-09\\n HT, NYSE, 10.00, 2013-02-21\\n BYI, NYSE, 5.26, 2013-02-01\\n CEB, NYSE, 4.84, 2013-02-07\\n INFY, NYSE, 5.56, 2013-01-12\\n JLL, NYSE, -0.38, 2013-01-30\\n AZN, NYSE, 24.64, 2013-02-01\\n SFG, NYSE, 7.23, 2013-01-30\\n TREX, NYSE, 27.78, 2013-02-20\\n GS, NYSE, 61.38, 2013-01-17\\n SYX, NYSE, -144.44, 2013-03-06\\n WCC, NYSE, -2.75, 2013-02-01\\n JNPR, NYSE, 26.67, 2013-01-25\\n RDN, NYSE, -146.43, 2013-02-12\\n RAI, NYSE, 4.11, 2013-02-13\\n SKX, NYSE, 172.73, 2013-02-14\\n WTM, NYSE, 724.10, 2013-02-06\\n NCI, NYSE, 29.17, 2013-02-15\\n BLT, NYSE, -21.74, 2013-03-08\\n BLK, NYSE, 5.88, 2013-01-18\\n CIR, NYSE, 25.45, 2013-03-01\\n PKG, NYSE, -1.61, 2013-01-23\\n PKI, NYSE, 0.00, 2013-02-01\\n UGP, NYSE, 38.10, 2013-02-21\\n WWE, NYSE, 0.00, 2013-03-01\\n SNN, NYSE, 2.86, 2013-02-08\\n UPS, NYSE, -4.35, 2013-02-01\\n XOXO, NYSE, 62.50, 2013-03-07\\n SLF, NYSE, 36.36, 2013-02-14\\n CDR, NYSE, 33.33, 2013-03-08\\n RLH, NYSE, -21.43, 2013-03-01\\n EW, NYSE, 16.88, 2013-02-05\\n MET, NYSE, 5.93, 2013-02-13\\n FBR, NYSE, -28.57, 2013-01-31\\n VVC, NYSE, 23.81, 2013-02-15\\n BAM, NYSE, 148.28, 2013-02-16\\n NVS, NYSE, 0.00, 2013-01-24\\n VGR, NYSE, -43.75, 2013-02-27\\n BHLB, NYSE, 0.00, 2013-01-29\\n CRL, NYSE, 6.67, 2013-02-14\\n CYH, NYSE, 0.00, 2013-02-22\\n MBT, NYSE, 65.71, 2013-03-20\\n MTOR, NYSE, -375.00, 2013-01-31\\n CNQ, NYSE, -29.55, 2013-03-08\\n ERJ, NYSE, -25.27, 2013-03-13\\n VZ, NYSE, -28.30, 2013-01-23\\n EVC, NYSE, 12.50, 2013-02-28\\n PBR, NYSE, 0.00, 2013-02-05\\n XEL, NYSE, 3.57, 2013-02-01\\n ALE, NYSE, 0.00, 2013-02-16\\n HW, NYSE, -20.00, 2013-01-30\\n POL, NYSE, 0.00, 2013-01-30\\n UMC, NYSE, 0.00, 2013-02-07\\n ASX, NYSE, 41.43, 2013-01-31\\n COH, NYSE, -4.65, 2013-01-23\\n CXW, NYSE, 7.32, 2013-02-14\\n DVA, NYSE, 6.33, 2013-02-15\\n EXC, NYSE, -1.54, 2013-02-08\\n MCO, NYSE, 7.14, 2013-02-09\\n BRFS, NYSE, 43.48, 2013-03-06\\n TU, NYSE, -1.15, 2013-02-16\\n WIT, NYSE, 0.00, 2013-01-18\\n ERF, NYSE, 462.50, 2013-02-22\\n GG, NYSE, -22.22, 2013-02-15\\n HNT, NYSE, -2.70, 2013-01-31\\n NXY, NYSE, -23.44, 2013-02-26\\n NYCB, NYSE, -3.45, 2013-01-31\\n SXT, NYSE, -8.33, 2013-02-08\\n CPG, NYSE, -191.67, 2013-03-15\\n AMX, NYSE, -40.00, 2013-02-13\\n MPX, NYSE, -50.00, 2013-01-24\\n OIS, NYSE, -5.82, 2013-02-20\\n BH, NYSE, -35.35, 2013-01-26\\n MMP, NYSE, 6.15, 2013-02-06\\n PES, NYSE, 250.00, 2013-02-14\\n ABB, NYSE, -18.75, 2013-02-15\\n RDY, NYSE, -27.27, 2013-02-15\\n KMR, NYSE, -19.23, 2013-02-22\\n GEN, NYSE, -20.00, 2013-02-12\\n ADS, NYSE, 2.38, 2013-02-01\\n CVI, NYSE, 5.15, 2013-03-13\\n FTI, NYSE, 
0.00, 2013-02-13\\n PRA, NYSE, 10.64, 2013-02-20\\n STO, NYSE, 26.47, 2013-02-08\\n BEL, NYSE, -266.67, 2013-02-21\\n FIS, NYSE, -8.82, 2013-02-13\\n COL, NYSE, 4.44, 2013-01-19\\n KAI, NYSE, 7.32, 2013-02-27\\n FRM, NYSE, 233.33, 2013-03-09\\n ABC, NYSE, 0.00, 2013-01-25\\n BG, NYSE, -76.15, 2013-02-08\\n FRO, NYSE, 106.52, 2013-02-22\\n ECA, NYSE, -3.12, 2013-02-15\\n CS, NYSE, -54.76, 2013-02-08\\n EEP, NYSE, -30.77, 2013-02-14\\n CVX, NYSE, -1.65, 2013-02-02\\n DB, NYSE, 280.49, 2013-02-01\\n GXP, NYSE, 200.00, 2013-03-01\\n JHX, NYSE, 371.43, 2013-02-28\\n PFG, NYSE, 10.81, 2013-02-01\\n PVR, NYSE, -227.78, 2013-02-21\\n AAP, NYSE, 17.33, 2013-02-08\\n KND, NYSE, 4.55, 2013-02-26\\n WTW, NYSE, 9.09, 2013-02-14\\n CNC, NYSE, 42.42, 2013-02-06\\n PRU, NYSE, -2.87, 2013-02-07\\n BCH, NYSE, 12.94, 2013-02-06\\n NS, NYSE, -19.35, 2013-02-02\\n ITUB, NYSE, -5.00, 2013-02-05\\n SXL, NYSE, 20.88, 2013-02-21\\n VALE, NYSE, -26.00, 2013-02-28\\n TNP, NYSE, -128.57, 2013-04-20\\n LCI, NYSE, 233.33, 2013-02-08\\n AUO, NYSE, -122.73, 2013-02-07\\n GTI, NYSE, 19.05, 2013-02-27\\n HNR, NYSE, -127.27, 2013-05-04\\n MWE, NYSE, -38.89, 2013-02-28\\n NLS, NYSE, 4.55, 2013-03-05\\n RGC, NYSE, 40.00, 2013-02-08\\n SBS, NYSE, 48.25, 2013-03-22\\n JAH, NYSE, 2.40, 2013-02-15\\n NPO, NYSE, 110.71, 2013-02-08\\n TRI, NYSE, 9.09, 2013-02-14\\n CAE, NYSE, 12.50, 2013-02-14\\n LF, NYSE, 971.43, 2013-02-07\\n SNY, NYSE, 1.30, 2013-02-08\\n WHG, NYSE, 15.91, 2013-02-08\\n BANC, NYSE, -300.00, 2013-03-02\\n GTN, NYSE, 4.35, 2013-02-21\\n BAK, NYSE, -150.00, 2013-02-08\\n COP, NYSE, 1.42, 2013-01-31\\n CNP, NYSE, 40.00, 2013-02-28\\n EEQ, NYSE, -18.18, 2013-02-15\\n MRH, NYSE, 60.26, 2013-02-08\\n NGS, NYSE, 26.09, 2013-03-15\\n NRP, NYSE, 34.88, 2013-02-14\\n PXP, NYSE, -22.64, 2013-02-22\\n XEC, NYSE, 9.26, 2013-02-20\\n IAG, NYSE, -11.11, 2013-02-21\\n TS, NYSE, -16.44, 2013-02-22\\n EGO, NYSE, 6.67, 2013-02-23\\n JNS, NYSE, 35.71, 2013-01-25\\n PFS, NYSE, 7.41, 2013-02-02\\n ENH, NYSE, 21.68, 2013-02-08\\n IHG, NYSE, 5.56, 2013-02-20\\n CNX, NYSE, 95.45, 2013-02-01\\n AMT, NYSE, -17.07, 2013-02-27\\n ABG, NYSE, 10.77, 2013-02-20\\n LII, NYSE, 0.00, 2013-02-06\\n SRE, NYSE, 11.34, 2013-02-27\\n AEE, NYSE, -36.36, 2013-02-21\\n PLD, NYSE, 0.00, 2013-02-07\\n SAH, NYSE, 4.00, 2013-02-21\\n GPI, NYSE, -17.50, 2013-02-20\\n FIX, NYSE, -11.11, 2013-03-01\\n MMS, NYSE, 12.50, 2013-02-08\\n SRI, NYSE, -28.57, 2013-03-02\\n RTEC, NYSE, 6.25, 2013-02-05\\n NOV, NYSE, 3.47, 2013-02-02\\n DF, NYSE, 33.33, 2013-02-14\\n SAM, NYSE, 1.63, 2013-02-21\\n RL, NYSE, 8.60, 2013-02-07\\n FLR, NYSE, 132.35, 2013-02-21\\n ALL, NYSE, 942.86, 2013-02-07\\n ATI, NYSE, 5.88, 2013-01-24\\n EE, NYSE, -14.29, 2013-02-20\\n AIT, NYSE, 0.00, 2013-02-01\\n CHH, NYSE, 9.76, 2013-02-12\\n FMS, NYSE, 105.77, 2013-02-27\\n BCO, NYSE, -7.69, 2013-02-02\\n CBB, NYSE, -125.00, 2013-02-28\\n MWW, NYSE, 0.00, 2013-02-08\\n PSA, NYSE, 5.68, 2013-02-22\\n E, NYSE, 2.83, 2013-02-16\\n JPM, NYSE, 15.83, 2013-01-17\\n USB, NYSE, 1.35, 2013-01-17\\n HON, NYSE, 0.92, 2013-01-26\\n ITG, NYSE, 100.00, 2013-02-01\\n ARB, NYSE, 6.25, 2013-02-26\\n APL, NYSE, 0.00, 2013-02-19\\n AVA, NYSE, -42.22, 2013-02-21\\n AXS, NYSE, 64.96, 2013-02-05\\n CHT, NYSE, 5.26, 2013-01-31\\n MOH, NYSE, 145.45, 2013-02-08\\n CVD, NYSE, 2.82, 2013-01-25\\n AHT, NYSE, 2.63, 2013-02-28\\n GPK, NYSE, 12.50, 2013-02-08\\n CNO, NYSE, 8.70, 2013-02-12\\n AUQ, NYSE, -28.57, 2013-03-26\\n JRN, NYSE, 34.62, 2013-03-08\\nGRP.U, NYSE, -14.92, 2013-03-06\\n NFP, NYSE, 11.43, 2013-02-15\\n CRI, 
NYSE, 2.30, 2013-02-28\\n FMD, NYSE, -20.00, 2013-02-08\\n FPO, NYSE, 10.34, 2013-02-22\\n TRQ, NYSE, -350.00, 2013-03-26\\n WLL, NYSE, 9.21, 2013-02-28\\n AEL, NYSE, 14.63, 2013-02-21\\n AHL, NYSE, 87.60, 2013-02-08\\n AUY, NYSE, -3.70, 2013-02-21\\n CMP, NYSE, 0.00, 2013-02-07\\n KRO, NYSE, -400.00, 2013-03-13\\n TPX, NYSE, 9.09, 2013-01-25\\n UTI, NYSE, 75.00, 2013-02-01\\n PJC, NYSE, 31.34, 2013-01-31\\n TRW, NYSE, 14.81, 2013-02-16\\n AIZ, NYSE, 122.58, 2013-02-07\\n HTH, NYSE, 62.50, 2013-03-16\\n ETP, NYSE, 0.00, 2013-02-21\\n SMI, NYSE, 500.00, 2013-02-07\\n LSE, NYSE, -6.25, 2013-02-16\\n BBD, NYSE, -2.63, 2013-01-29\\n NRG, NYSE, 124.14, 2013-02-28\\n HOS, NYSE, 29.17, 2013-02-07\\n ABR, NYSE, 160.00, 2013-02-16\\n FHN, NYSE, 0.00, 2013-01-19\\n AGO, NYSE, 32.39, 2013-02-28\\n HSP, NYSE, 1.85, 2013-02-14\\n HNI, NYSE, -6.98, 2013-02-06\\n GHL, NYSE, -32.43, 2013-01-24\\n XPO, NYSE, -14.00, 2013-02-28\\n CVO, NYSE, 23.08, 2013-02-28\\n CHE, NYSE, 16.92, 2013-02-19\\n GNW, NYSE, 30.77, 2013-02-06\\n CBG, NYSE, 12.24, 2013-02-07\\n SFL, NYSE, -26.67, 2013-02-26\\n NEU, NYSE, -15.57, 2013-01-29\\n GOL, NYSE, -109.09, 2013-03-26\\n CAB, NYSE, 4.17, 2013-02-15\\n LTM, NYSE, 1.82, 2013-02-22\\n VVI, NYSE, 10.53, 2013-02-02\\n WCG, NYSE, 0.00, 2013-02-14\\n HEP, NYSE, -2.63, 2013-02-22\\n DPZ, NYSE, 8.47, 2013-03-01\\n BDC, NYSE, 9.86, 2013-02-08\\n EGY, NYSE, -171.43, 2013-03-15\\n LPL, NYSE, 2.63, 2013-02-22\\n ENS, NYSE, 12.82, 2013-02-07\\n BMR, NYSE, 5.88, 2013-02-06\\n ACC, NYSE, 9.26, 2013-02-13\\n KRG, NYSE, -9.09, 2013-02-08\\n WLK, NYSE, 13.60, 2013-02-20\\n EXR, NYSE, 4.65, 2013-02-22\\n CNS, NYSE, 16.67, 2013-01-24\\n IOC, NYSE, 264.29, 2013-02-28\\n STON, NYSE, -233.33, 2013-03-16\\n CPL, NYSE, 38.10, 2013-03-13\\n TPGI, NYSE, -114.29, 2013-02-14\\n SHO, NYSE, -3.33, 2013-02-20\\n CUBE, NYSE, 5.00, 2013-02-22\\n NRF, NYSE, 170.37, 2013-02-15\\n BBW, NYSE, -68.29, 2013-02-15\\n DLR, NYSE, 4.31, 2013-02-16\\n NWE, NYSE, 2.63, 2013-02-15\\n ORA, NYSE, 200.00, 2013-02-28\\n NP, NYSE, 5.26, 2013-02-21\\n SMA, NYSE, -21.05, 2013-02-22\\n BBG, NYSE, 25.00, 2013-02-22\\n BXC, NYSE, -163.16, 2013-02-14\\n KNL, NYSE, 32.14, 2013-02-06\\n LVS, NYSE, -8.47, 2013-01-31\\n HLF, NYSE, 0.96, 2013-02-20\\n MIC, NYSE, -20.41, 2013-02-21\\n PHH, NYSE, -11.54, 2013-02-07\\n CE, NYSE, 6.35, 2013-01-29\\n EDR, NYSE, 0.00, 2013-02-20\\n WTI, NYSE, 8.33, 2013-02-27\\n ARC, NYSE, -100.00, 2013-03-01\\n PBH, NYSE, 8.82, 2013-02-08\\n HUN, NYSE, 0.00, 2013-02-13\\n DLB, NYSE, 4.44, 2013-01-30\\n DSX, NYSE, -33.33, 2013-03-15\\n LAZ, NYSE, 84.85, 2013-02-08\\n TGP, NYSE, 1.82, 2013-02-22\\n TLP, NYSE, -43.48, 2013-03-13\\n DRH, NYSE, 16.00, 2013-03-01\\n HTGC, NYSE, 8.70, 2013-03-01\\n KFN, NYSE, 5.26, 2013-02-06\\n THS, NYSE, 0.00, 2013-02-22\\n NSR, NYSE, -12.50, 2013-02-06\\n WAL, NYSE, 0.00, 2013-01-25\\n SLW, NYSE, 2.04, 2013-03-22\\n MPW, NYSE, 0.00, 2013-02-08\\nRDS.B, NYSE, 16.00, 2013-02-01\\n GNK, NYSE, -24.71, 2013-02-21\\n MFB, NYSE, 4.76, 2013-03-07\\nRDS.A, NYSE, 9.95, 2013-02-01\\n ITC, NYSE, 0.93, 2013-02-28\\n FTK, NYSE, -158.82, 2013-03-14\\n PIKE, NYSE, 168.00, 2013-02-06\\n ALJ, NYSE, 0.00, 2013-03-07\\n DRC, NYSE, -4.55, 2013-03-01\\n STN, NYSE, 8.06, 2013-02-22\\n SSW, NYSE, -6.90, 2013-03-06\\n CF, NYSE, 3.41, 2013-02-20\\n HPY, NYSE, 0.00, 2013-02-08\\n ACCO, NYSE, 0.00, 2013-02-14\\n ROC, NYSE, -6.25, 2013-02-20\\n WPZ, NYSE, -28.57, 2013-02-20\\n LCC, NYSE, 44.44, 2013-01-24\\n GLP, NYSE, 58.82, 2013-03-15\\n AMP, NYSE, 15.54, 2013-01-31\\n DHT, NYSE, 108.33, 2013-01-30\\n 
FNF, NYSE, 17.86, 2013-02-20\\n NM, NYSE, 20.00, 2013-02-20\\n CCO, NYSE, 25.00, 2013-02-20\\n BWP, NYSE, 0.00, 2013-02-12\\n ICE, NYSE, 5.14, 2013-02-07\\n BKD, NYSE, -57.14, 2013-02-12\\n AAV, NYSE, 350.00, 2013-03-28\\n BAS, NYSE, -42.11, 2013-02-20\\n CPA, NYSE, -9.87, 2013-02-07\\n LYV, NYSE, -147.06, 2013-02-27\\n WNR, NYSE, 5.84, 2013-03-01\\n CMG, NYSE, 0.00, 2013-02-06\\n RGP, NYSE, -180.00, 2013-02-21\\n KOP, NYSE, 11.86, 2013-02-15\\n UAL, NYSE, -7.41, 2013-01-25\\n ETE, NYSE, -90.91, 2013-02-21\\n RSO, NYSE, -17.65, 2013-03-05\\n XCO, NYSE, 6.25, 2013-02-21\\n PAC, NYSE, 41.18, 2013-02-28\\n NYX, NYSE, 10.26, 2013-02-06\\n TDG, NYSE, 51.65, 2013-02-05\\n BMA, NYSE, 18.40, 2013-02-15\\n THI, NYSE, -2.82, 2013-02-22\\n BTE, NYSE, -40.48, 2013-03-08\\n CNH, NYSE, 29.58, 2013-02-01\\n GLA, NYSE, 67.44, 2013-02-14\\n POR, NYSE, -9.52, 2013-02-23\\n HIL, NYSE, -100.00, 2013-03-12\\n HVB, NYSE, -20.00, 2013-02-01\\n KS, NYSE, 0.00, 2013-02-14\\n HK, NYSE, 0.00, 2013-03-01\\n DCP, NYSE, 59.62, 2013-02-28\\n DK, NYSE, 10.10, 2013-03-08\\n CODI, NYSE, 14.81, 2013-03-07\\n VG, NYSE, 25.00, 2013-02-14\\n MA, NYSE, 1.46, 2013-02-01\\n MWA, NYSE, -200.00, 2013-02-06\\n KOG, NYSE, 14.29, 2013-03-01\\n PWE, NYSE, -500.00, 2013-02-15\\n PGTI, NYSE, 100.00, 2013-02-21\\n AWH, NYSE, 16.23, 2013-02-14\\n NSH, NYSE, -65.71, 2013-02-02\\n WYN, NYSE, 5.00, 2013-02-07\\n WNS, NYSE, 0.00, 2013-01-17\\n AYR, NYSE, 36.84, 2013-02-22\\n EVR, NYSE, 55.77, 2013-01-31\\n HBI, NYSE, 7.00, 2013-02-06\\n WU, NYSE, 20.00, 2013-02-13\\n OC, NYSE, -31.25, 2013-02-21\\n MR, NYSE, 2.08, 2013-02-26\\n DAC, NYSE, -21.43, 2013-02-12\\n AWI, NYSE, 3.03, 2013-02-20\\n SUSS, NYSE, 444.44, 2013-02-28\\n DEI, NYSE, 0.00, 2013-02-13\\n OB, NYSE, -200.00, 2013-02-06\\n SBH, NYSE, -5.88, 2013-02-08\\n EBS, NYSE, -4.35, 2013-03-08\\n KBR, NYSE, 122.22, 2013-02-21\\n AER, NYSE, 30.95, 2013-02-21\\n NOA, NYSE, -11.11, 2013-02-06\\n SPR, NYSE, -2.27, 2013-02-13\\n ANW, NYSE, 0.00, 2013-02-28\\n DCT, NYSE, 10.00, 2013-02-08\\n SE, NYSE, -3.03, 2013-02-06\\n TOO, NYSE, 16.67, 2013-02-22\\n TSL, NYSE, -39.77, 2013-02-27\\n TWC, NYSE, 1.95, 2013-02-01\\n MVO, NYSE, -5.06, 2013-03-15\\n CO, NYSE, 40.00, 2013-02-27\\n EXK, NYSE, -45.83, 2013-03-13\\n EIG, NYSE, -25.00, 2013-02-28\\n HF, NYSE, 21.62, 2013-03-07\\n CEL, NYSE, 34.78, 2013-03-05\\n FIG, NYSE, 53.85, 2013-02-28\\n NGLS, NYSE, 0.00, 2013-02-15\\n TCAP, NYSE, 3.64, 2013-03-07\\n GFA, NYSE, -483.33, 2013-03-12\\n BR, NYSE, -5.56, 2013-02-08\\n SCR, NYSE, 85.71, 2013-03-08\\n CNK, NYSE, -12.82, 2013-02-21\\n DAL, NYSE, 0.00, 2013-01-23\\n ORN, NYSE, 250.00, 2013-03-01\\n ACM, NYSE, 9.09, 2013-02-06\\n JMP, NYSE, 62.50, 2013-02-14\\n SLH, NYSE, 1.69, 2013-02-08\\n CLR, NYSE, 16.85, 2013-02-28\\n BGS, NYSE, -17.95, 2013-02-15\\n STAR, NYSE, 12.50, 2013-02-27\\n YGE, NYSE, -74.07, 2013-03-05\\n DFS, NYSE, -9.40, 2013-03-06\\n TEL, NYSE, 1.56, 2013-01-24\\n BX, NYSE, 25.53, 2013-02-01\\n SEP, NYSE, 8.11, 2013-02-06\\n BZ, NYSE, -30.00, 2013-02-27\\n PPO, NYSE, -28.26, 2013-02-21\\n PRO, NYSE, 25.00, 2013-02-13\\n WBC, NYSE, 13.68, 2013-02-16\\n DHX, NYSE, 7.14, 2013-01-31\\n PMC, NYSE, 13.79, 2013-02-08\\n HGG, NYSE, 0.00, 2013-02-01\\n OWW, NYSE, -14.29, 2013-02-15\\n VR, NYSE, 35.58, 2013-02-01\\n CXO, NYSE, -5.88, 2013-02-21\\n G, NYSE, 4.76, 2013-02-08\\n EJ, NYSE, 160.00, 2013-03-13\\n WX, NYSE, 32.00, 2013-03-08\\n CMLP, NYSE, -50.00, 2013-02-06\\n VMW, NYSE, -5.56, 2013-01-29\\n CZZ, NYSE, 63.64, 2013-02-08\\n CGA, NYSE, -3.23, 2013-02-09\\n TDC, NYSE, 5.71, 2013-02-08\\n 
FLY, NYSE, 137.65, 2013-03-08\\n DUF, NYSE, 6.25, 2013-02-26\\n MAIN, NYSE, 12.00, 2013-03-08\\n REN, NYSE, -50.00, 2013-03-08\\n TGH, NYSE, 9.57, 2013-02-13\\n DFT, NYSE, -5.00, 2013-02-07\\n RF, NYSE, 10.00, 2013-01-23\\n PZN, NYSE, -22.22, 2013-02-13\\n LL, NYSE, 19.05, 2013-02-21\\n NMM, NYSE, 0.00, 2013-01-25\\n OZM, NYSE, 5.48, 2013-02-08\\n ES, NYSE, -5.08, 2013-02-20\\n MSCI, NYSE, -1.89, 2013-02-08\\n ARR, NYSE, -18.52, 2013-02-23\\n KW, NYSE, 275.00, 2013-03-13\\n GTS, NYSE, -10.17, 2013-02-07\\n FOR, NYSE, 222.22, 2013-02-14\\n LRN, NYSE, 4.35, 2013-02-06\\n TNK, NYSE, -125.00, 2013-02-22\\n N, NYSE, 21.43, 2013-02-01\\n DAN, NYSE, 5.56, 2013-02-22\\n BIP, NYSE, 12.07, 2013-02-09\\n CPN, NYSE, -500.00, 2013-02-14\\n SOL, NYSE, 2.70, 2013-03-15\\n PM, NYSE, 1.64, 2013-02-08\\n HI, NYSE, 7.89, 2013-02-05\\n V, NYSE, 2.25, 2013-02-07\\n IPI, NYSE, 0.00, 2013-02-14\\n AWK, NYSE, -14.29, 2013-02-27\\n HTS, NYSE, 37.84, 2013-02-13\\n DPS, NYSE, -4.71, 2013-02-14\\n CFX, NYSE, 7.69, 2013-02-07\\n WES, NYSE, -27.91, 2013-02-28\\n SB, NYSE, -10.00, 2013-02-21\\n LO, NYSE, 3.95, 2013-02-14\\n LPS, NYSE, 10.45, 2013-02-08\\n FF, NYSE, -31.82, 2013-03-19\\n NNA, NYSE, 150.00, 2013-02-13\\n EPB, NYSE, 14.55, 2013-01-17\\n JBT, NYSE, 3.23, 2013-03-07\\n DL, NYSE, 33.33, 2013-02-27\\n RAX, NYSE, -4.55, 2013-02-13\\n HCI, NYSE, 67.61, 2013-03-06\\n EC, NYSE, -20.47, 2013-02-16\\n CLW, NYSE, 10.53, 2013-02-21\\n MJN, NYSE, 5.88, 2013-02-01\\n EPC, NYSE, 1.85, 2013-02-01\\n BPI, NYSE, -3.33, 2013-03-13\\n RST, NYSE, 55.56, 2013-03-01\\n DGI, NYSE, 92.31, 2013-02-27\\n SWI, NYSE, 10.34, 2013-02-05\\n CYS, NYSE, -46.15, 2013-02-07\\n IVR, NYSE, 20.31, 2013-02-06\\n BUD, NYSE, -5.08, 2013-02-28\\n PMT, NYSE, -2.35, 2013-02-08\\n STWD, NYSE, 15.38, 2013-02-28\\n CFN, NYSE, -16.98, 2013-02-09\\n SPB, NYSE, 71.43, 2013-02-07\\n ARI, NYSE, -10.34, 2013-02-28\\n CLNY, NYSE, -13.89, 2013-03-07\\n ART, NYSE, 300.00, 2013-02-15\\n SEM, NYSE, 12.00, 2013-02-22\\n BSBR, NYSE, 578.57, 2013-03-28\\n DOLE, NYSE, -6100.00, 2013-03-13\\n VSI, NYSE, 0.00, 2013-02-27\\n TWO, NYSE, -15.15, 2013-02-07\\n CVE, NYSE, -14.29, 2013-02-15\\n H, NYSE, 81.82, 2013-02-14\\n LEA, NYSE, 7.25, 2013-02-02\\n CLD, NYSE, 8.00, 2013-02-14\\n AOL, NYSE, 7.50, 2013-02-09\\n CHSP, NYSE, 5.13, 2013-02-22\\n PEB, NYSE, 0.00, 2013-02-22\\n CIT, NYSE, 60.94, 2013-01-30\\n KAR, NYSE, -4.55, 2013-02-21\\n CIE, NYSE, -66.67, 2013-02-27\\n TMH, NYSE, 8.33, 2013-02-06\\n KRA, NYSE, -300.00, 2013-02-28\\n SYA, NYSE, -29.41, 2013-02-05\\n TRNO, NYSE, -162.50, 2013-02-16\\n PDM, NYSE, -2.70, 2013-02-08\\n GNRC, NYSE, 26.09, 2013-02-15\\n ACW, NYSE, -2.17, 2013-03-07\\n BALT, NYSE, -11.76, 2013-02-21\\n ST, NYSE, 2.17, 2013-01-31\\n SEMG, NYSE, 55.56, 2013-03-01\\n CALX, NYSE, 20.00, 2013-02-06\\n MXL, NYSE, -57.14, 2013-02-06\\n STNG, NYSE, -60.00, 2013-02-26\\n PRI, NYSE, -1.43, 2013-02-08\\n SDRL, NYSE, -93.65, 2013-03-01\\n CLDT, NYSE, 0.00, 2013-02-20\\n EXL, NYSE, 0.00, 2013-02-28\\n LYB, NYSE, -0.88, 2013-02-02\\n PNG, NYSE, 7.14, 2013-02-07\\n PLOW, NYSE, -25.00, 2013-03-12\\n SIX, NYSE, 198.00, 2013-02-21\\n NKA, NYSE, 1066.67, 2013-02-01\\n RRTS, NYSE, 0.00, 2013-02-07\\n JKS, NYSE, -332.48, 2013-04-11\\n CODE, NYSE, -13.64, 2013-01-30\\n FAF, NYSE, 44.64, 2013-02-22\\n QEP, NYSE, 3.13, 2013-02-20\\n OAS, NYSE, 6.52, 2013-02-26\\n VPG, NYSE, 15.38, 2013-02-13\\n HPP, NYSE, 9.52, 2013-03-07\\n FN, NYSE, 9.09, 2013-02-05\\n ECT, NYSE, 65.85, 2013-03-16\\n QUAD, NYSE, -6.67, 2013-03-05\\n KKR, NYSE, 54.84, 2013-02-08\\n RLD, NYSE, 20.00, 
2013-02-07\\n AMRC, NYSE, 44.44, 2013-03-19\\n GDOT, NYSE, 50.00, 2013-02-01\\n AT, NYSE, -160.00, 2013-03-01\\n ENV, NYSE, 0.00, 2013-02-15\\n IL, NYSE, 200.00, 2013-02-22\\n WSR, NYSE, -12.00, 2013-03-13\\n SFUN, NYSE, 35.71, 2013-02-09\\n COR, NYSE, 5.00, 2013-02-23\\n VC, NYSE, 20.62, 2013-03-01\\n CCSC, NYSE, -20.00, 2013-03-07\\n CCG, NYSE, 0.00, 2013-02-27\\n EFC, NYSE, -72.73, 2013-02-14\\n TOWR, NYSE, 183.33, 2013-02-16\\n CHMT, NYSE, -53.13, 2013-02-26\\n HBM, NYSE, 200.00, 2013-02-21\\n EXAM, NYSE, 55.56, 2013-02-28\\n XUE, NYSE, 7.69, 2013-02-28\\n CMRE, NYSE, 6.67, 2013-01-24\\n NOAH, NYSE, 20.00, 2013-02-26\\n IPHI, NYSE, -40.00, 2013-02-05\\n BITA, NYSE, 33.33, 2013-03-08\\n BAH, NYSE, 11.11, 2013-01-31\\n GM, NYSE, -2.04, 2013-02-15\\n TROX, NYSE, -60.00, 2013-02-21\\n DANG, NYSE, 20.00, 2013-03-08\\n YOKU, NYSE, 9.09, 2013-03-01\\n FRC, NYSE, -16.44, 2013-01-17\\n RFP, NYSE, 52.38, 2013-02-13\\n ISS, NYSE, 15.38, 2013-03-09\\n WD, NYSE, -14.29, 2013-03-07\\n FLT, NYSE, 10.00, 2013-02-08\\n GCAP, NYSE, -325.00, 2013-03-13\\n FRF, NYSE, -25.93, 2013-03-29\\n SWFT, NYSE, 46.15, 2013-01-24\\n AG, NYSE, -10.34, 2013-02-27\\n QRE, NYSE, -174.07, 2013-03-07\\n AAT, NYSE, 11.76, 2013-02-20\\n MCC, NYSE, 5.41, 2013-02-07\\n NLSN, NYSE, 3.51, 2013-02-12\\n AGRO, NYSE, -71.43, 2013-03-22\\n BKU, NYSE, 27.08, 2013-01-30\\n INXN, NYSE, -38.89, 2013-02-28\\n NPTN, NYSE, 16.67, 2013-02-22\\n INN, NYSE, 25.00, 2013-02-27\\n KMI, NYSE, -5.88, 2013-01-17\\n HCA, NYSE, 9.64, 2013-02-05\\n MX, NYSE, 135.21, 2013-01-31\\n HII, NYSE, 8.89, 2013-02-28\\n QIHU, NYSE, 175.00, 2013-03-06\\n APO, NYSE, 119.48, 2013-02-09\\n GNC, NYSE, 8.70, 2013-02-15\\n SDT, NYSE, 11.48, 2013-03-16\\n UAN, NYSE, 16.67, 2013-02-28\\n ARCO, NYSE, 5.00, 2013-03-09\\n ELLI, NYSE, 36.36, 2013-02-15\\n TMS, NYSE, -23.81, 2013-02-15\\n SQNS, NYSE, -16.00, 2013-02-08\\n STAG, NYSE, 17.24, 2013-02-21\\n AL, NYSE, 8.33, 2013-03-01\\n TLLP, NYSE, 10.42, 2013-02-12\\n RENN, NYSE, 14.29, 2013-03-12\\n NQ, NYSE, 800.00, 2013-03-07\\n THR, NYSE, -14.29, 2013-02-08\\n KOS, NYSE, 125.00, 2013-02-26\\n RLJ, NYSE, 4.35, 2013-02-28\\n NGL, NYSE, -7.41, 2013-02-16\\n FENG, NYSE, 100.00, 2013-03-07\\n LNKD, NYSE, 900.00, 2013-02-08\\n NMFC, NYSE, 5.88, 2013-03-07\\n ACTV, NYSE, 5.26, 2013-02-15\\n TAOM, NYSE, 700.00, 2013-03-15\\n RATE, NYSE, -60.00, 2013-02-13\\n VHS, NYSE, -22.22, 2013-01-31\\n MPC, NYSE, 8.13, 2013-01-31\\n MITT, NYSE, -1.16, 2013-03-06\\n OILT, NYSE, 0.00, 2013-03-07\\n SXC, NYSE, 14.71, 2013-02-06\\n AMTG, NYSE, -8.57, 2013-03-07\\n AMID, NYSE, -2500.00, 2013-04-17\\n WAIR, NYSE, -7.41, 2013-01-30\\n PER, NYSE, -7.58, 2013-03-02\\n PPP, NYSE, -44.44, 2013-02-22\\n FNV, NYSE, -8.33, 2013-03-20\\n FSM, NYSE, 16.67, 2013-03-21\\n FBHS, NYSE, 4.55, 2013-02-01\\n XLS, NYSE, 4.44, 2013-03-02\\n XYL, NYSE, 2.17, 2013-02-08\\n NDRO, NYSE, 4.76, 2013-03-19\\n RNF, NYSE, -33.33, 2013-03-20\\n VAC, NYSE, 25.53, 2013-02-22\\n CHKR, NYSE, -7.25, 2013-03-16\\n PACD, NYSE, 14.29, 2013-02-28\\n INVN, NYSE, 0.00, 2013-01-24\\n DLPH, NYSE, 3.45, 2013-02-06\\n MN, NYSE, 0.00, 2013-02-14\\n RRMS, NYSE, -25.00, 2013-03-01\\n WPX, NYSE, -400.00, 2013-03-01\\n LPI, NYSE, 0.00, 2013-03-13\\n SN, NYSE, -80.00, 2013-03-07\\n KORS, NYSE, 60.00, 2013-02-13\\n BCEI, NYSE, -7.89, 2013-03-15\\n BOXC, NYSE, 4.78, 2013-01-29\\n PVG, NYSE, -25.00, 2013-03-06\\n POST, NYSE, 30.43, 2013-02-08\\n SLCA, NYSE, 32.26, 2013-02-27\\n MTDR, NYSE, -116.67, 2013-03-14\\n GWAY, NYSE, -200.00, 2013-02-13\\n EPAM, NYSE, -10.81, 2013-02-28\\n RNDY, NYSE, 5.56, 
2013-03-01\\n CPAC, NYSE, -13.33, 2013-02-21\\n PRLB, NYSE, 7.69, 2013-02-14\\n YELP, NYSE, -50.00, 2013-02-07\\n NSM, NYSE, 7.58, 2013-03-08\\n ALSN, NYSE, 257.14, 2013-02-20\\n DWRE, NYSE, 350.00, 2013-02-15\\n VNTV, NYSE, 16.13, 2013-02-21\\n ET, NYSE, 34.78, 2013-02-22\\n VIPS, NYSE, 1100.00, 2013-02-22\\n VCRA, NYSE, -33.33, 2013-02-28\\n RM, NYSE, -1.89, 2013-02-28\\n BNNY, NYSE, 0.00, 2013-02-12\\n MM, NYSE, 200.00, 2013-02-20\\n RXN, NYSE, -15.00, 2013-02-12\\n GLOG, NYSE, -20.00, 2013-02-28\\n PBA, NYSE, 44.44, 2013-03-02\\n RPAI, NYSE, 15.79, 2013-02-20\\n OAK, NYSE, 63.33, 2013-02-15\\n FET, NYSE, -3.45, 2013-02-15\\n MRC, NYSE, 17.02, 2013-02-22\\n PSX, NYSE, 21.18, 2013-01-31\\n TUMI, NYSE, 0.00, 2013-03-21\\n ACRE, NYSE, -38.10, 2013-04-02\\n EVER, NYSE, 17.24, 2013-01-31\\n PDH, NYSE, -13.79, 2013-02-07\\n WMC, NYSE, 3.23, 2013-04-03\\n WAGE, NYSE, 0.00, 2013-02-21\\n HTA, NYSE, 0.00, 2013-02-21\\n ALEX, NYSE, 42.86, 2013-02-20\\n BKW, NYSE, 53.33, 2013-02-16\\n EQM, NYSE, 51.22, 2013-01-25\\n NOW, NYSE, 38.46, 2013-01-31\\n EGL, NYSE, 18.46, 2013-03-13\\n NGVC, NYSE, 25.00, 2013-02-01\\n NTI, NYSE, -25.00, 2013-03-14\\n AMRE, NYSE, 4.35, 2013-02-20\\n GMED, NYSE, 15.79, 2013-02-28\\n MANU, NYSE, -46.43, 2013-02-15\\n HCLP, NYSE, -28.57, 2013-02-01\\n ADT, NYSE, 4.76, 2013-01-31\\n TRLA, NYSE, -20.00, 2013-02-13\\n SRC, NYSE, 8.82, 2013-02-28\\n NBHC, NYSE, -14.29, 2013-01-29\\n BSMX, NYSE, -4.17, 2013-02-19\\n HY, NYSE, 14.53, 2013-02-20\\n SMLP, NYSE, 40.00, 2013-03-14\\n DYN, NYSE, -1714.29, 2013-03-15\\n LXFR, NYSE, 43.75, 2013-03-12\\n LOCK, NYSE, 16.67, 2013-02-21\\n JMI, NYSE, 97.78, 2013-03-22\\n BERY, NYSE, -40.00, 2013-02-01\\n FLTX, NYSE, 0.00, 2013-02-21\\n ANFI, NYSE, 30.77, 2013-02-26\\n SSTK, NYSE, -100.00, 2013-02-22\\n SDLP, NYSE, 90.91, 2013-03-01\\n MPLX, NYSE, -25.00, 2013-01-31\\n WWAV, NYSE, 5.88, 2013-02-14\\n SXE, NYSE, -4121.43, 2013-03-29\\n DKL, NYSE, -5.56, 2013-03-06\\n RKUS, NYSE, -20.00, 2013-02-13\\n WGP, NYSE, 57.14, 2013-02-28\\n PBF, NYSE, -92.31, 2013-03-01\\n SBY, NYSE, 0.00, 2013-03-01\\n RIOM, NYSE, 77.78, 2013-03-29\\n BFAM, NYSE, -1186.36, 2013-03-27\\n ZTS, NYSE, -79.41, 2013-03-29\\n DDC, NYSE, -39.13, 2013-04-04\\n ABM, NYSE, 18.18, 2013-03-05\\n ANN, NYSE, 0.00, 2013-03-09\\n BBY, NYSE, 5.81, 2013-03-02\\n BF.B, NYSE, 4.29, 2013-03-07\\n BKE, NYSE, 2.40, 2013-03-15\\n BNS, NYSE, -3.17, 2013-03-06\\n BRC, NYSE, -22.45, 2013-02-22\\n CATO, NYSE, -3.57, 2013-03-22\\n COO, NYSE, 2.50, 2013-03-08\\n CPB, NYSE, 6.06, 2013-02-16\\n CFI, NYSE, 10.34, 2013-02-28\\n DCI, NYSE, -10.53, 2013-02-26\\n DDS, NYSE, -1.03, 2013-02-26\\n DE, NYSE, 17.02, 2013-02-14\\n DY, NYSE, 50.00, 2013-02-27\\n EV, NYSE, -3.85, 2013-02-21\\n ENZ, NYSE, -133.33, 2013-03-13\\n ESL, NYSE, 13.11, 2013-03-01\\nFCE.A, NYSE, 9.09, 2013-03-28\\n M, NYSE, 3.54, 2013-02-27\\n GCO, NYSE, 1.41, 2013-03-09\\n GPS, NYSE, 2.82, 2013-03-01\\n HD, NYSE, 4.69, 2013-02-27\\n HEI, NYSE, -12.50, 2013-02-21\\n HNZ, NYSE, 10.00, 2013-02-28\\n HOV, NYSE, -66.67, 2013-03-07\\n HRB, NYSE, -633.33, 2013-03-08\\n HRL, NYSE, -2.04, 2013-02-22\\n HPQ, NYSE, 15.49, 2013-02-22\\n JCP, NYSE, -926.32, 2013-02-28\\n KR, NYSE, 25.71, 2013-03-08\\n KSS, NYSE, 1.84, 2013-03-01\\n LB, NYSE, 1.15, 2013-02-28\\n LOW, NYSE, 13.04, 2013-02-26\\n LZB, NYSE, 16.67, 2013-02-20\\n MDT, NYSE, 2.20, 2013-02-20\\n MEI, NYSE, 350.00, 2013-03-01\\n MPR, NYSE, 0.00, 2013-03-22\\n NAV, NYSE, 14.11, 2013-03-08\\n JWN, NYSE, 4.48, 2013-02-22\\n ODC, NYSE, -35.42, 2013-03-12\\n OXM, NYSE, -5.80, 2013-04-03\\n PBY, 
NYSE, -225.00, 2013-04-16\\n PLL, NYSE, 8.96, 2013-02-28\\n PNY, NYSE, 1.72, 2013-03-07\\n PVH, NYSE, 6.67, 2013-03-28\\n THO, NYSE, 0.00, 2013-03-08\\n TIF, NYSE, 2.19, 2013-03-23\\n TJX, NYSE, 1.23, 2013-02-28\\n TOL, NYSE, -81.82, 2013-02-21\\n TTC, NYSE, 23.26, 2013-02-22\\n VAL, NYSE, -9.09, 2013-02-13\\n JW.A, NYSE, 13.41, 2013-03-08\\n WMT, NYSE, 6.37, 2013-02-22\\n WSM, NYSE, 4.69, 2013-03-20\\n FL, NYSE, -11.11, 2013-03-09\\n CHS, NYSE, 0.00, 2013-03-01\\n REX, NYSE, -800.00, 2013-03-29\\n BKS, NYSE, -136.00, 2013-03-01\\n CAL, NYSE, 75.00, 2013-03-16\\n SIG, NYSE, 1.44, 2013-03-29\\n ZLC, NYSE, -1.92, 2013-02-22\\n AEO, NYSE, 0.00, 2013-03-07\\n FGP, NYSE, -10.00, 2013-03-08\\n BMO, NYSE, 1.37, 2013-02-27\\n RY, NYSE, 0.75, 2013-03-01\\n GEF, NYSE, -13.21, 2013-02-28\\n MOV, NYSE, 70.83, 2013-03-22\\n SKS, NYSE, 13.33, 2013-02-27\\n TD, NYSE, 1.55, 2013-03-01\\n ANF, NYSE, 14.51, 2013-02-23\\n CIEN, NYSE, 116.00, 2013-03-08\\n KMG, NYSE, -17.65, 2013-03-09\\n IRET, NYSE, -5.88, 2013-03-13\\n CM, NYSE, 0.00, 2013-03-01\\nHEI.A, NYSE, -18.60, 2013-02-21\\n UBA, NYSE, 13.04, 2013-03-07\\n KFY, NYSE, 6.90, 2013-03-07\\n TGT, NYSE, 12.24, 2013-02-28\\n KKD, NYSE, 0.00, 2013-03-15\\n NDZ, NYSE, 0.00, 2013-03-06\\n MVC, NYSE, -20.00, 2013-03-08\\n CBK, NYSE, 52.17, 2013-03-14\\n SJM, NYSE, 7.30, 2013-02-16\\n BIG, NYSE, 5.03, 2013-03-07\\n IDT, NYSE, -7.14, 2013-03-08\\n JOY, NYSE, 14.91, 2013-02-28\\n SSI, NYSE, -5.93, 2013-03-13\\n GME, NYSE, 3.35, 2013-03-29\\n DKS, NYSE, -3.74, 2013-03-12\\n A, NYSE, -5.97, 2013-02-15\\n MTN, NYSE, -3.51, 2013-03-07\\n GES, NYSE, 10.47, 2013-03-21\\n CRM, NYSE, 66.67, 2013-03-01\\n NWY, NYSE, 25.00, 2013-03-22\\n PAY, NYSE, 8.11, 2013-03-06\\n DSW, NYSE, -4.17, 2013-03-20\\n NX, NYSE, -183.33, 2013-03-08\\n AGX, NYSE, 15.00, 2013-04-11\\n CMD, NYSE, -5.26, 2013-03-08\\n DG, NYSE, 7.78, 2013-03-26\\n EXPR, NYSE, 1.35, 2013-03-14\\n P, NYSE, 0.00, 2013-03-07\\n GWRE, NYSE, 181.82, 2013-02-27\\n BLOX, NYSE, -20.00, 2013-02-22\\n TLYS, NYSE, 6.67, 2013-03-21\\n PANW, NYSE, -250.00, 2013-03-01\\n WDAY, NYSE, 24.00, 2013-03-08\\n RH, NYSE, 4.92, 2013-04-19\\n AIR, NYSE, 4.55, 2013-03-20\\n ATU, NYSE, -5.41, 2013-03-21\\n AZO, NYSE, 0.84, 2013-02-27\\n AZZ, NYSE, 2.04, 2013-04-09\\n CAG, NYSE, -3.51, 2013-04-04\\n CLC, NYSE, 2.17, 2013-03-21\\n CMC, NYSE, -80.00, 2013-03-29\\n KMX, NYSE, 0.00, 2013-04-11\\n FC, NYSE, -27.27, 2013-04-05\\n FDO, NYSE, -0.82, 2013-04-11\\n FDX, NYSE, -10.87, 2013-03-21\\n FUL, NYSE, -3.92, 2013-03-28\\n GIS, NYSE, 12.28, 2013-03-21\\n KBH, NYSE, 30.43, 2013-03-22\\n LEN, NYSE, 100.00, 2013-03-21\\n LNN, NYSE, 16.28, 2013-03-28\\n LUB, NYSE, -100.00, 2013-03-21\\n MKC, NYSE, 1.79, 2013-04-03\\n RT, NYSE, 0.00, 2013-04-11\\n MSM, NYSE, 0.00, 2013-04-11\\n NKE, NYSE, 8.96, 2013-03-22\\n ORCL, NYSE, -1.56, 2013-03-21\\n PIR, NYSE, 0.00, 2013-04-12\\n PKE, NYSE, -21.43, 2013-05-10\\n RPM, NYSE, 16.67, 2013-04-05\\n SVU, NYSE, -200.00, 2013-04-25\\n TXI, NYSE, 25.00, 2013-03-28\\n UNF, NYSE, 18.75, 2013-03-28\\n WGO, NYSE, 37.50, 2013-03-29\\n WOR, NYSE, 6.12, 2013-03-22\\n JBL, NYSE, -2.17, 2013-03-21\\n GBX, NYSE, 21.62, 2013-04-05\\n DRI, NYSE, 0.99, 2013-03-23\\n FDS, NYSE, -21.24, 2013-03-20\\n SCS, NYSE, 0.00, 2013-03-28\\n SJR, NYSE, 5.56, 2013-04-13\\n RHT, NYSE, 19.05, 2013-03-28\\n OMN, NYSE, -75.00, 2013-04-04\\n MON, NYSE, 7.06, 2013-04-04\\n GPN, NYSE, -1.14, 2013-04-03\\n AYI, NYSE, 0.00, 2013-04-04\\n CCL, NYSE, 100.00, 2013-03-16\\n CUK, NYSE, 33.33, 2013-03-16\\n STZ, NYSE, 4.44, 2013-04-11\\n ACN, NYSE, 3.09, 
2013-03-29\\n SNX, NYSE, 1.15, 2013-03-28\\n TAL, NYSE, 50.00, 2013-04-24\\n IHS, NYSE, 11.90, 2013-03-22\\n EDU, NYSE, 63.64, 2013-04-25\\n KED, NYSE, -99.22, 2013-05-02\\n CORR, NYSE, -9.09, 2013-05-11\\n DFS, NYSE, 18.75, 2013-04-24\\n ZEP, NYSE, 54.55, 2013-04-10\\n MG, NYSE, -58.82, 2013-04-09\\n MOS, NYSE, 5.62, 2013-03-28\\n ABT, NYSE, 0.00, 2013-04-18\\n ABX, NYSE, 6.98, 2013-04-25\\n AB, NYSE, 8.57, 2013-05-02\\n ACO, NYSE, -10.64, 2013-04-27\\n ADM, NYSE, -5.88, 2013-05-01\\n AEM, NYSE, -35.29, 2013-04-26\\n AEP, NYSE, 0.00, 2013-04-27\\n AES, NYSE, -14.29, 2013-05-10\\n AET, NYSE, 8.70, 2013-05-01\\n AFL, NYSE, 4.32, 2013-04-25\\n AGCO, NYSE, 35.23, 2013-05-01\\n HES, NYSE, 24.20, 2013-04-25\\n AIG, NYSE, 52.27, 2013-05-03\\n AIN, NYSE, 0.00, 2013-05-02\\n AJG, NYSE, 33.33, 2013-05-01\\n ALU, NYSE, -81.82, 2013-04-27\\n MATX, NYSE, 31.25, 2013-05-07\\n ALK, NYSE, 15.09, 2013-04-26\\n ALX, NYSE, -2.56, 2013-05-07\\n BEAM, NYSE, 18.52, 2013-05-03\\n AME, NYSE, 3.92, 2013-04-26\\n TWX, NYSE, 9.33, 2013-05-02\\n AVD, NYSE, 47.50, 2013-05-03\\n AMN, NYSE, 33.33, 2013-05-03\\n AN, NYSE, 7.94, 2013-04-19\\n AON, NYSE, 0.00, 2013-04-27\\n APA, NYSE, -9.01, 2013-05-10\\n APC, NYSE, 17.39, 2013-05-07\\n APD, NYSE, 0.00, 2013-04-24\\n APH, NYSE, 1.16, 2013-04-19\\n ARG, NYSE, 0.88, 2013-05-03\\n AAN, NYSE, -5.63, 2013-04-26\\n ARW, NYSE, 3.49, 2013-05-02\\n ASGN, NYSE, 94.44, 2013-04-25\\n ASH, NYSE, 14.10, 2013-04-25\\n ASR, NYSE, -13.25, 2013-04-23\\n GAS, NYSE, -2.96, 2013-05-01\\n ATO, NYSE, 1.63, 2013-05-02\\n ATW, NYSE, 2.40, 2013-05-02\\n AU, NYSE, -26.67, 2013-05-14\\n AVP, NYSE, 85.71, 2013-05-01\\n AVT, NYSE, 3.45, 2013-04-26\\n AVY, NYSE, 3.51, 2013-04-25\\n AXP, NYSE, 3.60, 2013-04-18\\n B, NYSE, -11.11, 2013-04-27\\n BA, NYSE, 17.69, 2013-04-25\\n BAC, NYSE, -13.04, 2013-04-17\\n BAX, NYSE, 0.96, 2013-04-19\\n BC, NYSE, 22.58, 2013-04-26\\n OMX, NYSE, -52.17, 2013-05-08\\n BCE, NYSE, 10.00, 2013-05-10\\n BCR, NYSE, 0.00, 2013-04-24\\n BDX, NYSE, 6.67, 2013-05-03\\n BEN, NYSE, 8.47, 2013-05-01\\n BGG, NYSE, -17.59, 2013-04-20\\n BHE, NYSE, 10.00, 2013-04-26\\n BHI, NYSE, 4.84, 2013-04-20\\n BID, NYSE, -175.00, 2013-05-10\\n BIO, NYSE, -38.18, 2013-05-08\\n BK, NYSE, 9.62, 2013-04-18\\n BKH, NYSE, 19.18, 2013-05-03\\n WRB, NYSE, 0.00, 2013-04-24\\n BLC, NYSE, 6.67, 2013-04-26\\n BLL, NYSE, -9.38, 2013-04-26\\n BLX, NYSE, -21.82, 2013-04-18\\n BMI, NYSE, -58.33, 2013-04-17\\n BMS, NYSE, -1.85, 2013-04-26\\n BMY, NYSE, 0.00, 2013-04-26\\n BOH, NYSE, -6.90, 2013-04-23\\n BXS, NYSE, 4.76, 2013-04-23\\n BPL, NYSE, 19.44, 2013-05-04\\nBRK.A, NYSE, 197.70, 2013-05-04\\n BRO, NYSE, 5.13, 2013-04-16\\n BSX, NYSE, 0.00, 2013-04-26\\n MTRN, NYSE, -2.94, 2013-04-26\\n CAI, NYSE, -1.32, 2013-04-25\\n CAT, NYSE, -2.24, 2013-04-23\\n CB, NYSE, 12.44, 2013-04-23\\n CBI, NYSE, 15.49, 2013-05-03\\n CBM, NYSE, 85.00, 2013-05-04\\n CBU, NYSE, -1.96, 2013-04-24\\n CBT, NYSE, -7.25, 2013-05-01\\n CCC, NYSE, 20.00, 2013-05-07\\n CCE, NYSE, 2.63, 2013-04-26\\n C, NYSE, 9.32, 2013-04-16\\n CCK, NYSE, 4.17, 2013-04-18\\n CDE, NYSE, -74.07, 2013-05-10\\n CDI, NYSE, -40.91, 2013-05-03\\n CAH, NYSE, 26.32, 2013-05-03\\n CFR, NYSE, -4.21, 2013-04-25\\n CHD, NYSE, 5.56, 2013-05-03\\n CPK, NYSE, 14.93, 2013-05-03\\n CI, NYSE, 20.28, 2013-05-03\\n CIA, NYSE, 0.00, 2013-05-03\\n CKH, NYSE, -156.12, 2013-04-30\\n CL, NYSE, 0.00, 2013-04-26\\n CLF, NYSE, 87.50, 2013-04-25\\n CLH, NYSE, 25.81, 2013-05-02\\n CLX, NYSE, -5.66, 2013-05-02\\n CMA, NYSE, 4.48, 2013-04-17\\n CMO, NYSE, 3.33, 2013-04-25\\n CRK, NYSE, 
-11.36, 2013-04-30\\n CMS, NYSE, 15.22, 2013-04-26\\n CNA, NYSE, 21.13, 2013-05-01\\n CNW, NYSE, -29.63, 2013-05-02\\n CHG, NYSE, 19.00, 2013-05-10\\n CNL, NYSE, -8.33, 2013-04-30\\n COG, NYSE, -20.00, 2013-04-25\\n COT, NYSE, -100.00, 2013-05-02\\n CP, NYSE, 2.54, 2013-04-25\\n CPF, NYSE, 105.00, 2013-04-27\\n CQB, NYSE, 28.57, 2013-05-08\\n CR, NYSE, -0.95, 2013-04-23\\nCRD.B, NYSE, -29.17, 2013-05-09\\n CRS, NYSE, -9.21, 2013-04-26\\n CSC, NYSE, 32.29, 2013-05-16\\n CSL, NYSE, 0.00, 2013-04-25\\n CTB, NYSE, 31.82, 2013-05-10\\n CTL, NYSE, 10.14, 2013-05-09\\n CTS, NYSE, 16.67, 2013-04-24\\n CUB, NYSE, 52.24, 2013-05-03\\n CMI, NYSE, -22.58, 2013-05-01\\n CUZ, NYSE, -8.33, 2013-05-09\\n CVC, NYSE, -185.71, 2013-05-10\\n CVH, NYSE, 26.58, 2013-05-02\\n CW, NYSE, 28.21, 2013-05-02\\n CWT, NYSE, -200.00, 2013-05-02\\n CX, NYSE, -140.00, 2013-04-27\\n CYN, NYSE, -2.17, 2013-04-19\\n D, NYSE, -7.78, 2013-04-26\\n DBD, NYSE, -125.00, 2013-05-01\\n DCO, NYSE, -18.60, 2013-05-07\\n DD, NYSE, 1.30, 2013-04-24\\n CVA, NYSE, -61.54, 2013-04-18\\n DHR, NYSE, -1.32, 2013-04-19\\n DIS, NYSE, 2.60, 2013-05-08\\n DLX, NYSE, 3.41, 2013-04-26\\n DNB, NYSE, 2.26, 2013-05-03\\n RRD, NYSE, 12.12, 2013-04-26\\n DOV, NYSE, 1.85, 2013-04-18\\n DOW, NYSE, 15.00, 2013-04-26\\n DRE, NYSE, 0.00, 2013-04-25\\n DHI, NYSE, 60.00, 2013-04-27\\n UFS, NYSE, -35.37, 2013-04-26\\n DTE, NYSE, 30.10, 2013-04-27\\n DUK, NYSE, -1.92, 2013-05-04\\n DVN, NYSE, 17.86, 2013-05-02\\n DV, NYSE, 8.43, 2013-04-24\\n EAT, NYSE, 4.35, 2013-04-24\\n ECL, NYSE, 3.45, 2013-05-01\\n ED, NYSE, 4.85, 2013-05-03\\n EDE, NYSE, 11.11, 2013-04-26\\n EFX, NYSE, 0.00, 2013-04-25\\n EGN, NYSE, -7.32, 2013-04-30\\n EGP, NYSE, -1.30, 2013-04-19\\n ELP, NYSE, 0.00, 2013-05-17\\n ELY, NYSE, 65.00, 2013-04-26\\n EMC, NYSE, 3.23, 2013-04-25\\n EMR, NYSE, -1.28, 2013-05-08\\n EOG, NYSE, 59.29, 2013-05-07\\n EQT, NYSE, 26.92, 2013-04-26\\n ESE, NYSE, -17.65, 2013-05-08\\n ESV, NYSE, 5.43, 2013-04-30\\n ETN, NYSE, 6.33, 2013-04-30\\n ETR, NYSE, 0.00, 2013-04-26\\n EXAR, NYSE, 16.67, 2013-05-01\\n F, NYSE, 7.89, 2013-04-25\\n CLGX, NYSE, 8.11, 2013-04-25\\n FNB, NYSE, -4.76, 2013-04-24\\n FCF, NYSE, 0.00, 2013-04-24\\n FBP, NYSE, -122.22, 2013-05-04\\n FICO, NYSE, -9.38, 2013-04-25\\n FLO, NYSE, 6.98, 2013-05-17\\n FMC, NYSE, 1.85, 2013-05-01\\n FOE, NYSE, 66.67, 2013-04-25\\n S, NYSE, 38.24, 2013-04-25\\n NEE, NYSE, 10.89, 2013-05-01\\n FRT, NYSE, 0.88, 2013-05-02\\n FRX, NYSE, 47.06, 2013-04-24\\n FSS, NYSE, 20.00, 2013-05-07\\n FUN, NYSE, 24.32, 2013-05-09\\n FUR, NYSE, 77.78, 2013-05-03\\n GBL, NYSE, 17.86, 2013-05-08\\n GVA, NYSE, -103.85, 2013-05-10\\n BGC, NYSE, -319.23, 2013-05-01\\n GD, NYSE, 8.00, 2013-04-25\\n GE, NYSE, 11.43, 2013-04-20\\n RHP, NYSE, 26.47, 2013-05-08\\n AXLL, NYSE, -38.02, 2013-05-08\\n GGG, NYSE, 15.07, 2013-04-25\\n GHM, NYSE, 28.13, 2013-06-01\\n GIB, NYSE, 14.58, 2013-05-01\\n GLT, NYSE, 17.65, 2013-05-01\\n GLW, NYSE, 15.38, 2013-04-25\\n GSK, NYSE, 6.49, 2013-04-26\\n GLF, NYSE, 175.00, 2013-04-30\\n GNI, NYSE, -14.58, 2013-04-26\\n GPC, NYSE, -6.06, 2013-04-20\\n GRA, NYSE, 0.00, 2013-04-25\\n GTY, NYSE, 0.00, 2013-05-03\\n GWW, NYSE, 7.69, 2013-04-17\\n HAE, NYSE, 4.35, 2013-05-02\\n HAL, NYSE, 17.54, 2013-04-23\\n HAR, NYSE, 25.40, 2013-05-03\\n HVT, NYSE, 33.33, 2013-05-02\\n HRC, NYSE, -2.00, 2013-04-25\\n HCC, NYSE, 31.71, 2013-05-01\\n HCN, NYSE, 1.11, 2013-05-08\\n HCP, NYSE, 2.78, 2013-05-01\\n HOG, NYSE, 2.06, 2013-04-26\\n HE, NYSE, -12.82, 2013-05-09\\n HL, NYSE, -66.67, 2013-05-11\\n HMA, NYSE, 0.00, 
2013-05-03\\n HMC, NYSE, -28.57, 2013-04-27\\n HMN, NYSE, 7.84, 2013-04-25\\n HFC, NYSE, -7.91, 2013-05-08\\n HOT, NYSE, 43.40, 2013-05-01\\n HP, NYSE, 5.43, 2013-04-26\\n HLS, NYSE, 14.29, 2013-04-26\\n HRS, NYSE, 0.00, 2013-05-01\\n HSC, NYSE, 50.00, 2013-05-10\\n HSY, NYSE, 4.81, 2013-04-26\\n HUBB, NYSE, -0.90, 2013-04-19\\n HUM, NYSE, 51.12, 2013-05-02\\n HXL, NYSE, 4.88, 2013-04-23\\n IBM, NYSE, -1.96, 2013-04-19\\n IDA, NYSE, 17.54, 2013-05-03\\n IEX, NYSE, 4.23, 2013-04-23\\n IFF, NYSE, 5.31, 2013-05-08\\n DIN, NYSE, 12.87, 2013-05-03\\n INT, NYSE, 14.06, 2013-05-01\\n IP, NYSE, -12.16, 2013-05-03\\n IPG, NYSE, -7.69, 2013-04-20\\n IO, NYSE, -85.71, 2013-05-01\\n IR, NYSE, 2.44, 2013-04-24\\n IRF, NYSE, 27.50, 2013-04-30\\n ITW, NYSE, 0.00, 2013-04-24\\n JEC, NYSE, -2.44, 2013-04-30\\n JNJ, NYSE, 2.13, 2013-04-17\\n JNY, NYSE, 0.00, 2013-05-02\\n K, NYSE, 0.00, 2013-05-03\\n KAMN, NYSE, -2.94, 2013-04-30\\n KDN, NYSE, 5.71, 2013-05-10\\n KEX, NYSE, 2.15, 2013-04-25\\n KEY, NYSE, 5.00, 2013-04-19\\n KIM, NYSE, 3.13, 2013-05-01\\n KMB, NYSE, 10.45, 2013-04-20\\n KEM, NYSE, -133.33, 2013-05-10\\n KMT, NYSE, -8.45, 2013-04-26\\n KO, NYSE, 2.22, 2013-04-17\\n KSU, NYSE, 2.30, 2013-04-20\\n LDR, NYSE, -9.52, 2013-05-07\\n LEG, NYSE, -13.16, 2013-04-26\\n LLY, NYSE, 8.57, 2013-04-25\\n LM, NYSE, -13.33, 2013-05-01\\n LNC, NYSE, -7.27, 2013-05-02\\n LPX, NYSE, 0.00, 2013-05-08\\n LXU, NYSE, -110.53, 2013-05-07\\n LTC, NYSE, -1.67, 2013-05-01\\n L, NYSE, 1.19, 2013-04-30\\n LUV, NYSE, 133.33, 2013-04-26\\n LUX, NYSE, 7.14, 2013-05-02\\n MKL, NYSE, 40.11, 2013-05-01\\n MAN, NYSE, 40.00, 2013-04-20\\n MTW, NYSE, -35.71, 2013-05-01\\n SM, NYSE, 46.43, 2013-05-01\\n MAS, NYSE, -7.14, 2013-04-30\\n MTZ, NYSE, 12.50, 2013-05-03\\n MCD, NYSE, -0.79, 2013-04-20\\n MDC, NYSE, 73.08, 2013-05-03\\n MDP, NYSE, 4.35, 2013-04-26\\n MDR, NYSE, -40.00, 2013-05-09\\n MDU, NYSE, 36.36, 2013-05-01\\n MED, NYSE, 26.47, 2013-05-09\\n CVS, NYSE, 5.06, 2013-05-02\\n MFC, NYSE, 18.52, 2013-05-03\\n MGA, NYSE, 13.57, 2013-05-11\\n MGM, NYSE, 130.00, 2013-05-03\\n MMC, NYSE, 4.29, 2013-05-03\\n MMM, NYSE, -2.42, 2013-04-26\\n MSA, NYSE, -20.31, 2013-04-25\\n MNR, NYSE, -7.69, 2013-05-09\\n MO, NYSE, 1.89, 2013-04-26\\n MOD, NYSE, 5.88, 2013-05-31\\nMOG.A, NYSE, -1.23, 2013-04-27\\n MHK, NYSE, 3.57, 2013-05-03\\n MSI, NYSE, -1.79, 2013-04-25\\n MCY, NYSE, 46.81, 2013-04-30\\n MRK, NYSE, 8.97, 2013-05-02\\n MRO, NYSE, -28.17, 2013-05-08\\n POWR, NYSE, 0.00, 2013-05-09\\n MTG, NYSE, -60.00, 2013-05-01\\n MTB, NYSE, 6.19, 2013-04-16\\n MTX, NYSE, 0.00, 2013-04-26\\n MUR, NYSE, 11.34, 2013-05-02\\n MYE, NYSE, -11.11, 2013-04-25\\n NBL, NYSE, 21.31, 2013-04-26\\n NBR, NYSE, 13.79, 2013-04-24\\n NE, NYSE, 3.51, 2013-04-18\\n NEM, NYSE, -8.97, 2013-04-30\\n NFG, NYSE, 7.37, 2013-05-03\\n NHI, NYSE, 4.94, 2013-05-07\\n NI, NYSE, -1.43, 2013-05-01\\n NJR, NYSE, 3.16, 2013-05-03\\n THC, NYSE, 17.86, 2013-05-01\\n NNN, NYSE, 4.35, 2013-05-03\\n NOC, NYSE, 12.14, 2013-04-25\\n NR, NYSE, 5.88, 2013-04-26\\n NSC, NYSE, 3.39, 2013-04-24\\n NUE, NYSE, 4.00, 2013-04-19\\n NVR, NYSE, -9.64, 2013-04-23\\n NWL, NYSE, 9.38, 2013-05-04\\n NWN, NYSE, -5.41, 2013-05-03\\n NYT, NYSE, -20.00, 2013-04-26\\n OCR, NYSE, 4.65, 2013-04-25\\n OGE, NYSE, -32.35, 2013-05-03\\n OHI, NYSE, 5.08, 2013-05-08\\n OI, NYSE, 7.14, 2013-04-24\\n OII, NYSE, 16.95, 2013-04-24\\n OKE, NYSE, -6.90, 2013-05-01\\n OLN, NYSE, 10.64, 2013-04-26\\n BRS, NYSE, -1.94, 2013-05-23\\n OMC, NYSE, 1.33, 2013-04-19\\n OMI, NYSE, 4.76, 2013-04-24\\n ORB, NYSE, 43.48, 
2013-04-24\\n ORI, NYSE, 600.00, 2013-04-26\\n OSK, NYSE, 12.94, 2013-05-01\\n OXY, NYSE, 7.64, 2013-04-26\\n FCFS, NYSE, 0.00, 2013-04-18\\n PBI, NYSE, 0.00, 2013-05-01\\n PCG, NYSE, -10.00, 2013-05-03\\n PCL, NYSE, 9.38, 2013-04-30\\n PCP, NYSE, 1.81, 2013-05-10\\n TPC, NYSE, 34.78, 2013-05-02\\n PDS, NYSE, 14.29, 2013-04-26\\n PEG, NYSE, 14.86, 2013-05-01\\n PEI, NYSE, 4.76, 2013-04-23\\n PEP, NYSE, 8.45, 2013-04-19\\n PFE, NYSE, -1.82, 2013-05-01\\n PG, NYSE, 3.13, 2013-04-25\\n PGR, NYSE, -4.55, 2013-04-11\\n PH, NYSE, 0.60, 2013-04-26\\n PHM, NYSE, 31.25, 2013-04-26\\n PKD, NYSE, 200.00, 2013-05-02\\n PKY, NYSE, 15.38, 2013-05-07\\n PNC, NYSE, 12.10, 2013-04-18\\n PNM, NYSE, -10.00, 2013-05-07\\n PNR, NYSE, 3.57, 2013-04-24\\n PNW, NYSE, 175.00, 2013-05-04\\n POM, NYSE, -4.00, 2013-05-04\\n POT, NYSE, 3.28, 2013-04-26\\n PPG, NYSE, 1.28, 2013-04-19\\n PPL, NYSE, 0.00, 2013-05-03\\n PRGO, NYSE, -1.39, 2013-05-08\\n PL, NYSE, -4.30, 2013-05-07\\n PSB, NYSE, 0.00, 2013-05-07\\n WTR, NYSE, 7.41, 2013-05-02\\n CSH, NYSE, 8.21, 2013-04-26\\n PWR, NYSE, 24.14, 2013-05-03\\n PX, NYSE, 0.00, 2013-04-25\\n KWR, NYSE, 14.29, 2013-04-30\\n R, NYSE, 1.28, 2013-04-24\\n RBC, NYSE, -6.09, 2013-05-01\\n RDC, NYSE, 5.77, 2013-05-02\\n HTSI, NYSE, 11.67, 2013-05-03\\n RES, NYSE, -33.33, 2013-04-25\\n RGS, NYSE, -90.77, 2013-05-08\\n RGR, NYSE, 15.38, 2013-04-30\\n RHI, NYSE, -2.44, 2013-04-24\\n RJF, NYSE, -9.33, 2013-04-25\\n RLI, NYSE, -1.89, 2013-04-18\\n ROG, NYSE, 0.00, 2013-05-01\\n ROK, NYSE, 2.31, 2013-04-25\\n ROL, NYSE, -5.88, 2013-04-25\\n ROP, NYSE, 4.10, 2013-04-30\\n RTI, NYSE, 20.00, 2013-05-01\\n RTN, NYSE, 21.88, 2013-04-26\\n RYL, NYSE, 43.33, 2013-04-25\\n BSAC, NYSE, -21.74, 2013-04-26\\n T, NYSE, 0.00, 2013-04-24\\n SCG, NYSE, 7.77, 2013-04-26\\n SCHW, NYSE, -6.25, 2013-04-16\\n SCL, NYSE, -4.08, 2013-05-01\\n SMG, NYSE, -19.60, 2013-05-07\\n SEE, NYSE, -5.56, 2013-05-02\\n SF, NYSE, 1.75, 2013-05-10\\n SFE, NYSE, -46.15, 2013-04-26\\n SHW, NYSE, 2.78, 2013-04-19\\n SJI, NYSE, -8.43, 2013-05-04\\n JOE, NYSE, -200.00, 2013-05-09\\n SJW, NYSE, -12.50, 2013-04-25\\n SLB, NYSE, 2.02, 2013-04-20\\n HSH, NYSE, 9.38, 2013-05-03\\n AOS, NYSE, 24.68, 2013-04-24\\n SMP, NYSE, 31.25, 2013-05-04\\n SNA, NYSE, 4.48, 2013-04-19\\n PII, NYSE, 5.94, 2013-04-24\\n SNV, NYSE, 0.00, 2013-04-24\\n SO, NYSE, -3.92, 2013-04-25\\n SON, NYSE, -5.66, 2013-04-19\\n SPA, NYSE, -46.15, 2013-05-08\\n TRV, NYSE, 14.93, 2013-04-24\\n SR, NYSE, -3.36, 2013-05-01\\n NVE, NYSE, 12.50, 2013-05-04\\n SCI, NYSE, 21.74, 2013-04-25\\n SSP, NYSE, 58.33, 2013-05-07\\n STT, NYSE, 3.23, 2013-04-20\\n STI, NYSE, 3.28, 2013-04-20\\n STJ, NYSE, 0.00, 2013-04-18\\n STL, NYSE, 7.14, 2013-04-23\\n STR, NYSE, -2.38, 2013-05-01\\n STE, NYSE, 6.06, 2013-05-08\\n SYK, NYSE, 1.98, 2013-04-25\\n SUN, NYSE, -7.32, 2013-05-09\\n SUP, NYSE, 5.88, 2013-05-04\\n SWK, NYSE, 7.29, 2013-04-26\\n SWN, NYSE, 7.69, 2013-05-03\\n SWX, NYSE, 0.61, 2013-05-04\\n SWY, NYSE, -2.78, 2013-04-26\\n SYY, NYSE, 16.67, 2013-05-07\\n TAC, NYSE, -33.33, 2013-04-24\\n TNC, NYSE, -17.14, 2013-04-23\\n TCB, NYSE, -15.79, 2013-04-20\\n TCO, NYSE, 7.14, 2013-04-26\\n TDS, NYSE, 350.00, 2013-05-04\\n TDW, NYSE, 55.74, 2013-05-22\\n TDY, NYSE, 10.31, 2013-04-25\\n TE, NYSE, 11.76, 2013-05-01\\n TER, NYSE, 200.00, 2013-04-25\\n TEVA, NYSE, 1.82, 2013-05-03\\n TEX, NYSE, -17.86, 2013-04-25\\n TFX, NYSE, 1.98, 2013-05-01\\n TEN, NYSE, 10.77, 2013-04-30\\n TKR, NYSE, 0.00, 2013-04-25\\n TMK, NYSE, 1.46, 2013-04-24\\n TMO, NYSE, 6.20, 2013-04-25\\n TOT, NYSE, -2.38, 
2013-04-27\\n TM, NYSE, 80.67, 2013-05-09\\n TR, NYSE, -11.76, 2013-04-25\\n TRN, NYSE, 13.75, 2013-05-01\\n TRP, NYSE, -8.93, 2013-04-27\\n TSO, NYSE, 2.82, 2013-05-02\\n TSS, NYSE, -2.94, 2013-04-24\\n TTI, NYSE, -40.00, 2013-05-09\\n TXT, NYSE, -14.89, 2013-04-18\\n TYL, NYSE, 26.09, 2013-04-25\\n TSN, NYSE, -21.74, 2013-05-07\\n UDR, NYSE, 3.03, 2013-05-01\\n UFI, NYSE, -43.75, 2013-04-25\\n UAM, NYSE, 17.65, 2013-04-30\\n UHS, NYSE, 5.17, 2013-04-25\\n UIL, NYSE, 3.06, 2013-05-03\\n UIS, NYSE, -145.61, 2013-04-24\\n UNH, NYSE, 0.00, 2013-04-19\\n KMPR, NYSE, 35.85, 2013-05-03\\n UNM, NYSE, 2.56, 2013-05-02\\n UNP, NYSE, 3.57, 2013-04-19\\n UNT, NYSE, 6.98, 2013-05-08\\n URS, NYSE, -14.29, 2013-05-08\\n USG, NYSE, -88.89, 2013-04-25\\n MUX, NYSE, -300.00, 2013-05-10\\n USM, NYSE, 214.29, 2013-05-04\\n USPH, NYSE, -3.12, 2013-05-10\\n UTL, NYSE, -9.20, 2013-04-24\\n UTX, NYSE, -1.54, 2013-04-24\\n VMI, NYSE, 15.60, 2013-04-19\\n VAR, NYSE, 2.97, 2013-04-25\\n CBS, NYSE, 7.35, 2013-05-02\\n VLO, NYSE, 16.83, 2013-05-01\\n VMC, NYSE, -24.32, 2013-05-03\\n VLY, NYSE, -11.11, 2013-04-25\\n VNO, NYSE, -38.38, 2013-05-07\\n VSH, NYSE, 63.64, 2013-05-01\\n WTS, NYSE, -14.04, 2013-05-01\\n WBS, NYSE, -2.22, 2013-04-16\\n WEC, NYSE, 7.04, 2013-05-01\\n WFC, NYSE, 5.75, 2013-04-13\\n WG, NYSE, -2400.00, 2013-05-09\\n WGL, NYSE, 19.05, 2013-05-02\\n WHR, NYSE, 1.03, 2013-04-25\\n WMB, NYSE, -8.33, 2013-05-08\\n WNC, NYSE, 0.00, 2013-05-01\\n TEG, NYSE, 10.69, 2013-05-02\\n WR, NYSE, 33.33, 2013-05-09\\n WRE, NYSE, -4.35, 2013-04-26\\n WRI, NYSE, 4.35, 2013-05-01\\n WPP, NYSE, 33.33, 2013-04-30\\n WSO, NYSE, 18.18, 2013-04-19\\n WST, NYSE, 1.16, 2013-05-03\\n WWW, NYSE, 50.00, 2013-04-17\\n WY, NYSE, 18.18, 2013-04-27\\n X, NYSE, -84.21, 2013-05-01\\n XL, NYSE, 38.81, 2013-05-03\\n XOM, NYSE, 4.43, 2013-04-26\\n XRX, NYSE, 12.50, 2013-04-24\\n Y, NYSE, 53.96, 2013-05-07\\n HRG, NYSE, 60.00, 2013-05-10\\n CRY, NYSE, 28.57, 2013-05-01\\n CHK, NYSE, 30.43, 2013-05-02\\n DDR, NYSE, 0.00, 2013-05-01\\n ELS, NYSE, 0.71, 2013-04-23\\n ALG, NYSE, 5.56, 2013-05-02\\n ETH, NYSE, -22.22, 2013-04-24\\n ATR, NYSE, -3.03, 2013-04-26\\n GGP, NYSE, 4.17, 2013-04-30\\n MSL, NYSE, 3.70, 2013-05-01\\n RCL, NYSE, 84.21, 2013-04-26\\n CWEI, NYSE, -61.22, 2013-04-25\\n HR, NYSE, 0.00, 2013-05-02\\n RGA, NYSE, 2.48, 2013-04-26\\n RIG, NYSE, -7.92, 2013-05-09\\n SKT, NYSE, 2.44, 2013-05-01\\n TWI, NYSE, -16.28, 2013-04-25\\n BDN, NYSE, 2.94, 2013-04-25\\n KGC, NYSE, 25.00, 2013-05-08\\n CPT, NYSE, 2.11, 2013-05-03\\n SGY, NYSE, 18.84, 2013-05-07\\n BFS, NYSE, -24.49, 2013-05-01\\n BWA, NYSE, 6.56, 2013-04-26\\n EQR, NYSE, -1.54, 2013-05-01\\n CLP, NYSE, 3.03, 2013-04-26\\n KOF, NYSE, -16.24, 2013-04-25\\n OKS, NYSE, -27.59, 2013-05-01\\n SQM, NYSE, -6.45, 2013-05-29\\n BYD, NYSE, 114.29, 2013-04-25\\n CBL, NYSE, 3.92, 2013-04-30\\n DECK, NYSE, 133.33, 2013-04-26\\n IT, NYSE, -2.50, 2013-05-03\\n HST, NYSE, 21.74, 2013-05-04\\n LXP, NYSE, 0.00, 2013-05-03\\n REG, NYSE, 3.23, 2013-05-08\\n TUC, NYSE, -24.00, 2013-05-03\\n AF, NYSE, 7.69, 2013-04-18\\n BFR, NYSE, -2.56, 2013-05-11\\n HHS, NYSE, 10.00, 2013-04-26\\n MHO, NYSE, 28.57, 2013-04-26\\n NFX, NYSE, -2.17, 2013-04-24\\n SPG, NYSE, 1.99, 2013-04-27\\n SU, NYSE, -1.41, 2013-04-30\\n SUI, NYSE, 2.20, 2013-04-26\\n TV, NYSE, -22.50, 2013-04-26\\n CGI, NYSE, -26.92, 2013-04-26\\n CYT, NYSE, -12.79, 2013-04-19\\n EMN, NYSE, 3.18, 2013-04-26\\n GRT, NYSE, 14.29, 2013-04-25\\n MAA, NYSE, 5.04, 2013-05-02\\n PLT, NYSE, 4.62, 2013-05-08\\n BZH, NYSE, 15.38, 2013-05-03\\n ELX, 
NYSE, 114.29, 2013-05-03\\n MLM, NYSE, -69.44, 2013-05-01\\n AKS, NYSE, 41.67, 2013-04-24\\n ALB, NYSE, -7.00, 2013-04-18\\n VRX, NYSE, 1.56, 2013-05-03\\n CBR, NYSE, 0.00, 2013-05-01\\n MAC, NYSE, 8.86, 2013-05-02\\n RKT, NYSE, 9.80, 2013-04-24\\n RYN, NYSE, 27.42, 2013-04-26\\n ADC, NYSE, -2.00, 2013-04-30\\nBRK.B, NYSE, 52.31, 2013-05-04\\n EXP, NYSE, 5.00, 2013-05-15\\n GGB, NYSE, -66.67, 2013-05-08\\n SSD, NYSE, -52.38, 2013-04-26\\n ESS, NYSE, -0.53, 2013-05-02\\n FR, NYSE, -7.69, 2013-04-26\\n HIW, NYSE, -2.90, 2013-05-01\\n IMAX, NYSE, 0.00, 2013-04-26\\n AIV, NYSE, 2.13, 2013-05-03\\n FCH, NYSE, 0.00, 2013-05-01\\n ITGR, NYSE, 2.33, 2013-04-26\\n NOK, NYSE, 33.33, 2013-04-19\\n GEO, NYSE, -3.51, 2013-05-09\\n CLI, NYSE, 0.00, 2013-04-26\\n RS, NYSE, -5.22, 2013-04-26\\n CPE, NYSE, 100.00, 2013-05-10\\n KNX, NYSE, 0.00, 2013-04-25\\n O, NYSE, 1.69, 2013-04-26\\n COF, NYSE, 17.79, 2013-04-19\\n IRS, NYSE, 10.34, 2013-05-18\\n MCK, NYSE, -0.43, 2013-05-08\\n SWC, NYSE, 200.00, 2013-04-30\\n STM, NYSE, 23.53, 2013-04-23\\n TEO, NYSE, 1.30, 2013-04-30\\n TRK, NYSE, -400.00, 2013-05-02\\n LMT, NYSE, 23.38, 2013-04-24\\n APU, NYSE, -35.48, 2013-05-16\\n AGU, NYSE, -12.15, 2013-05-10\\n LH, NYSE, -1.69, 2013-04-20\\n DDD, NYSE, -10.00, 2013-05-01\\n AFG, NYSE, 10.84, 2013-05-09\\n RMD, NYSE, 3.51, 2013-04-26\\n WAB, NYSE, 3.60, 2013-04-25\\n CIB, NYSE, 6.78, 2013-05-08\\n CAM, NYSE, -5.41, 2013-04-26\\n FCX, NYSE, 1.39, 2013-04-19\\n RNR, NYSE, 34.25, 2013-05-02\\n AVX, NYSE, 7.14, 2013-04-25\\n RWT, NYSE, 46.81, 2013-05-03\\n AXE, NYSE, -6.62, 2013-04-24\\n CLB, NYSE, 6.09, 2013-04-18\\n MD, NYSE, 0.92, 2013-05-03\\n THG, NYSE, 30.69, 2013-04-30\\n BAP, NYSE, -10.94, 2013-05-07\\n DO, NYSE, 10.43, 2013-04-26\\n RE, NYSE, 36.11, 2013-04-23\\n DST, NYSE, -6.60, 2013-04-26\\n EL, NYSE, 36.36, 2013-05-03\\n ESC, NYSE, -57.14, 2013-05-03\\n LXK, NYSE, -7.55, 2013-04-24\\n MIG, NYSE, 7.69, 2013-05-01\\n WAT, NYSE, -1.83, 2013-04-24\\n EME, NYSE, 2.27, 2013-04-26\\n HIG, NYSE, 10.84, 2013-04-30\\n ITT, NYSE, 9.30, 2013-05-03\\n SPN, NYSE, 0.00, 2013-04-26\\n SWM, NYSE, 8.60, 2013-05-09\\n SCCO, NYSE, -4.84, 2013-04-27\\n RCI, NYSE, -1.27, 2013-04-23\\n EIX, NYSE, 20.31, 2013-05-01\\n IRM, NYSE, 0.00, 2013-05-02\\n SPH, NYSE, -4.82, 2013-05-10\\n CCJ, NYSE, 0.00, 2013-05-02\\n PGI, NYSE, 0.00, 2013-04-19\\n CRR, NYSE, -14.61, 2013-04-26\\n BVN, NYSE, -40.30, 2013-04-30\\n FCN, NYSE, 13.46, 2013-05-10\\n RPT, NYSE, 6.90, 2013-04-24\\n TUP, NYSE, 4.42, 2013-04-25\\n ASB, NYSE, 8.00, 2013-04-19\\n GWR, NYSE, -10.11, 2013-05-02\\n TBI, NYSE, -50.00, 2013-04-25\\n FFG, NYSE, 12.66, 2013-05-03\\n USNA, NYSE, 14.29, 2013-04-24\\n CSV, NYSE, -3.03, 2013-05-08\\n LVB, NYSE, 10.53, 2013-05-09\\n ALR, NYSE, 6.25, 2013-05-10\\n OCN, NYSE, 0.00, 2013-05-03\\n PAA, NYSE, 37.50, 2013-05-07\\n DNR, NYSE, 13.79, 2013-05-03\\n HMY, NYSE, -119.23, 2013-05-04\\n TGI, NYSE, 5.66, 2013-05-02\\n PAG, NYSE, 1.61, 2013-04-30\\n GEL, NYSE, -17.65, 2013-05-03\\n IM, NYSE, 0.00, 2013-04-26\\n NUS, NYSE, 13.92, 2013-05-03\\n CNI, NYSE, -1.67, 2013-04-23\\n LAD, NYSE, 16.67, 2013-04-25\\n NSP, NYSE, 0.00, 2013-04-30\\n DGX, NYSE, -14.42, 2013-04-18\\n KRC, NYSE, 0.00, 2013-05-01\\n MTH, NYSE, 32.00, 2013-04-25\\n NCR, NYSE, 35.00, 2013-05-01\\n OFG, NYSE, 2.78, 2013-04-26\\n IVZ, NYSE, 10.64, 2013-05-01\\n DX, NYSE, 9.68, 2013-05-02\\n FBC, NYSE, -65.98, 2013-04-24\\n ALV, NYSE, 1.57, 2013-04-27\\n ARE, NYSE, 0.00, 2013-04-30\\n BBT, NYSE, 2.99, 2013-04-19\\n CGG, NYSE, 6.25, 2013-05-04\\n BXP, NYSE, -0.83, 2013-05-01\\n 
CBD, NYSE, -23.73, 2013-05-01\\n MS, NYSE, 7.02, 2013-04-19\\n SRT, NYSE, -314.29, 2013-05-10\\n HLX, NYSE, 38.89, 2013-04-22\\n FLS, NYSE, 3.61, 2013-04-25\\n MT, NYSE, -400.00, 2013-05-11\\n PXD, NYSE, 5.15, 2013-05-02\\n SLG, NYSE, 0.83, 2013-04-24\\n NAT, NYSE, -16.22, 2013-05-14\\n CSU, NYSE, -36.36, 2013-05-07\\n DRQ, NYSE, 22.50, 2013-05-04\\n FDP, NYSE, -24.47, 2013-05-01\\n NLY, NYSE, 30.56, 2013-05-02\\n TLM, NYSE, -250.00, 2013-05-02\\n TSM, NYSE, 13.04, 2013-04-19\\n YUM, NYSE, 12.90, 2013-04-24\\n AMG, NYSE, 12.38, 2013-05-01\\n EPR, NYSE, -1.05, 2013-05-01\\n FE, NYSE, 10.14, 2013-05-08\\n LFL, NYSE, 80.00, 2013-05-15\\n MTD, NYSE, 2.79, 2013-05-03\\n SID, NYSE, -66.67, 2013-05-16\\n IN, NYSE, -271.43, 2013-05-04\\n CBZ, NYSE, 25.64, 2013-05-03\\n URI, NYSE, 11.54, 2013-04-17\\n INGR, NYSE, 6.82, 2013-05-03\\n RAS, NYSE, 181.82, 2013-05-03\\n UNS, NYSE, 35.00, 2013-04-30\\n ASI, NYSE, 18.92, 2013-05-09\\n ANH, NYSE, 15.38, 2013-04-30\\n OFC, NYSE, 17.07, 2013-04-27\\n GPX, NYSE, 0.00, 2013-05-03\\n WAC, NYSE, 1427.27, 2013-05-10\\n RBA, NYSE, -13.33, 2013-05-01\\n WDR, NYSE, 1.61, 2013-04-24\\n LHO, NYSE, 8.00, 2013-04-18\\n LNT, NYSE, 18.03, 2013-05-04\\n LVLT, NYSE, 7.14, 2013-04-26\\n MFA, NYSE, -4.76, 2013-05-02\\n OME, NYSE, 50.00, 2013-05-08\\n EQY, NYSE, 6.90, 2013-05-02\\n FII, NYSE, -2.38, 2013-04-26\\n FMX, NYSE, -37.89, 2013-04-25\\n LLL, NYSE, 3.63, 2013-04-26\\n VTR, NYSE, 4.04, 2013-04-27\\n WCN, NYSE, 20.00, 2013-05-02\\n AVB, NYSE, 0.74, 2013-05-01\\n GIL, NYSE, 5.36, 2013-05-03\\n HZO, NYSE, -92.86, 2013-04-26\\n AWR, NYSE, 38.00, 2013-05-11\\n CLS, NYSE, 10.00, 2013-04-24\\n EPD, NYSE, 16.67, 2013-05-01\\n RSG, NYSE, 15.00, 2013-04-26\\n WM, NYSE, -2.44, 2013-04-25\\n AKR, NYSE, 3.33, 2013-04-24\\n CVG, NYSE, 17.39, 2013-05-01\\n RRC, NYSE, -38.89, 2013-04-26\\n SAP, NYSE, 41.51, 2013-04-20\\n CCI, NYSE, 0.00, 2013-04-25\\n PQ, NYSE, 100.00, 2013-05-08\\n WFT, NYSE, 0.00, 2013-05-03\\n CAA, NYSE, 0.00, 2013-05-03\\n ENB, NYSE, 13.21, 2013-05-09\\n GMK, NYSE, 60.00, 2013-04-25\\n MMR, NYSE, 0.00, 2013-05-07\\n PB, NYSE, 2.38, 2013-04-25\\n VIV, NYSE, -20.00, 2013-05-08\\n AXL, NYSE, 53.33, 2013-05-04\\n BP, NYSE, 33.33, 2013-05-01\\n ETM, NYSE, 0.00, 2013-05-09\\n HT, NYSE, 0.00, 2013-05-01\\n BYI, NYSE, 10.71, 2013-04-25\\n CEB, NYSE, 1.64, 2013-05-02\\n INFY, NYSE, 5.41, 2013-04-13\\n JLL, NYSE, 56.52, 2013-05-01\\n AZN, NYSE, 5.22, 2013-04-26\\n SFG, NYSE, 33.75, 2013-04-24\\n TREX, NYSE, 14.68, 2013-05-04\\n GS, NYSE, 11.43, 2013-04-17\\n SYX, NYSE, -157.14, 2013-05-01\\n WCC, NYSE, -4.27, 2013-04-19\\n JNPR, NYSE, 33.33, 2013-04-24\\n RDN, NYSE, 28.57, 2013-05-02\\n RAI, NYSE, 4.35, 2013-04-24\\n SKX, NYSE, -27.78, 2013-05-16\\n WTM, NYSE, 178.02, 2013-04-30\\n NCI, NYSE, 12.50, 2013-04-26\\n BLT, NYSE, -17.39, 2013-05-08\\n QTM, NYSE, -33.33, 2013-05-09\\n BLK, NYSE, 1.67, 2013-04-17\\n CIR, NYSE, 4.00, 2013-05-03\\n MSO, NYSE, 12.50, 2013-05-01\\n PKG, NYSE, 10.71, 2013-04-23\\n PKI, NYSE, -25.00, 2013-04-26\\n WWE, NYSE, -37.50, 2013-05-03\\n SNN, NYSE, -2.11, 2013-05-03\\n UPS, NYSE, 2.97, 2013-04-26\\n XOXO, NYSE, 16.67, 2013-05-10\\n SLF, NYSE, 7.25, 2013-05-09\\n CDR, NYSE, 9.09, 2013-05-10\\n EW, NYSE, -5.26, 2013-04-24\\n MET, NYSE, 13.85, 2013-05-01\\n FBR, NYSE, -89.47, 2013-04-24\\n VVC, NYSE, -7.58, 2013-05-02\\n BAM, NYSE, 70.00, 2013-05-10\\n NVS, NYSE, 4.00, 2013-04-25\\n BHLB, NYSE, -1.82, 2013-04-30\\n CRL, NYSE, -2.82, 2013-05-02\\n CYH, NYSE, 3.57, 2013-04-30\\n MBT, NYSE, -13.04, 2013-06-08\\n MTOR, NYSE, 500.00, 2013-05-01\\n CNQ, 
NYSE, -44.19, 2013-05-03\\n ERJ, NYSE, -62.79, 2013-04-30\\n VZ, NYSE, 3.03, 2013-04-19\\n EVC, NYSE, 0.00, 2013-05-03\\n PBR, NYSE, 0.00, 2013-04-27\\n XEL, NYSE, 11.63, 2013-05-03\\n ALE, NYSE, 10.67, 2013-05-09\\n HW, NYSE, -30.00, 2013-05-01\\n POL, NYSE, 14.81, 2013-05-02\\n COH, NYSE, 3.70, 2013-04-24\\n CXW, NYSE, 6.38, 2013-05-09\\n DVA, NYSE, 3.37, 2013-05-08\\n EXC, NYSE, 4.41, 2013-05-02\\n MCO, NYSE, 11.49, 2013-05-04\\n BRFS, NYSE, 23.53, 2013-04-30\\n TU, NYSE, 3.77, 2013-05-10\\n WIT, NYSE, 0.00, 2013-04-20\\n ERF, NYSE, 100.00, 2013-05-11\\n GG, NYSE, -35.00, 2013-05-03\\n HNT, NYSE, 34.15, 2013-04-30\\n NYCB, NYSE, 3.85, 2013-04-25\\n SXT, NYSE, 3.33, 2013-04-19\\n CPG, NYSE, -20.00, 2013-05-10\\n AMX, NYSE, 16.67, 2013-04-20\\n MPX, NYSE, 0.00, 2013-04-25\\n OIS, NYSE, -2.70, 2013-04-25\\n MMP, NYSE, 4.08, 2013-05-03\\n PES, NYSE, 33.33, 2013-05-01\\n ABB, NYSE, -12.12, 2013-04-25\\n KMR, NYSE, -3.28, 2013-05-02\\n GEN, NYSE, -41.18, 2013-05-07\\n ADS, NYSE, -2.88, 2013-04-19\\n CVI, NYSE, 25.00, 2013-05-03\\n FTI, NYSE, -6.52, 2013-04-24\\n PRA, NYSE, 27.63, 2013-05-07\\n STO, NYSE, -16.46, 2013-05-03\\n BEL, NYSE, 41.67, 2013-05-02\\n FIS, NYSE, 1.64, 2013-05-01\\n COL, NYSE, 0.86, 2013-04-20\\n KAI, NYSE, 20.51, 2013-04-30\\n ABC, NYSE, -2.25, 2013-04-26\\n BG, NYSE, 18.56, 2013-04-26\\n FRO, NYSE, 27.08, 2013-05-31\\n ECA, NYSE, 150.00, 2013-04-24\\n CIG, NYSE, 108.33, 2013-05-17\\n EEP, NYSE, 16.67, 2013-05-01\\n CVX, NYSE, 3.25, 2013-04-27\\n GXP, NYSE, 41.67, 2013-05-10\\n JHX, NYSE, -2.78, 2013-05-24\\n PFG, NYSE, 5.33, 2013-04-26\\n PVR, NYSE, 14.29, 2013-04-26\\n AAP, NYSE, 2.48, 2013-05-24\\n KND, NYSE, 36.11, 2013-05-02\\n WTW, NYSE, 38.10, 2013-05-03\\n CNC, NYSE, 5.00, 2013-04-24\\n BCH, NYSE, 3.70, 2013-05-09\\n NS, NYSE, -86.67, 2013-04-25\\n ITUB, NYSE, -4.88, 2013-04-26\\n SXL, NYSE, 26.74, 2013-05-09\\n VALE, NYSE, 50.00, 2013-04-25\\n TNP, NYSE, 150.00, 2013-05-25\\n LCI, NYSE, 40.00, 2013-05-09\\n GTI, NYSE, 50.00, 2013-04-26\\n HNR, NYSE, -26.67, 2013-06-06\\n MWE, NYSE, -90.00, 2013-05-09\\n NLS, NYSE, 50.00, 2013-05-07\\n RGC, NYSE, -7.14, 2013-05-01\\n JAH, NYSE, 30.43, 2013-04-25\\n NPO, NYSE, -23.29, 2013-05-03\\n TRI, NYSE, 22.58, 2013-05-01\\n CAE, NYSE, 10.53, 2013-05-17\\n LF, NYSE, 28.57, 2013-05-02\\n SNY, NYSE, -10.11, 2013-05-03\\n BANC, NYSE, 400.00, 2013-05-09\\n COP, NYSE, 0.00, 2013-04-26\\n CNP, NYSE, -8.11, 2013-05-03\\n EEQ, NYSE, -321.43, 2013-05-02\\n MRH, NYSE, 32.58, 2013-04-25\\n NGS, NYSE, 23.08, 2013-05-10\\n NRP, NYSE, 4.88, 2013-05-07\\n PXP, NYSE, 17.98, 2013-05-03\\n XEC, NYSE, -0.93, 2013-05-08\\n IAG, NYSE, 7.14, 2013-05-08\\n EGO, NYSE, 0.00, 2013-05-03\\n JNS, NYSE, -6.25, 2013-04-24\\n PFS, NYSE, 14.81, 2013-04-27\\n ENH, NYSE, 74.79, 2013-05-02\\n CNX, NYSE, -5.00, 2013-04-26\\n AMT, NYSE, -10.42, 2013-05-02\\n ABG, NYSE, 13.43, 2013-04-25\\n LII, NYSE, 22.22, 2013-04-23\\n SRE, NYSE, -4.90, 2013-05-03\\n AEE, NYSE, -21.43, 2013-05-03\\n PLD, NYSE, 0.00, 2013-04-25\\n SAH, NYSE, -2.38, 2013-04-24\\n GPI, NYSE, 11.54, 2013-05-03\\n FIX, NYSE, 800.00, 2013-05-02\\n MMS, NYSE, 1.41, 2013-05-10\\n SRI, NYSE, 50.00, 2013-05-10\\n RTEC, NYSE, 50.00, 2013-05-03\\n NOV, NYSE, -5.84, 2013-04-27\\n DF, NYSE, 11.54, 2013-05-10\\n SAM, NYSE, -17.74, 2013-05-02\\n RL, NYSE, 8.46, 2013-05-24\\n FLR, NYSE, 6.25, 2013-05-03\\n ALL, NYSE, 2.27, 2013-05-02\\n ATI, NYSE, 0.00, 2013-04-25\\n EE, NYSE, 72.73, 2013-05-02\\n AIT, NYSE, 0.00, 2013-05-03\\n CHH, NYSE, -3.70, 2013-04-30\\n FMS, NYSE, -17.78, 2013-05-01\\n BCO, NYSE, 16.67, 
2013-04-26\\n CBB, NYSE, 133.33, 2013-05-10\\n MWW, NYSE, 14.29, 2013-05-03\\n PSA, NYSE, -3.09, 2013-05-10\\n E, NYSE, 0.00, 2013-04-25\\n JPM, NYSE, 15.22, 2013-04-13\\n USB, NYSE, 0.00, 2013-04-17\\n HON, NYSE, 6.14, 2013-04-20\\n ITG, NYSE, 50.00, 2013-05-03\\n ARB, NYSE, -15.49, 2013-05-08\\n APL, NYSE, -28.95, 2013-04-30\\n AVA, NYSE, 0.00, 2013-05-02\\n AXS, NYSE, 85.71, 2013-04-26\\n MOH, NYSE, 146.15, 2013-04-26\\n CVD, NYSE, 4.17, 2013-05-02\\n AHT, NYSE, 2.94, 2013-05-09\\n GPK, NYSE, 25.00, 2013-04-26\\n CNO, NYSE, 0.00, 2013-04-25\\n AUQ, NYSE, -60.00, 2013-05-10\\n NFP, NYSE, -5.45, 2013-05-04\\n CRI, NYSE, 12.86, 2013-05-10\\n FMD, NYSE, 27.27, 2013-04-30\\n FPO, NYSE, 3.45, 2013-04-26\\n TRQ, NYSE, -25.00, 2013-05-14\\n WLL, NYSE, 2.17, 2013-04-25\\n AEL, NYSE, 11.36, 2013-05-02\\n AHL, NYSE, 0.95, 2013-04-25\\n AUY, NYSE, -23.81, 2013-05-01\\n CMP, NYSE, 24.32, 2013-04-30\\n KRO, NYSE, -800.00, 2013-05-09\\n TPX, NYSE, 3.33, 2013-05-03\\n UTI, NYSE, -300.00, 2013-05-01\\n PJC, NYSE, 9.09, 2013-04-18\\n TRW, NYSE, 3.42, 2013-05-01\\n AIZ, NYSE, -14.56, 2013-04-25\\n HTH, NYSE, 11.43, 2013-05-07\\n ETP, NYSE, 33.33, 2013-05-09\\n LSE, NYSE, 0.00, 2013-05-09\\n BBD, NYSE, 0.00, 2013-04-23\\n NRG, NYSE, -37.04, 2013-05-08\\n HOS, NYSE, 96.67, 2013-05-02\\n ABR, NYSE, 84.62, 2013-05-04\\n FHN, NYSE, 0.00, 2013-04-20\\n AGO, NYSE, 86.11, 2013-05-10\\n HSP, NYSE, 18.18, 2013-05-02\\n HNI, NYSE, 250.00, 2013-04-18\\n GHL, NYSE, -34.78, 2013-04-18\\n XPO, NYSE, -16.44, 2013-05-08\\n CVO, NYSE, -200.00, 2013-05-09\\n CHE, NYSE, 9.92, 2013-04-19\\n GNW, NYSE, 11.11, 2013-05-01\\n CBG, NYSE, -5.88, 2013-04-26\\n SFL, NYSE, -43.33, 2013-05-31\\n NEU, NYSE, 3.28, 2013-04-25\\n GOL, NYSE, -1200.00, 2013-05-14\\n CAB, NYSE, 18.64, 2013-04-26\\n LTM, NYSE, 3.08, 2013-04-26\\n VVI, NYSE, 68.00, 2013-04-27\\n WCG, NYSE, -8.70, 2013-05-04\\n HEP, NYSE, -36.36, 2013-05-01\\n DPZ, NYSE, 5.36, 2013-05-01\\n BDC, NYSE, 6.33, 2013-05-03\\n ENS, NYSE, 2.56, 2013-05-29\\n BMR, NYSE, 7.89, 2013-05-02\\n ACC, NYSE, -1.54, 2013-04-24\\n KRG, NYSE, 27.27, 2013-05-03\\n WLK, NYSE, 42.64, 2013-05-07\\n EXR, NYSE, 4.55, 2013-04-30\\n CNS, NYSE, 7.32, 2013-04-18\\n IOC, NYSE, 161.54, 2013-05-14\\n STON, NYSE, -150.00, 2013-05-08\\n TTM, NYSE, 60.56, 2013-05-30\\n CPL, NYSE, 7.69, 2013-05-11\\n TPGI, NYSE, -460.00, 2013-05-07\\n SHO, NYSE, 0.00, 2013-05-07\\n CUBE, NYSE, 0.00, 2013-05-03\\n NRF, NYSE, -51.35, 2013-05-04\\n DLR, NYSE, -1.69, 2013-04-27\\n MTL, NYSE, 100.00, 2013-06-19\\n NWE, NYSE, 8.60, 2013-04-26\\n ORA, NYSE, 550.00, 2013-05-08\\n NP, NYSE, 7.25, 2013-05-09\\n SMA, NYSE, -73.33, 2013-05-03\\n BBG, NYSE, -2600.00, 2013-05-03\\n BXC, NYSE, 35.29, 2013-05-02\\n KNL, NYSE, 8.33, 2013-04-19\\n LVS, NYSE, 7.58, 2013-05-02\\n HLF, NYSE, 18.69, 2013-04-30\\n MIC, NYSE, -89.09, 2013-04-30\\n PHH, NYSE, -81.13, 2013-05-02\\n CE, NYSE, 44.30, 2013-04-19\\n EDR, NYSE, 0.00, 2013-04-30\\n WTI, NYSE, 34.62, 2013-05-08\\n ARC, NYSE, 0.00, 2013-05-08\\n PBH, NYSE, 5.88, 2013-05-17\\n HUN, NYSE, 18.75, 2013-05-01\\n WEX, NYSE, 3.16, 2013-05-02\\n DLB, NYSE, 14.29, 2013-04-26\\n DSX, NYSE, 66.67, 2013-05-23\\n LAZ, NYSE, -17.65, 2013-04-27\\n TGP, NYSE, 14.29, 2013-05-10\\n TLP, NYSE, 7.69, 2013-05-08\\n DRH, NYSE, 55.56, 2013-05-11\\n HTGC, NYSE, 8.00, 2013-05-03\\n KFN, NYSE, 27.78, 2013-05-02\\n THS, NYSE, 5.71, 2013-05-10\\n NSR, NYSE, -8.86, 2013-05-03\\n WAL, NYSE, 14.29, 2013-04-19\\n SLW, NYSE, -9.76, 2013-05-11\\n MPW, NYSE, -3.85, 2013-04-27\\n GNK, NYSE, -2.75, 2013-05-02\\n MFB, NYSE, 28.57, 
2013-05-09\\nRDS.A, NYSE, 21.74, 2013-05-03\\n ITC, NYSE, -3.45, 2013-04-24\\n FTK, NYSE, -11.76, 2013-05-10\\n PIKE, NYSE, -20.00, 2013-05-07\\n ALJ, NYSE, 63.27, 2013-05-09\\n DRC, NYSE, 2.38, 2013-04-26\\n STN, NYSE, 0.00, 2013-05-10\\n SSW, NYSE, -8.70, 2013-04-30\\n CF, NYSE, 0.50, 2013-05-09\\n HPY, NYSE, 12.50, 2013-05-01\\n ROC, NYSE, 1.49, 2013-05-01\\n WPZ, NYSE, -57.58, 2013-05-01\\n LCC, NYSE, 29.17, 2013-04-24\\n GLP, NYSE, -7.27, 2013-05-10\\n AMP, NYSE, 1.27, 2013-04-23\\n DHT, NYSE, 58.33, 2013-04-30\\n FNF, NYSE, 5.00, 2013-05-02\\n NM, NYSE, 52.38, 2013-05-22\\n CCO, NYSE, -57.14, 2013-05-03\\n BWP, NYSE, 5.00, 2013-04-30\\n ICE, NYSE, 2.53, 2013-05-02\\n BKD, NYSE, 50.00, 2013-05-02\\n BAS, NYSE, 12.00, 2013-04-25\\n CPA, NYSE, 21.21, 2013-05-14\\n LYV, NYSE, 8.33, 2013-05-08\\n WNR, NYSE, -6.93, 2013-05-03\\n CMG, NYSE, 9.81, 2013-04-19\\n RGP, NYSE, -50.00, 2013-05-09\\n KOP, NYSE, -16.92, 2013-05-04\\n TX, NYSE, 40.43, 2013-05-01\\n UAL, NYSE, 10.09, 2013-04-26\\n ETE, NYSE, -27.03, 2013-05-09\\n RSO, NYSE, -45.00, 2013-05-08\\n XCO, NYSE, 62.50, 2013-05-01\\n PAC, NYSE, 30.00, 2013-04-26\\n NYX, NYSE, 1.79, 2013-05-01\\n TDG, NYSE, 0.61, 2013-05-08\\n BMA, NYSE, 11.68, 2013-05-09\\n THI, NYSE, 1.67, 2013-05-09\\n BTE, NYSE, -112.00, 2013-05-10\\n CNH, NYSE, 41.49, 2013-05-01\\n GLA, NYSE, -82.35, 2013-05-02\\n POR, NYSE, 0.00, 2013-05-02\\n HIL, NYSE, 50.00, 2013-05-03\\n HVB, NYSE, 12.50, 2013-04-24\\n KS, NYSE, -9.30, 2013-05-08\\n HK, NYSE, -28.57, 2013-05-03\\n DCP, NYSE, 3.28, 2013-05-07\\n DK, NYSE, 7.56, 2013-05-09\\n CODI, NYSE, 0.00, 2013-05-08\\n MA, NYSE, 0.65, 2013-05-02\\n MWA, NYSE, 150.00, 2013-05-01\\n KOG, NYSE, -21.43, 2013-05-03\\n PWE, NYSE, -150.00, 2013-05-03\\n PGTI, NYSE, 100.00, 2013-05-02\\n AWH, NYSE, 8.45, 2013-04-25\\n NSH, NYSE, -29.73, 2013-04-25\\n WYN, NYSE, 7.58, 2013-04-25\\n WNS, NYSE, 15.38, 2013-04-18\\n PGH, NYSE, 0.00, 2013-05-02\\n AYR, NYSE, 34.48, 2013-05-03\\n EVR, NYSE, -24.49, 2013-04-25\\n HBI, NYSE, 2.00, 2013-04-24\\n WU, NYSE, 12.12, 2013-05-01\\n OC, NYSE, 45.00, 2013-04-25\\n DAC, NYSE, 44.44, 2013-04-30\\n AWI, NYSE, -43.59, 2013-04-30\\n SUSS, NYSE, 0.00, 2013-05-09\\n DEI, NYSE, 5.71, 2013-05-08\\n OB, NYSE, 79.31, 2013-04-30\\n SBH, NYSE, -7.69, 2013-05-03\\n EBS, NYSE, -144.44, 2013-05-03\\n KBR, NYSE, 25.53, 2013-04-26\\n AER, NYSE, 23.40, 2013-05-08\\n NOA, NYSE, -442.86, 2013-06-11\\n SPR, NYSE, 29.79, 2013-05-03\\n ANW, NYSE, -7.14, 2013-05-16\\n DCT, NYSE, 10.00, 2013-05-03\\n SE, NYSE, 6.25, 2013-05-04\\n TOO, NYSE, -17.86, 2013-05-10\\n TSL, NYSE, -27.78, 2013-05-30\\n TWC, NYSE, 2.92, 2013-04-26\\n MVO, NYSE, -13.92, 2013-05-09\\n CO, NYSE, 150.00, 2013-06-19\\n EXK, NYSE, -18.75, 2013-05-07\\n EIG, NYSE, 22.22, 2013-05-09\\n HF, NYSE, -50.00, 2013-05-02\\n FIG, NYSE, 33.33, 2013-05-03\\n NGLS, NYSE, -20.00, 2013-05-04\\n TCAP, NYSE, -1.75, 2013-05-09\\n GFA, NYSE, -211.11, 2013-05-14\\n BR, NYSE, 18.18, 2013-05-08\\n SCR, NYSE, 12.50, 2013-05-10\\n CNK, NYSE, 12.00, 2013-05-08\\n DAL, NYSE, 42.86, 2013-04-24\\n ORN, NYSE, 42.86, 2013-05-03\\n ACM, NYSE, 3.92, 2013-05-08\\n SLH, NYSE, 5.00, 2013-05-08\\n CLR, NYSE, 2.63, 2013-05-09\\n BGS, NYSE, -5.13, 2013-04-19\\n STAR, NYSE, 26.42, 2013-05-01\\n YGE, NYSE, -40.00, 2013-05-31\\n DFS, NYSE, 18.75, 2013-04-24\\n TEL, NYSE, 7.04, 2013-04-25\\n BX, NYSE, 1.85, 2013-04-19\\n SEP, NYSE, 4.65, 2013-05-04\\n BZ, NYSE, -77.78, 2013-05-03\\n PPO, NYSE, -41.18, 2013-05-09\\n PRO, NYSE, 100.00, 2013-05-03\\n WBC, NYSE, 7.34, 2013-04-26\\n DHX, NYSE, 0.00, 
2013-04-24\\n PMC, NYSE, 23.53, 2013-05-02\\n HGG, NYSE, 3.33, 2013-05-21\\n OWW, NYSE, -33.33, 2013-05-10\\n VR, NYSE, 35.97, 2013-04-26\\n CXO, NYSE, -27.50, 2013-05-02\\n G, NYSE, 5.00, 2013-05-02\\n EJ, NYSE, 89.47, 2013-05-16\\n WX, NYSE, 11.11, 2013-05-14\\n CMLP, NYSE, -92.86, 2013-05-08\\n VMW, NYSE, 10.87, 2013-04-24\\n CZZ, NYSE, -40.00, 2013-06-06\\n CGA, NYSE, 6.67, 2013-05-14\\n TDC, NYSE, -26.92, 2013-05-03\\n FLY, NYSE, 61.73, 2013-05-03\\n MAIN, NYSE, 2.04, 2013-05-10\\n REN, NYSE, 100.00, 2013-05-07\\n TGH, NYSE, -12.90, 2013-05-08\\n DFT, NYSE, -5.00, 2013-05-08\\n RF, NYSE, 15.00, 2013-04-24\\n PZN, NYSE, 0.00, 2013-04-25\\n LL, NYSE, 29.55, 2013-04-25\\n NMM, NYSE, 0.00, 2013-04-26\\n OZM, NYSE, 81.25, 2013-05-03\\n ES, NYSE, 12.31, 2013-05-02\\n MSCI, NYSE, 5.56, 2013-05-02\\n ARR, NYSE, -21.74, 2013-05-03\\n KW, NYSE, 62.50, 2013-05-08\\n GTS, NYSE, 52.78, 2013-05-02\\n FOR, NYSE, 450.00, 2013-05-09\\n LRN, NYSE, 34.78, 2013-05-04\\n TNK, NYSE, -100.00, 2013-05-10\\n N, NYSE, -21.43, 2013-04-26\\n DAN, NYSE, -33.33, 2013-04-26\\n BIP, NYSE, 0.00, 2013-05-03\\n CPN, NYSE, -6.67, 2013-05-03\\n SOL, NYSE, -15.38, 2013-05-17\\n PM, NYSE, -4.44, 2013-04-19\\n V, NYSE, 6.08, 2013-05-02\\n IPI, NYSE, 5.26, 2013-05-02\\n AWK, NYSE, -5.88, 2013-05-08\\n HTS, NYSE, -7.46, 2013-04-23\\n DPS, NYSE, 12.77, 2013-04-25\\n CFX, NYSE, 8.33, 2013-04-26\\n WES, NYSE, -22.50, 2013-05-02\\n SB, NYSE, 0.00, 2013-05-16\\n LO, NYSE, 4.76, 2013-04-25\\n LPS, NYSE, 0.00, 2013-04-25\\n FF, NYSE, -6.90, 2013-05-08\\n NNA, NYSE, 200.00, 2013-05-03\\n EPB, NYSE, 7.41, 2013-04-18\\n JBT, NYSE, -17.65, 2013-05-08\\n DL, NYSE, -33.33, 2013-05-22\\n RAX, NYSE, -5.00, 2013-05-09\\n GSL, NYSE, -50.00, 2013-05-10\\n HCI, NYSE, 66.06, 2013-05-03\\n EC, NYSE, -18.58, 2013-05-04\\n CLW, NYSE, -98.08, 2013-04-25\\n MJN, NYSE, -1.16, 2013-04-26\\n EPC, NYSE, 39.53, 2013-05-02\\n BPI, NYSE, 0.00, 2013-05-07\\n RST, NYSE, 25.00, 2013-05-09\\n DGI, NYSE, 22.22, 2013-05-08\\n SWI, NYSE, 6.25, 2013-05-01\\n CYS, NYSE, -45.16, 2013-04-18\\n IVR, NYSE, 1.59, 2013-05-02\\n BUD, NYSE, 50.65, 2013-05-01\\n SLD, NYSE, -66.67, 2013-05-15\\n PMT, NYSE, 11.11, 2013-04-24\\n STWD, NYSE, -20.93, 2013-05-09\\n CFN, NYSE, 11.32, 2013-05-10\\n SPB, NYSE, 7.32, 2013-05-01\\n ARI, NYSE, 33.33, 2013-05-02\\n CLNY, NYSE, -26.47, 2013-05-07\\n ART, NYSE, -800.00, 2013-05-07\\n SEM, NYSE, -11.11, 2013-05-03\\n BSBR, NYSE, -71.43, 2013-04-26\\n DOLE, NYSE, -50.00, 2013-05-03\\n VSI, NYSE, 2.86, 2013-05-08\\n TWO, NYSE, -9.38, 2013-05-08\\n CVE, NYSE, -6.38, 2013-04-25\\n H, NYSE, 12.50, 2013-05-02\\n LEA, NYSE, 19.27, 2013-04-26\\n SVN, NYSE, -81.82, 2013-05-14\\n CLD, NYSE, -59.26, 2013-05-01\\n AOL, NYSE, 6.25, 2013-05-09\\n CHSP, NYSE, 25.00, 2013-05-08\\n PEB, NYSE, 5.88, 2013-04-26\\n CIT, NYSE, -8.99, 2013-04-24\\n KAR, NYSE, -3.03, 2013-05-02\\n CIE, NYSE, -15.38, 2013-05-01\\n TMH, NYSE, 0.00, 2013-05-01\\n KRA, NYSE, -75.00, 2013-05-02\\n SYA, NYSE, 8.82, 2013-04-25\\n TRNO, NYSE, -11.11, 2013-05-09\\n PDM, NYSE, 0.00, 2013-05-03\\n GNRC, NYSE, 23.47, 2013-05-03\\n ACW, NYSE, -9.68, 2013-04-24\\n BALT, NYSE, -9.52, 2013-05-02\\n ST, NYSE, 4.35, 2013-04-24\\n SEMG, NYSE, -15.00, 2013-05-09\\n CALX, NYSE, 50.00, 2013-04-26\\n MXL, NYSE, 33.33, 2013-05-01\\n STNG, NYSE, 60.00, 2013-04-30\\n PRI, NYSE, -4.35, 2013-05-08\\n SDRL, NYSE, 16.95, 2013-05-29\\n CLDT, NYSE, 7.50, 2013-05-08\\n EXL, NYSE, 5.00, 2013-05-02\\n LYB, NYSE, 9.09, 2013-04-27\\n PNG, NYSE, 4.35, 2013-05-07\\n PLOW, NYSE, 13.33, 2013-05-07\\n SIX, NYSE, 19.61, 
2013-04-23\\n NKA, NYSE, -140.00, 2013-05-10\\n RRTS, NYSE, 3.57, 2013-05-02\\n JKS, NYSE, 66.27, 2013-06-08\\n CODE, NYSE, 7.69, 2013-05-01\\n FAF, NYSE, -31.71, 2013-04-26\\n QEP, NYSE, -6.67, 2013-05-01\\n OAS, NYSE, 31.37, 2013-05-08\\n HPP, NYSE, 18.18, 2013-05-07\\n FN, NYSE, 3.70, 2013-04-30\\n ECT, NYSE, 7.32, 2013-05-11\\n QUAD, NYSE, -88.10, 2013-05-08\\n KKR, NYSE, 4.76, 2013-04-26\\n RLD, NYSE, 70.00, 2013-06-07\\n AMRC, NYSE, -200.00, 2013-05-10\\n GDOT, NYSE, 9.37, 2013-05-01\\n AT, NYSE, 40.00, 2013-05-09\\n ENV, NYSE, 0.00, 2013-05-17\\n COR, NYSE, 0.00, 2013-04-25\\n VC, NYSE, 75.65, 2013-05-10\\n CCG, NYSE, 5.88, 2013-05-01\\n EFC, NYSE, -32.00, 2013-05-07\\n TOWR, NYSE, 255.56, 2013-05-03\\n CHMT, NYSE, -21.05, 2013-05-03\\n HBM, NYSE, 200.00, 2013-05-02\\n EXAM, NYSE, 0.00, 2013-05-09\\n XUE, NYSE, -25.00, 2013-05-17\\n CMRE, NYSE, 26.09, 2013-04-25\\n NOAH, NYSE, 112.50, 2013-05-07\\n IPHI, NYSE, 18.18, 2013-05-02\\n BITA, NYSE, 0.00, 2013-05-10\\n BAH, NYSE, 11.43, 2013-05-23\\n GM, NYSE, 19.64, 2013-05-03\\n XNY, NYSE, 28.57, 2013-05-20\\n TROX, NYSE, -181.25, 2013-05-09\\n TRGP, NYSE, 52.38, 2013-05-04\\n DANG, NYSE, 21.05, 2013-05-17\\n YOKU, NYSE, 0.00, 2013-05-16\\n FRC, NYSE, 0.00, 2013-04-16\\n RFP, NYSE, 64.29, 2013-05-01\\n ISS, NYSE, 50.00, 2013-05-18\\n WD, NYSE, -45.65, 2013-05-09\\n FLT, NYSE, 10.39, 2013-05-03\\n GCAP, NYSE, -15.38, 2013-05-08\\n FRF, NYSE, -27.27, 2013-05-14\\n SWFT, NYSE, 23.53, 2013-04-23\\n AG, NYSE, -8.00, 2013-05-16\\n QRE, NYSE, 0.00, 2013-05-09\\n AAT, NYSE, 8.57, 2013-05-01\\n MCC, NYSE, -2.70, 2013-05-03\\n NLSN, NYSE, 9.09, 2013-04-26\\n AGRO, NYSE, -100.00, 2013-05-17\\n BKU, NYSE, 4.44, 2013-04-25\\n INXN, NYSE, -7.14, 2013-05-09\\n NPTN, NYSE, 10.00, 2013-05-10\\n INN, NYSE, 5.88, 2013-05-07\\n KMI, NYSE, -12.50, 2013-04-18\\n HCA, NYSE, -4.82, 2013-05-03\\n MX, NYSE, 13.04, 2013-05-01\\n HII, NYSE, 0.00, 2013-05-09\\n QIHU, NYSE, 100.00, 2013-05-20\\n APO, NYSE, 56.20, 2013-05-07\\n GNC, NYSE, 1.39, 2013-04-27\\n SDT, NYSE, 16.07, 2013-05-11\\n UAN, NYSE, 4.26, 2013-05-02\\n ARCO, NYSE, -142.86, 2013-05-01\\n ELLI, NYSE, -16.67, 2013-05-01\\n TMS, NYSE, -12.00, 2013-04-26\\n SQNS, NYSE, 0.00, 2013-04-26\\n STAG, NYSE, 3.13, 2013-05-07\\n AL, NYSE, 5.13, 2013-05-10\\n TLLP, NYSE, -14.89, 2013-05-07\\n RENN, NYSE, 85.71, 2013-05-14\\n NQ, NYSE, -16.67, 2013-05-16\\n KOS, NYSE, -37.50, 2013-05-10\\n RLJ, NYSE, 10.81, 2013-05-09\\n NGL, NYSE, -62.86, 2013-06-15\\n FENG, NYSE, 60.00, 2013-05-15\\n LNKD, NYSE, 340.00, 2013-05-03\\n NMFC, NYSE, -2.86, 2013-05-07\\n ACTV, NYSE, 32.14, 2013-05-03\\n FIO, NYSE, 20.00, 2013-04-25\\n TAOM, NYSE, -25.00, 2013-05-24\\n RATE, NYSE, 10.00, 2013-05-01\\n VHS, NYSE, 8.33, 2013-05-01\\n MPC, NYSE, 0.00, 2013-05-01\\n MITT, NYSE, -9.64, 2013-05-07\\n OILT, NYSE, 17.07, 2013-05-09\\n SXC, NYSE, -40.00, 2013-04-26\\n AMTG, NYSE, 14.06, 2013-05-07\\n AMID, NYSE, -200.00, 2013-05-14\\n WAIR, NYSE, 22.22, 2013-04-30\\n PER, NYSE, -7.58, 2013-05-11\\n PPP, NYSE, 260.00, 2013-05-09\\n FSM, NYSE, -28.57, 2013-05-08\\n FBHS, NYSE, 41.18, 2013-05-03\\n XLS, NYSE, 73.91, 2013-05-04\\n XYL, NYSE, -3.57, 2013-05-01\\n GNE, NYSE, -550.00, 2013-05-08\\n NDRO, NYSE, -8.11, 2013-05-04\\n RNF, NYSE, -29.63, 2013-05-10\\n VAC, NYSE, 10.20, 2013-04-26\\n CHKR, NYSE, -2.90, 2013-05-10\\n PACD, NYSE, 250.00, 2013-05-07\\n INVN, NYSE, -13.33, 2013-05-03\\n DLPH, NYSE, 11.46, 2013-05-02\\n MN, NYSE, 0.00, 2013-05-02\\n RRMS, NYSE, 51.28, 2013-05-10\\n WPX, NYSE, -4.17, 2013-05-03\\n LPI, NYSE, -15.38, 2013-05-10\\n 
SN, NYSE, -82.61, 2013-05-08\\n KORS, NYSE, 35.14, 2013-05-30\\n BCEI, NYSE, -20.93, 2013-05-10\\n BOXC, NYSE, 2.56, 2013-04-23\\n PVG, NYSE, -25.00, 2013-05-11\\n POST, NYSE, -29.63, 2013-05-14\\n SLCA, NYSE, -2.78, 2013-05-01\\n MTDR, NYSE, 0.00, 2013-05-09\\n GWAY, NYSE, -120.00, 2013-05-07\\n EPAM, NYSE, -14.71, 2013-05-09\\n RNDY, NYSE, -9.52, 2013-05-10\\n PRLB, NYSE, 0.00, 2013-04-26\\n YELP, NYSE, -40.00, 2013-05-02\\n NSM, NYSE, 23.19, 2013-05-08\\n ALSN, NYSE, 95.24, 2013-04-30\\n DWRE, NYSE, -22.73, 2013-05-08\\n VNTV, NYSE, 3.70, 2013-05-07\\n ET, NYSE, 0.00, 2013-05-10\\n VCRA, NYSE, -160.00, 2013-05-03\\n RM, NYSE, -1.82, 2013-05-03\\n BNNY, NYSE, 3.57, 2013-06-11\\n MM, NYSE, 25.00, 2013-05-09\\n RXN, NYSE, 0.00, 2013-05-22\\n GLOG, NYSE, -16.67, 2013-05-16\\n RPAI, NYSE, 9.52, 2013-05-07\\n OAK, NYSE, 39.86, 2013-05-08\\n FET, NYSE, 3.03, 2013-04-26\\n MRC, NYSE, 4.65, 2013-05-03\\n PSX, NYSE, 17.74, 2013-05-02\\n TUMI, NYSE, 6.67, 2013-05-09\\n ACRE, NYSE, -5.88, 2013-05-16\\n EVER, NYSE, 13.79, 2013-04-25\\n PDH, NYSE, -13.24, 2013-04-25\\n ROYT, NYSE, 10.00, 2013-05-11\\n WMC, NYSE, -2.15, 2013-05-16\\n WAGE, NYSE, 35.71, 2013-05-10\\n HTA, NYSE, 6.67, 2013-05-08\\n ALEX, NYSE, -28.57, 2013-05-10\\n BKW, NYSE, 0.00, 2013-04-27\\n CNCO, NYSE, -88.24, 2013-05-31\\n EQM, NYSE, 41.30, 2013-04-26\\n NOW, NYSE, 0.00, 2013-04-25\\n EGL, NYSE, -11.24, 2013-05-14\\n NGVC, NYSE, 7.69, 2013-05-10\\n NTI, NYSE, 3.51, 2013-05-14\\n AMRE, NYSE, 4.00, 2013-05-08\\n GMED, NYSE, 5.00, 2013-05-03\\n MANU, NYSE, -25.00, 2013-05-03\\n HCLP, NYSE, -23.08, 2013-05-15\\n ADT, NYSE, -4.65, 2013-05-02\\n TRLA, NYSE, -75.00, 2013-05-01\\n SRC, NYSE, 19.44, 2013-05-09\\n NBHC, NYSE, -50.00, 2013-04-30\\n BSMX, NYSE, 30.43, 2013-04-27\\n HY, NYSE, 67.05, 2013-05-02\\n SMLP, NYSE, -10.71, 2013-05-14\\n DYN, NYSE, -254.55, 2013-05-03\\n LXFR, NYSE, 0.00, 2013-05-08\\n LOCK, NYSE, 25.00, 2013-05-02\\n JMI, NYSE, 224.44, 2013-05-08\\n BERY, NYSE, 16.67, 2013-05-03\\n FLTX, NYSE, 8.33, 2013-05-09\\n ANFI, NYSE, 0.00, 2013-06-11\\n SSTK, NYSE, 23.08, 2013-05-09\\n RLGY, NYSE, -13.33, 2013-05-02\\n SDLP, NYSE, 88.64, 2013-05-29\\n MPLX, NYSE, -7.14, 2013-05-01\\n WWAV, NYSE, 6.67, 2013-05-10\\n SXE, NYSE, -44.44, 2013-05-09\\n DKL, NYSE, 31.58, 2013-05-08\\n SCM, NYSE, -8.82, 2013-05-10\\n RKUS, NYSE, -100.00, 2013-05-07\\n ALDW, NYSE, -1.32, 2013-05-08\\n WGP, NYSE, 0.00, 2013-05-02\\n ABBV, NYSE, 3.03, 2013-04-27\\n PBF, NYSE, -54.72, 2013-05-03\\n SBY, NYSE, -433.33, 2013-05-14\\n RIOM, NYSE, 0.00, 2013-05-15\\n USAC, NYSE, -30.00, 2013-05-10\\n CVRR, NYSE, -2.56, 2013-05-03\\n SXCP, NYSE, -9.76, 2013-04-26\\n BFAM, NYSE, 81.82, 2013-05-10\\n TPH, NYSE, 200.00, 2013-05-15\\n ZTS, NYSE, 5.88, 2013-05-01\\n BCC, NYSE, 146.15, 2013-04-23\\n AGI, NYSE, 0.00, 2013-04-26\\n APAM, NYSE, -11.32, 2013-05-02\\n SSNI, NYSE, -1211.77, 2013-05-02\\n MODN, NYSE, 0.00, 2013-05-08\\n AVIV, NYSE, 150.00, 2013-05-08\\n OAKS, NYSE, 509.09, 2013-05-04\\n MRIN, NYSE, -7.50, 2013-05-09\\n PF, NYSE, 17.24, 2013-05-16\\n TMHC, NYSE, -66.67, 2013-05-16\\n ARPI, NYSE, -600.00, 2013-06-25\\n CSTM, NYSE, -105.08, 2013-06-18\\n DDC, NYSE, -80.00, 2013-06-06\\n ABM, NYSE, 9.09, 2013-06-04\\n ANN, NYSE, 4.76, 2013-06-07\\n BBY, NYSE, 28.00, 2013-05-22\\n BF.B, NYSE, -2.17, 2013-06-06\\n BKE, NYSE, -4.88, 2013-05-24\\n NCS, NYSE, -21.74, 2013-06-05\\n BNS, NYSE, -0.83, 2013-05-29\\n BRC, NYSE, -6.78, 2013-05-17\\n CATO, NYSE, 1.94, 2013-05-24\\n COO, NYSE, 9.49, 2013-06-07\\n CPB, NYSE, 10.71, 2013-05-21\\n CFI, NYSE, 10.81, 
2013-06-13\\n DCI, NYSE, -4.17, 2013-05-18\\n DDS, NYSE, 15.38, 2013-05-15\\n DE, NYSE, 0.73, 2013-05-16\\n DY, NYSE, 0.00, 2013-05-22\\n EV, NYSE, 0.00, 2013-05-23\\n ESL, NYSE, -11.81, 2013-05-31\\n M, NYSE, 3.77, 2013-05-16\\n GCO, NYSE, 11.90, 2013-06-01\\n GPS, NYSE, 2.90, 2013-05-24\\n HD, NYSE, 7.79, 2013-05-22\\n HEI, NYSE, 10.00, 2013-05-23\\n HOV, NYSE, 120.00, 2013-06-06\\n HRB, NYSE, -1.93, 2013-06-13\\n HRL, NYSE, 0.00, 2013-05-24\\n HPQ, NYSE, 7.41, 2013-05-23\\n JCP, NYSE, -12.93, 2013-05-17\\n KR, NYSE, 4.55, 2013-06-21\\n KSS, NYSE, 15.79, 2013-05-17\\n LB, NYSE, 4.35, 2013-05-23\\n LOW, NYSE, -3.92, 2013-05-23\\n LZB, NYSE, 7.14, 2013-06-19\\n MDT, NYSE, 6.80, 2013-05-22\\n MEI, NYSE, 60.00, 2013-06-21\\n MPR, NYSE, -33.33, 2013-06-07\\n NAV, NYSE, -302.75, 2013-06-11\\n JWN, NYSE, -3.95, 2013-05-17\\n OXM, NYSE, 5.13, 2013-06-12\\n PBY, NYSE, -85.71, 2013-06-11\\n PLL, NYSE, 1.37, 2013-05-31\\n PNY, NYSE, 0.00, 2013-06-08\\n PVH, NYSE, 39.42, 2013-06-13\\n THO, NYSE, -7.87, 2013-06-07\\n TIF, NYSE, 32.08, 2013-05-29\\n TJX, NYSE, 0.00, 2013-05-22\\n TOL, NYSE, 0.00, 2013-05-23\\n TTC, NYSE, 10.92, 2013-05-24\\n VAL, NYSE, 2.25, 2013-05-15\\n JW.A, NYSE, -16.47, 2013-06-19\\n TGT, NYSE, 23.53, 2013-05-23\\n WMT, NYSE, -0.87, 2013-05-17\\n WSM, NYSE, 11.11, 2013-05-24\\n FL, NYSE, 3.41, 2013-05-25\\n CHS, NYSE, -11.11, 2013-05-30\\n BKS, NYSE, 52.22, 2013-06-26\\n CAL, NYSE, 45.45, 2013-05-30\\n SIG, NYSE, 0.89, 2013-05-24\\n ZLC, NYSE, 1200.00, 2013-05-23\\n AEO, NYSE, 5.88, 2013-05-23\\n FGP, NYSE, 15.69, 2013-06-07\\n BMO, NYSE, -4.73, 2013-05-30\\n RY, NYSE, -2.34, 2013-05-31\\n GEF, NYSE, 1.45, 2013-06-06\\n SKS, NYSE, 0.00, 2013-05-22\\n TD, NYSE, 1.09, 2013-05-24\\n ANF, NYSE, -80.00, 2013-05-25\\n CIEN, NYSE, 20.00, 2013-06-07\\n KMG, NYSE, 8.70, 2013-06-11\\n IRET, NYSE, 11.76, 2013-07-02\\n CM, NYSE, 0.00, 2013-05-31\\n UBA, NYSE, 12.00, 2013-06-08\\n KFY, NYSE, 3.23, 2013-06-18\\n KKD, NYSE, 25.00, 2013-05-31\\n MVC, NYSE, -37.50, 2013-06-11\\n CBK, NYSE, 150.00, 2013-06-08\\n SJM, NYSE, 12.17, 2013-06-07\\n BIG, NYSE, 0.00, 2013-05-31\\n JOY, NYSE, 11.61, 2013-05-31\\n SSI, NYSE, -122.22, 2013-05-18\\n GME, NYSE, 15.00, 2013-05-24\\n DKS, NYSE, 0.00, 2013-05-22\\n A, NYSE, 14.93, 2013-05-15\\n MTN, NYSE, -3.62, 2013-06-07\\n GES, NYSE, 75.00, 2013-05-31\\n CRM, NYSE, -600.00, 2013-05-24\\n NWY, NYSE, 128.57, 2013-05-24\\n PAY, NYSE, -7.69, 2013-06-06\\n DSW, NYSE, 11.11, 2013-05-30\\n NX, NYSE, -300.00, 2013-06-08\\n DG, NYSE, -1.39, 2013-06-05\\n EXPR, NYSE, 5.56, 2013-05-31\\n P, NYSE, 0.00, 2013-05-23\\n GWRE, NYSE, 44.44, 2013-05-29\\n BLOX, NYSE, 100.00, 2013-05-24\\n TLYS, NYSE, 14.29, 2013-05-30\\n PANW, NYSE, -900.00, 2013-05-31\\n WDAY, NYSE, 13.04, 2013-05-23\\n RH, NYSE, 50.00, 2013-06-14\\n RALY, NYSE, 14.78, 2013-06-07\\n AIR, NYSE, 13.64, 2013-07-26\\n ATU, NYSE, -1.59, 2013-06-20\\n AZO, NYSE, 0.69, 2013-05-22\\n AZZ, NYSE, -8.20, 2013-06-29\\n CAG, NYSE, 1.69, 2013-06-28\\n CLC, NYSE, -1.49, 2013-06-20\\n CMC, NYSE, -15.79, 2013-06-28\\n FC, NYSE, 18.18, 2013-07-10\\n FDO, NYSE, 1.94, 2013-07-11\\n FDX, NYSE, 8.67, 2013-06-20\\n FUL, NYSE, -5.63, 2013-06-27\\n GIS, NYSE, -1.85, 2013-06-27\\n KBH, NYSE, 20.00, 2013-06-28\\n LEN, NYSE, 30.30, 2013-06-26\\n LNN, NYSE, 12.92, 2013-06-27\\n MKC, NYSE, 0.00, 2013-06-28\\n RT, NYSE, -36.84, 2013-07-25\\n MCS, NYSE, -6.25, 2013-07-26\\n MSM, NYSE, 9.37, 2013-07-11\\n NKE, NYSE, 2.70, 2013-06-28\\n ORCL, NYSE, 0.00, 2013-06-21\\n PIR, NYSE, 0.00, 2013-06-21\\n PKE, NYSE, -13.79, 2013-06-27\\n RAD, NYSE, 
0.00, 2013-06-21\\n RPM, NYSE, 7.46, 2013-07-23\\n SVU, NYSE, 250.00, 2013-07-19\\n TISI, NYSE, 0.00, 2013-08-07\\n TXI, NYSE, 116.00, 2013-07-11\\n UNF, NYSE, 2.88, 2013-06-27\\n WGO, NYSE, 0.00, 2013-06-28\\n WOR, NYSE, -7.46, 2013-06-28\\n JBL, NYSE, 4.35, 2013-06-20\\n GBX, NYSE, -5.66, 2013-07-03\\n DRI, NYSE, -1.94, 2013-06-22\\n FDS, NYSE, -1.71, 2013-06-19\\n KMX, NYSE, 12.28, 2013-06-22\\n SCS, NYSE, 0.00, 2013-06-20\\n SJR, NYSE, 16.28, 2013-06-29\\n RHT, NYSE, 9.09, 2013-06-20\\n OMN, NYSE, 14.29, 2013-06-28\\n MON, NYSE, 3.75, 2013-06-27\\n GPN, NYSE, -3.92, 2013-07-26\\n AYI, NYSE, 7.78, 2013-07-03\\n CCL, NYSE, 50.00, 2013-06-26\\n CUK, NYSE, 50.00, 2013-06-26\\n STZ, NYSE, -7.32, 2013-07-03\\n ACN, NYSE, 0.00, 2013-06-28\\n SNX, NYSE, 0.00, 2013-06-26\\n TAL, NYSE, 66.67, 2013-07-23\\n IHS, NYSE, 1.45, 2013-06-21\\n EDU, NYSE, 20.00, 2013-07-24\\n ZEP, NYSE, -31.71, 2013-07-03\\n MG, NYSE, -5.88, 2013-08-08\\n MOS, NYSE, -0.88, 2013-07-16\\n ABT, NYSE, 4.55, 2013-07-18\\n ABX, NYSE, 17.86, 2013-08-02\\n AB, NYSE, 7.89, 2013-08-01\\n TAP, NYSE, 8.63, 2013-08-07\\n ACO, NYSE, 1.79, 2013-07-27\\n ADM, NYSE, 9.52, 2013-08-07\\n AEM, NYSE, -85.71, 2013-07-25\\n AEP, NYSE, -5.19, 2013-07-26\\n AES, NYSE, 23.08, 2013-08-09\\n AET, NYSE, 9.35, 2013-07-31\\n AFL, NYSE, 6.58, 2013-07-31\\n AGCO, NYSE, 18.78, 2013-08-01\\n AGN, NYSE, 1.01, 2013-07-26\\n HES, NYSE, 7.09, 2013-08-01\\n AIG, NYSE, 31.76, 2013-08-02\\n AIN, NYSE, -23.08, 2013-08-01\\n AJG, NYSE, 5.80, 2013-07-31\\n ALU, NYSE, 33.33, 2013-07-31\\n MATX, NYSE, 6.82, 2013-08-08\\n ALK, NYSE, -0.68, 2013-07-26\\n BEAM, NYSE, 6.67, 2013-08-09\\n AME, NYSE, 0.00, 2013-08-08\\n TWX, NYSE, 10.67, 2013-08-08\\n AVD, NYSE, -17.14, 2013-08-06\\n AMN, NYSE, 20.00, 2013-08-02\\n AN, NYSE, -1.35, 2013-07-19\\n AON, NYSE, 0.91, 2013-07-27\\n APA, NYSE, -0.50, 2013-08-02\\n APC, NYSE, 16.67, 2013-07-30\\n APD, NYSE, 0.00, 2013-07-24\\n APH, NYSE, 1.06, 2013-07-19\\n ARG, NYSE, -0.87, 2013-07-26\\n AAN, NYSE, 0.00, 2013-07-25\\n ARW, NYSE, 8.74, 2013-07-25\\n ASGN, NYSE, 14.29, 2013-07-25\\n ASH, NYSE, -8.29, 2013-07-26\\n ASR, NYSE, 21.90, 2013-07-23\\n GAS, NYSE, 51.85, 2013-08-01\\n ATO, NYSE, 13.51, 2013-08-07\\n ATW, NYSE, 0.74, 2013-08-01\\n AVP, NYSE, 11.54, 2013-08-02\\n AVT, NYSE, 3.16, 2013-08-08\\n AVY, NYSE, 2.90, 2013-07-24\\n AXP, NYSE, 4.96, 2013-07-18\\n B, NYSE, 0.00, 2013-07-27\\n BA, NYSE, 5.70, 2013-07-25\\n BAC, NYSE, 28.00, 2013-07-18\\n BAX, NYSE, 2.65, 2013-07-19\\n BC, NYSE, 13.89, 2013-07-26\\n OMX, NYSE, -33.33, 2013-08-07\\n BCE, NYSE, -2.67, 2013-08-09\\n BCR, NYSE, 2.90, 2013-07-24\\n BDX, NYSE, 7.48, 2013-08-02\\n BEN, NYSE, 1.18, 2013-07-30\\n BGG, NYSE, 15.79, 2013-08-16\\n BHE, NYSE, 10.71, 2013-07-26\\n BHI, NYSE, -6.15, 2013-07-20\\n BID, NYSE, -9.56, 2013-08-07\\n BIO, NYSE, 7.14, 2013-08-07\\n BK, NYSE, 6.90, 2013-07-18\\n BKH, NYSE, -2.38, 2013-08-06\\n WRB, NYSE, -2.99, 2013-07-23\\n BLC, NYSE, 9.09, 2013-07-31\\n BLL, NYSE, 1.19, 2013-07-26\\n BLX, NYSE, 5.56, 2013-07-19\\n BMI, NYSE, -20.00, 2013-07-19\\n BMS, NYSE, 1.67, 2013-07-26\\n BMY, NYSE, 0.00, 2013-07-26\\n BOH, NYSE, 2.41, 2013-07-23\\n BXS, NYSE, 10.00, 2013-07-23\\n BPL, NYSE, -8.86, 2013-08-03\\nBRK.A, NYSE, 176.30, 2013-08-03\\n BRO, NYSE, 2.86, 2013-07-16\\n BSX, NYSE, 12.50, 2013-07-26\\n BT, NYSE, 6.17, 2013-07-26\\n MTRN, NYSE, 7.50, 2013-07-27\\n CAI, NYSE, -8.54, 2013-07-31\\n CAT, NYSE, -15.20, 2013-07-25\\n CB, NYSE, 19.27, 2013-07-24\\n CBI, NYSE, 0.00, 2013-07-31\\n CBM, NYSE, -64.29, 2013-08-02\\n CBU, NYSE, 4.00, 
2013-07-24\\n CBT, NYSE, -4.35, 2013-08-01\\n CCC, NYSE, 14.29, 2013-08-07\\n CCE, NYSE, 2.67, 2013-07-26\\n C, NYSE, 5.93, 2013-07-16\\n CCK, NYSE, 3.23, 2013-07-18\\n CCU, NYSE, 25.00, 2013-08-08\\n CDE, NYSE, -1100.00, 2013-08-09\\n CDI, NYSE, 6.25, 2013-08-02\\n CAH, NYSE, 2.60, 2013-08-02\\n CFR, NYSE, 0.00, 2013-07-25\\n CHD, NYSE, 1.67, 2013-08-03\\n CKP, NYSE, -15.38, 2013-08-07\\n CPK, NYSE, -7.02, 2013-08-10\\n CI, NYSE, 11.95, 2013-08-02\\n CKH, NYSE, 51.67, 2013-07-31\\n CL, NYSE, 0.00, 2013-07-26\\n CLF, NYSE, 85.25, 2013-07-26\\n CLH, NYSE, -25.00, 2013-08-08\\n CLX, NYSE, 2.99, 2013-08-02\\n CMA, NYSE, 8.57, 2013-07-17\\n CMO, NYSE, -15.63, 2013-07-25\\n CRK, NYSE, -6.67, 2013-07-30\\n CMS, NYSE, -14.71, 2013-07-26\\n CNA, NYSE, 17.19, 2013-07-31\\n CNW, NYSE, 13.56, 2013-08-01\\n CNL, NYSE, -6.06, 2013-08-01\\n COG, NYSE, 35.48, 2013-07-25\\n COT, NYSE, -4.76, 2013-08-02\\n CP, NYSE, -4.14, 2013-07-25\\n CPF, NYSE, 25.93, 2013-07-26\\n CQB, NYSE, 43.48, 2013-08-09\\n CR, NYSE, 0.00, 2013-07-23\\nCRD.B, NYSE, 42.86, 2013-08-06\\n CRS, NYSE, 11.59, 2013-07-31\\n CSC, NYSE, 42.19, 2013-08-07\\n CSL, NYSE, -14.93, 2013-07-24\\n CTB, NYSE, -38.20, 2013-08-09\\n CTL, NYSE, 2.99, 2013-08-08\\n CTS, NYSE, 33.33, 2013-07-23\\n CUB, NYSE, 9.52, 2013-08-02\\n CMI, NYSE, 11.11, 2013-07-31\\n CUZ, NYSE, 9.09, 2013-07-30\\n CVC, NYSE, 80.00, 2013-08-03\\n CW, NYSE, 6.06, 2013-08-01\\n CWT, NYSE, 0.00, 2013-08-01\\n CX, NYSE, 0.00, 2013-07-26\\n CYN, NYSE, 8.33, 2013-07-19\\n D, NYSE, -4.62, 2013-08-07\\n DBD, NYSE, 0.00, 2013-08-15\\n DCO, NYSE, 30.77, 2013-08-06\\n DD, NYSE, 0.79, 2013-07-24\\n CVA, NYSE, 150.00, 2013-07-18\\n DHR, NYSE, 2.35, 2013-07-19\\n DIS, NYSE, 0.00, 2013-08-07\\n DLX, NYSE, 10.34, 2013-07-26\\n DNB, NYSE, 2.00, 2013-08-08\\n RRD, NYSE, 4.65, 2013-07-30\\n DOV, NYSE, 5.43, 2013-07-19\\n DOW, NYSE, 1.59, 2013-07-26\\n DRE, NYSE, 0.00, 2013-08-01\\n DHI, NYSE, 23.53, 2013-07-26\\n UFS, NYSE, -25.00, 2013-07-26\\n DTE, NYSE, -21.52, 2013-07-27\\n DUK, NYSE, -6.45, 2013-08-08\\n DVN, NYSE, 28.72, 2013-08-08\\n DV, NYSE, 31.71, 2013-08-09\\n EAT, NYSE, 4.05, 2013-08-03\\n ECL, NYSE, 2.38, 2013-07-31\\n ED, NYSE, -5.26, 2013-08-02\\n EDE, NYSE, 8.00, 2013-07-26\\n EFX, NYSE, 2.22, 2013-07-25\\n EGN, NYSE, 8.20, 2013-08-01\\n EGP, NYSE, 2.56, 2013-07-19\\n ELP, NYSE, 17.65, 2013-08-16\\n ELY, NYSE, 20.00, 2013-07-26\\n EMC, NYSE, 2.94, 2013-07-25\\n EMR, NYSE, -2.02, 2013-08-07\\n EOG, NYSE, 19.32, 2013-08-07\\n EQT, NYSE, 3.64, 2013-07-26\\n ESE, NYSE, -41.07, 2013-08-09\\n ESV, NYSE, 3.33, 2013-07-30\\n ETN, NYSE, -1.80, 2013-08-03\\n ETR, NYSE, 3.06, 2013-07-31\\n EXAR, NYSE, 14.29, 2013-07-25\\n F, NYSE, 21.62, 2013-07-25\\n CLGX, NYSE, 13.64, 2013-07-25\\n FNB, NYSE, 0.00, 2013-07-24\\n FCF, NYSE, -50.00, 2013-07-25\\n FBP, NYSE, -11.11, 2013-07-25\\n FICO, NYSE, 6.35, 2013-07-31\\n FLO, NYSE, 4.35, 2013-08-14\\n FMC, NYSE, 0.00, 2013-07-30\\n FOE, NYSE, 27.27, 2013-08-01\\n S, NYSE, 6.06, 2013-07-31\\n NEE, NYSE, 13.18, 2013-07-31\\n FRT, NYSE, 0.88, 2013-08-01\\n FRX, NYSE, 300.00, 2013-07-24\\n FSS, NYSE, 64.29, 2013-08-10\\n FUN, NYSE, 2.41, 2013-08-09\\n FUR, NYSE, -48.15, 2013-08-02\\n GBL, NYSE, 17.20, 2013-08-07\\n GVA, NYSE, -78.13, 2013-08-02\\n BGC, NYSE, 23.21, 2013-08-01\\n GD, NYSE, 11.73, 2013-07-25\\n GE, NYSE, 0.00, 2013-07-20\\n RHP, NYSE, -26.85, 2013-08-07\\n AXLL, NYSE, 2.59, 2013-08-01\\n GGG, NYSE, 9.52, 2013-07-25\\n GHM, NYSE, 52.00, 2013-07-26\\n GIB, NYSE, 10.71, 2013-08-01\\n GLT, NYSE, 20.00, 2013-07-31\\n GLW, NYSE, 3.23, 2013-07-31\\n 
GSK, NYSE, -5.88, 2013-07-25\\n GLF, NYSE, 25.71, 2013-07-23\\n GPC, NYSE, 14.88, 2013-07-19\\n GRA, NYSE, 2.75, 2013-07-26\\n GTY, NYSE, 36.00, 2013-08-08\\n GWW, NYSE, 2.71, 2013-07-18\\n HAE, NYSE, 0.00, 2013-07-30\\n HAL, NYSE, 1.39, 2013-07-23\\n HAR, NYSE, 4.60, 2013-08-07\\n HVT, NYSE, 31.25, 2013-08-01\\n HRC, NYSE, 0.00, 2013-07-25\\n HCC, NYSE, 21.69, 2013-07-31\\n HCN, NYSE, 1.09, 2013-08-07\\n HCP, NYSE, -2.70, 2013-07-31\\n HOG, NYSE, 3.42, 2013-07-26\\n HE, NYSE, 7.89, 2013-08-09\\n HMA, NYSE, -46.15, 2013-08-10\\n HMN, NYSE, 30.00, 2013-07-25\\n HFC, NYSE, 0.00, 2013-08-08\\n HOT, NYSE, 8.22, 2013-07-26\\n HP, NYSE, 6.67, 2013-07-27\\n HLS, NYSE, 18.60, 2013-07-26\\n HRS, NYSE, 23.68, 2013-07-31\\n HSC, NYSE, -11.76, 2013-08-09\\n HSY, NYSE, 1.41, 2013-07-26\\n HUBB, NYSE, 5.38, 2013-07-19\\n HUM, NYSE, 6.91, 2013-08-01\\n HXL, NYSE, 2.13, 2013-07-23\\n IBM, NYSE, 3.44, 2013-07-18\\n IDA, NYSE, 33.82, 2013-08-02\\n IEX, NYSE, 2.70, 2013-07-23\\n IFF, NYSE, -3.39, 2013-08-07\\n DIN, NYSE, 12.09, 2013-07-31\\n INT, NYSE, 11.76, 2013-08-01\\n IP, NYSE, -5.45, 2013-07-26\\n IPG, NYSE, -14.29, 2013-07-20\\n IO, NYSE, -100.00, 2013-08-08\\n IR, NYSE, 5.56, 2013-07-20\\n IRF, NYSE, 81.82, 2013-08-20\\n ITW, NYSE, -0.92, 2013-07-24\\n JEC, NYSE, -1.19, 2013-07-30\\n JNJ, NYSE, 5.71, 2013-07-17\\n JNY, NYSE, 116.67, 2013-08-01\\n K, NYSE, 3.09, 2013-08-02\\n KAMN, NYSE, 13.56, 2013-07-30\\n KDN, NYSE, 10.53, 2013-07-26\\n KEX, NYSE, 0.94, 2013-07-25\\n KEY, NYSE, 5.00, 2013-07-19\\n KIM, NYSE, 6.06, 2013-07-30\\n KMB, NYSE, 1.44, 2013-07-23\\n KEM, NYSE, -95.00, 2013-07-26\\n KMT, NYSE, 4.11, 2013-07-26\\n KO, NYSE, 0.00, 2013-07-17\\n KSU, NYSE, 1.05, 2013-07-20\\n LDR, NYSE, -19.64, 2013-08-06\\n LEG, NYSE, 0.00, 2013-07-26\\n LLY, NYSE, 13.73, 2013-07-25\\n LM, NYSE, -1.45, 2013-07-26\\n LNC, NYSE, 10.43, 2013-08-01\\n LPX, NYSE, 32.26, 2013-08-07\\n LXU, NYSE, 29.17, 2013-08-09\\n LTC, NYSE, -3.39, 2013-08-09\\n L, NYSE, -5.48, 2013-07-30\\n LUV, NYSE, -2.56, 2013-07-26\\n LUX, NYSE, -1.67, 2013-07-26\\n MKL, NYSE, 7.46, 2013-08-08\\n MAN, NYSE, 17.98, 2013-07-20\\n MTW, NYSE, 25.00, 2013-07-30\\n SM, NYSE, 0.00, 2013-07-31\\n MAS, NYSE, 21.05, 2013-07-30\\n MTZ, NYSE, 2.33, 2013-08-02\\n MCD, NYSE, -1.43, 2013-07-23\\n MDC, NYSE, 38.18, 2013-07-31\\n MDP, NYSE, 5.63, 2013-07-26\\n MDR, NYSE, -1966.67, 2013-08-06\\n MDU, NYSE, -3.85, 2013-08-01\\n MED, NYSE, 2.00, 2013-08-07\\n CVS, NYSE, 1.04, 2013-08-07\\n MFC, NYSE, -3.12, 2013-08-09\\n MGA, NYSE, 11.25, 2013-08-10\\n MGM, NYSE, 300.00, 2013-08-07\\n MMC, NYSE, 2.94, 2013-08-08\\n MMM, NYSE, 0.59, 2013-07-26\\n MSA, NYSE, 0.00, 2013-07-25\\n MNR, NYSE, -27.78, 2013-08-07\\n MO, NYSE, -1.59, 2013-07-24\\n MOD, NYSE, 145.45, 2013-08-02\\nMOG.A, NYSE, 8.43, 2013-07-27\\n MHK, NYSE, 10.84, 2013-08-02\\n MSI, NYSE, 11.96, 2013-07-25\\n MCY, NYSE, 3.28, 2013-07-30\\n MRK, NYSE, 2.44, 2013-07-31\\n MRO, NYSE, -5.63, 2013-08-07\\n POWR, NYSE, 20.00, 2013-08-08\\n MTG, NYSE, 118.75, 2013-07-24\\n MTB, NYSE, 26.19, 2013-07-18\\n MTX, NYSE, 8.62, 2013-07-26\\n MUR, NYSE, 12.90, 2013-08-01\\n MYE, NYSE, 19.05, 2013-07-19\\n NBL, NYSE, -5.48, 2013-07-26\\n NBR, NYSE, -11.11, 2013-07-24\\n NE, NYSE, 12.50, 2013-07-18\\n NEM, NYSE, -124.39, 2013-07-27\\n NFG, NYSE, 6.15, 2013-08-09\\n NHI, NYSE, -1.14, 2013-08-07\\n NI, NYSE, -4.17, 2013-08-01\\n NJR, NYSE, 15.00, 2013-08-08\\n THC, NYSE, -4.35, 2013-08-07\\n NNN, NYSE, 0.00, 2013-08-02\\n NOC, NYSE, 20.59, 2013-07-25\\n NR, NYSE, -5.26, 2013-07-26\\n NSC, NYSE, -2.67, 2013-07-24\\n NUE, 
NYSE, -10.00, 2013-07-19\\n NVR, NYSE, -18.34, 2013-07-23\\n NWL, NYSE, 2.04, 2013-07-27\\n NWN, NYSE, -11.11, 2013-08-08\\n NYT, NYSE, 16.67, 2013-08-02\\n OCR, NYSE, 4.65, 2013-07-25\\n OGE, NYSE, -2.13, 2013-08-09\\n OHI, NYSE, 1.64, 2013-08-01\\n OI, NYSE, 2.53, 2013-07-25\\n OII, NYSE, 8.33, 2013-07-25\\n OKE, NYSE, -225.93, 2013-07-31\\n OLN, NYSE, 3.85, 2013-07-26\\n BRS, NYSE, 1.01, 2013-08-06\\n OMC, NYSE, 0.00, 2013-07-19\\n OMI, NYSE, 0.00, 2013-07-30\\n ORB, NYSE, 17.39, 2013-07-19\\n ORI, NYSE, 1750.00, 2013-07-26\\n OSK, NYSE, 53.21, 2013-07-31\\n OXY, NYSE, -1.86, 2013-07-31\\n FCFS, NYSE, 1.79, 2013-07-18\\n PBI, NYSE, 15.56, 2013-07-31\\n PCG, NYSE, 9.72, 2013-08-01\\n PCL, NYSE, 21.74, 2013-07-30\\n PCP, NYSE, -0.69, 2013-07-26\\n TPC, NYSE, -11.11, 2013-08-10\\n PEG, NYSE, 4.35, 2013-07-31\\n PEI, NYSE, 7.69, 2013-07-24\\n PEP, NYSE, 10.08, 2013-07-25\\n PFE, NYSE, 3.70, 2013-07-31\\n PG, NYSE, 2.60, 2013-08-02\\n PGR, NYSE, -2.44, 2013-07-12\\n PH, NYSE, -8.72, 2013-08-07\\n PHM, NYSE, -10.34, 2013-07-26\\n PKD, NYSE, 0.00, 2013-08-07\\n PKY, NYSE, 0.00, 2013-08-06\\n PNC, NYSE, 21.34, 2013-07-18\\n PNM, NYSE, 15.15, 2013-08-03\\n PNR, NYSE, 2.22, 2013-07-24\\n PNW, NYSE, 3.51, 2013-08-03\\n POM, NYSE, -8.33, 2013-08-08\\n POT, NYSE, -10.98, 2013-07-26\\n PPG, NYSE, 4.70, 2013-07-19\\n PPL, NYSE, 0.00, 2013-08-02'\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "pandas.compat.StringIO", "numpy.set_printoptions" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
SJCosgrove/quantoipian
[ "8beba055aa4211dc2debc5c3083077cbd19d0bbc", "8beba055aa4211dc2debc5c3083077cbd19d0bbc" ]
[ "zipline/data/history_loader.py", "zipline/utils/calendars/exchange_calendar_bmf.py" ]
[ "# Copyright 2016 Quantopian, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom abc import (\n ABCMeta,\n abstractmethod,\n abstractproperty,\n)\n\nfrom numpy import concatenate\nfrom lru import LRU\nfrom pandas import isnull\nfrom pandas.tslib import normalize_date\nfrom toolz import sliding_window\n\nfrom six import with_metaclass\n\nfrom zipline.assets import Equity, Future\nfrom zipline.assets.continuous_futures import ContinuousFuture\nfrom zipline.lib._int64window import AdjustedArrayWindow as Int64Window\nfrom zipline.lib._float64window import AdjustedArrayWindow as Float64Window\nfrom zipline.lib.adjustment import Float64Multiply, Float64Add\nfrom zipline.utils.cache import ExpiringCache\nfrom zipline.utils.math_utils import number_of_decimal_places\nfrom zipline.utils.memoize import lazyval\nfrom zipline.utils.numpy_utils import float64_dtype\nfrom zipline.utils.pandas_utils import find_in_sorted_index\n\n# Default number of decimal places used for rounding asset prices.\nDEFAULT_ASSET_PRICE_DECIMALS = 3\n\n\nclass HistoryCompatibleUSEquityAdjustmentReader(object):\n\n def __init__(self, adjustment_reader):\n self._adjustments_reader = adjustment_reader\n\n def load_adjustments(self, columns, dts, assets):\n \"\"\"\n Returns\n -------\n adjustments : list[dict[int -> Adjustment]]\n A list, where each element corresponds to the `columns`, of\n mappings from index to adjustment objects to apply at that index.\n \"\"\"\n out = [None] * len(columns)\n for i, column in enumerate(columns):\n adjs = {}\n for asset in assets:\n adjs.update(self._get_adjustments_in_range(\n asset, dts, column))\n out[i] = adjs\n return out\n\n def _get_adjustments_in_range(self, asset, dts, field):\n \"\"\"\n Get the Float64Multiply objects to pass to an AdjustedArrayWindow.\n\n For the use of AdjustedArrayWindow in the loader, which looks back\n from current simulation time back to a window of data the dictionary is\n structured with:\n - the key into the dictionary for adjustments is the location of the\n day from which the window is being viewed.\n - the start of all multiply objects is always 0 (in each window all\n adjustments are overlapping)\n - the end of the multiply object is the location before the calendar\n location of the adjustment action, making all days before the event\n adjusted.\n\n Parameters\n ----------\n asset : Asset\n The assets for which to get adjustments.\n dts : iterable of datetime64-like\n The dts for which adjustment data is needed.\n field : str\n OHLCV field for which to get the adjustments.\n\n Returns\n -------\n out : dict[loc -> Float64Multiply]\n The adjustments as a dict of loc -> Float64Multiply\n \"\"\"\n sid = int(asset)\n start = normalize_date(dts[0])\n end = normalize_date(dts[-1])\n adjs = {}\n if field != 'volume':\n mergers = self._adjustments_reader.get_adjustments_for_sid(\n 'mergers', sid)\n for m in mergers:\n dt = m[0]\n if start < dt <= end:\n end_loc = dts.searchsorted(dt)\n adj_loc = end_loc\n mult = Float64Multiply(0,\n end_loc - 1,\n 
0,\n 0,\n m[1])\n try:\n adjs[adj_loc].append(mult)\n except KeyError:\n adjs[adj_loc] = [mult]\n divs = self._adjustments_reader.get_adjustments_for_sid(\n 'dividends', sid)\n for d in divs:\n dt = d[0]\n if start < dt <= end:\n end_loc = dts.searchsorted(dt)\n adj_loc = end_loc\n mult = Float64Multiply(0,\n end_loc - 1,\n 0,\n 0,\n d[1])\n try:\n adjs[adj_loc].append(mult)\n except KeyError:\n adjs[adj_loc] = [mult]\n splits = self._adjustments_reader.get_adjustments_for_sid(\n 'splits', sid)\n for s in splits:\n dt = s[0]\n if start < dt <= end:\n if field == 'volume':\n ratio = 1.0 / s[1]\n else:\n ratio = s[1]\n end_loc = dts.searchsorted(dt)\n adj_loc = end_loc\n mult = Float64Multiply(0,\n end_loc - 1,\n 0,\n 0,\n ratio)\n try:\n adjs[adj_loc].append(mult)\n except KeyError:\n adjs[adj_loc] = [mult]\n return adjs\n\n\nclass ContinuousFutureAdjustmentReader(object):\n \"\"\"\n Calculates adjustments for continuous futures, based on the\n close and open of the contracts on the either side of each roll.\n \"\"\"\n\n def __init__(self,\n trading_calendar,\n asset_finder,\n bar_reader,\n roll_finders,\n frequency):\n self._trading_calendar = trading_calendar\n self._asset_finder = asset_finder\n self._bar_reader = bar_reader\n self._roll_finders = roll_finders\n self._frequency = frequency\n\n def load_adjustments(self, columns, dts, assets):\n \"\"\"\n Returns\n -------\n adjustments : list[dict[int -> Adjustment]]\n A list, where each element corresponds to the `columns`, of\n mappings from index to adjustment objects to apply at that index.\n \"\"\"\n out = [None] * len(columns)\n for i, column in enumerate(columns):\n adjs = {}\n for asset in assets:\n adjs.update(self._get_adjustments_in_range(\n asset, dts, column))\n out[i] = adjs\n return out\n\n def _make_adjustment(self,\n adjustment_type,\n front_close,\n back_close,\n end_loc):\n adj_base = back_close - front_close\n if adjustment_type == 'mul':\n adj_value = 1.0 + adj_base / front_close\n adj_class = Float64Multiply\n elif adjustment_type == 'add':\n adj_value = adj_base\n adj_class = Float64Add\n return adj_class(0,\n end_loc,\n 0,\n 0,\n adj_value)\n\n def _get_adjustments_in_range(self, cf, dts, field):\n if field == 'volume' or field == 'sid':\n return {}\n if cf.adjustment is None:\n return {}\n rf = self._roll_finders[cf.roll_style]\n partitions = []\n\n rolls = rf.get_rolls(cf.root_symbol, dts[0], dts[-1],\n cf.offset)\n\n tc = self._trading_calendar\n\n adjs = {}\n\n for front, back in sliding_window(2, rolls):\n front_sid, roll_dt = front\n back_sid = back[0]\n dt = tc.previous_session_label(roll_dt)\n if self._frequency == 'minute':\n dt = tc.open_and_close_for_session(dt)[1]\n roll_dt = tc.open_and_close_for_session(roll_dt)[0]\n partitions.append((front_sid,\n back_sid,\n dt,\n roll_dt))\n for partition in partitions:\n front_sid, back_sid, dt, roll_dt = partition\n last_front_dt = self._bar_reader.get_last_traded_dt(\n self._asset_finder.retrieve_asset(front_sid), dt)\n last_back_dt = self._bar_reader.get_last_traded_dt(\n self._asset_finder.retrieve_asset(back_sid), dt)\n if isnull(last_front_dt) or isnull(last_back_dt):\n continue\n front_close = self._bar_reader.get_value(\n front_sid, last_front_dt, 'close')\n back_close = self._bar_reader.get_value(\n back_sid, last_back_dt, 'close')\n adj_loc = dts.searchsorted(roll_dt)\n end_loc = adj_loc - 1\n adj = self._make_adjustment(cf.adjustment,\n front_close,\n back_close,\n end_loc)\n try:\n adjs[adj_loc].append(adj)\n except KeyError:\n adjs[adj_loc] = [adj]\n 
return adjs\n\n\nclass SlidingWindow(object):\n \"\"\"\n Wrapper around an AdjustedArrayWindow which supports monotonically\n increasing (by datetime) requests for a sized window of data.\n\n Parameters\n ----------\n window : AdjustedArrayWindow\n Window of pricing data with prefetched values beyond the current\n simulation dt.\n cal_start : int\n Index in the overall calendar at which the window starts.\n \"\"\"\n\n def __init__(self, window, size, cal_start, offset):\n self.window = window\n self.cal_start = cal_start\n self.current = next(window)\n self.offset = offset\n self.most_recent_ix = self.cal_start + size\n\n def get(self, end_ix):\n \"\"\"\n Returns\n -------\n out : A np.ndarray of the equity pricing up to end_ix after adjustments\n and rounding have been applied.\n \"\"\"\n if self.most_recent_ix == end_ix:\n return self.current\n\n target = end_ix - self.cal_start - self.offset + 1\n self.current = self.window.seek(target)\n\n self.most_recent_ix = end_ix\n return self.current\n\n\nclass HistoryLoader(with_metaclass(ABCMeta)):\n \"\"\"\n Loader for sliding history windows, with support for adjustments.\n\n Parameters\n ----------\n trading_calendar: TradingCalendar\n Contains the grouping logic needed to assign minutes to periods.\n reader : DailyBarReader, MinuteBarReader\n Reader for pricing bars.\n adjustment_reader : SQLiteAdjustmentReader\n Reader for adjustment data.\n \"\"\"\n FIELDS = ('open', 'high', 'low', 'close', 'volume', 'sid')\n\n def __init__(self, trading_calendar, reader, equity_adjustment_reader,\n asset_finder,\n roll_finders=None,\n sid_cache_size=1000,\n prefetch_length=0):\n self.trading_calendar = trading_calendar\n self._asset_finder = asset_finder\n self._reader = reader\n self._adjustment_readers = {}\n if equity_adjustment_reader is not None:\n self._adjustment_readers[Equity] = \\\n HistoryCompatibleUSEquityAdjustmentReader(\n equity_adjustment_reader)\n if roll_finders:\n self._adjustment_readers[ContinuousFuture] =\\\n ContinuousFutureAdjustmentReader(trading_calendar,\n asset_finder,\n reader,\n roll_finders,\n self._frequency)\n self._window_blocks = {\n field: ExpiringCache(LRU(sid_cache_size))\n for field in self.FIELDS\n }\n self._prefetch_length = prefetch_length\n\n @abstractproperty\n def _frequency(self):\n pass\n\n @abstractproperty\n def _calendar(self):\n pass\n\n @abstractmethod\n def _array(self, start, end, assets, field):\n pass\n\n def _decimal_places_for_asset(self, asset, reference_date):\n if isinstance(asset, Future) and asset.tick_size:\n return number_of_decimal_places(asset.tick_size)\n elif isinstance(asset, ContinuousFuture):\n # Tick size should be the same for all contracts of a continuous\n # future, so arbitrarily get the contract with next upcoming auto\n # close date.\n oc = self._asset_finder.get_ordered_contracts(asset.root_symbol)\n contract_sid = oc.contract_before_auto_close(reference_date.value)\n if contract_sid is not None:\n contract = self._asset_finder.retrieve_asset(contract_sid)\n if contract.tick_size:\n return number_of_decimal_places(contract.tick_size)\n return DEFAULT_ASSET_PRICE_DECIMALS\n\n def _ensure_sliding_windows(self, assets, dts, field,\n is_perspective_after):\n \"\"\"\n Ensure that there is a Float64Multiply window for each asset that can\n provide data for the given parameters.\n If the corresponding window for the (assets, len(dts), field) does not\n exist, then create a new one.\n If a corresponding window does exist for (assets, len(dts), field), but\n can not provide data for 
the current dts range, then create a new\n one and replace the expired window.\n\n Parameters\n ----------\n assets : iterable of Assets\n The assets in the window\n dts : iterable of datetime64-like\n The datetimes for which to fetch data.\n Makes an assumption that all dts are present and contiguous,\n in the calendar.\n field : str\n The OHLCV field for which to retrieve data.\n is_perspective_after : bool\n see: `PricingHistoryLoader.history`\n\n Returns\n -------\n out : list of Float64Window with sufficient data so that each asset's\n window can provide `get` for the index corresponding with the last\n value in `dts`\n \"\"\"\n end = dts[-1]\n size = len(dts)\n asset_windows = {}\n needed_assets = []\n cal = self._calendar\n\n assets = self._asset_finder.retrieve_all(assets)\n end_ix = find_in_sorted_index(cal, end)\n\n for asset in assets:\n try:\n window = self._window_blocks[field].get(\n (asset, size, is_perspective_after), end)\n except KeyError:\n needed_assets.append(asset)\n else:\n if end_ix < window.most_recent_ix:\n # Window needs reset. Requested end index occurs before the\n # end index from the previous history call for this window.\n # Grab new window instead of rewinding adjustments.\n needed_assets.append(asset)\n else:\n asset_windows[asset] = window\n\n if needed_assets:\n offset = 0\n start_ix = find_in_sorted_index(cal, dts[0])\n\n prefetch_end_ix = min(end_ix + self._prefetch_length, len(cal) - 1)\n prefetch_end = cal[prefetch_end_ix]\n prefetch_dts = cal[start_ix:prefetch_end_ix + 1]\n if is_perspective_after:\n adj_end_ix = min(prefetch_end_ix + 1, len(cal) - 1)\n adj_dts = cal[start_ix:adj_end_ix + 1]\n else:\n adj_dts = prefetch_dts\n prefetch_len = len(prefetch_dts)\n array = self._array(prefetch_dts, needed_assets, field)\n\n if field == 'sid':\n window_type = Int64Window\n else:\n window_type = Float64Window\n\n view_kwargs = {}\n if field == 'volume':\n array = array.astype(float64_dtype)\n\n for i, asset in enumerate(needed_assets):\n adj_reader = None\n try:\n adj_reader = self._adjustment_readers[type(asset)]\n except KeyError:\n adj_reader = None\n if adj_reader is not None:\n adjs = adj_reader.load_adjustments(\n [field], adj_dts, [asset])[0]\n else:\n adjs = {}\n window = window_type(\n array[:, i].reshape(prefetch_len, 1),\n view_kwargs,\n adjs,\n offset,\n size,\n int(is_perspective_after),\n self._decimal_places_for_asset(asset, dts[-1]),\n )\n sliding_window = SlidingWindow(window, size, start_ix, offset)\n asset_windows[asset] = sliding_window\n self._window_blocks[field].set(\n (asset, size, is_perspective_after),\n sliding_window,\n prefetch_end)\n\n return [asset_windows[asset] for asset in assets]\n\n def history(self, assets, dts, field, is_perspective_after):\n \"\"\"\n A window of pricing data with adjustments applied assuming that the\n end of the window is the day before the current simulation time.\n\n Parameters\n ----------\n assets : iterable of Assets\n The assets in the window.\n dts : iterable of datetime64-like\n The datetimes for which to fetch data.\n Makes an assumption that all dts are present and contiguous,\n in the calendar.\n field : str\n The OHLCV field for which to retrieve data.\n is_perspective_after : bool\n True, if the window is being viewed immediately after the last dt\n in the sliding window.\n False, if the window is viewed on the last dt.\n\n This flag is used for handling the case where the last dt in the\n requested window immediately precedes a corporate action, e.g.:\n\n - is_perspective_after is 
True\n\n When the viewpoint is after the last dt in the window, as when a\n daily history window is accessed from a simulation that uses a\n minute data frequency, the history call to this loader will not\n include the current simulation dt. At that point in time, the raw\n data for the last day in the window will require adjustment, so the\n most recent adjustment with respect to the simulation time is\n applied to the last dt in the requested window.\n\n An example equity which has a 0.5 split ratio dated for 05-27,\n with the dts for a history call of 5 bars with a '1d' frequency at\n 05-27 9:31. Simulation frequency is 'minute'.\n\n (In this case this function is called with 4 daily dts, and the\n calling function is responsible for stitching back on the\n 'current' dt)\n\n | | | | | last dt | <-- viewer is here |\n | | 05-23 | 05-24 | 05-25 | 05-26 | 05-27 9:31 |\n | raw | 10.10 | 10.20 | 10.30 | 10.40 | |\n | adj | 5.05 | 5.10 | 5.15 | 5.25 | |\n\n The adjustment is applied to the last dt, 05-26, and all previous\n dts.\n\n - is_perspective_after is False, daily\n\n When the viewpoint is the same point in time as the last dt in the\n window, as when a daily history window is accessed from a\n simulation that uses a daily data frequency, the history call will\n include the current dt. At that point in time, the raw data for the\n last day in the window will be post-adjustment, so no adjustment\n is applied to the last dt.\n\n An example equity which has a 0.5 split ratio dated for 05-27,\n with the dts for a history call of 5 bars with a '1d' frequency at\n 05-27 0:00. Simulation frequency is 'daily'.\n\n | | | | | | <-- viewer is here |\n | | | | | | last dt |\n | | 05-23 | 05-24 | 05-25 | 05-26 | 05-27 |\n | raw | 10.10 | 10.20 | 10.30 | 10.40 | 5.25 |\n | adj | 5.05 | 5.10 | 5.15 | 5.20 | 5.25 |\n\n Adjustments are applied 05-23 through 05-26 but not to the last dt,\n 05-27\n\n Returns\n -------\n out : np.ndarray with shape(len(days between start, end), len(assets))\n \"\"\"\n block = self._ensure_sliding_windows(assets,\n dts,\n field,\n is_perspective_after)\n end_ix = self._calendar.searchsorted(dts[-1])\n\n return concatenate(\n [window.get(end_ix) for window in block],\n axis=1,\n )\n\n\nclass DailyHistoryLoader(HistoryLoader):\n\n @property\n def _frequency(self):\n return 'daily'\n\n @property\n def _calendar(self):\n return self._reader.sessions\n\n def _array(self, dts, assets, field):\n return self._reader.load_raw_arrays(\n [field],\n dts[0],\n dts[-1],\n assets,\n )[0]\n\n\nclass MinuteHistoryLoader(HistoryLoader):\n\n @property\n def _frequency(self):\n return 'minute'\n\n @lazyval\n def _calendar(self):\n mm = self.trading_calendar.all_minutes\n start = mm.searchsorted(self._reader.first_trading_day)\n end = mm.searchsorted(self._reader.last_available_dt, side='right')\n return mm[start:end]\n\n def _array(self, dts, assets, field):\n return self._reader.load_raw_arrays(\n [field],\n dts[0],\n dts[-1],\n assets,\n )[0]\n", "#\n# Copyright 2016 Quantopian, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing 
permissions and\n# limitations under the License.\n\nfrom datetime import time\nfrom pandas.tseries.holiday import (\n Holiday,\n Easter,\n Day,\n GoodFriday,\n)\nfrom pytz import timezone\n\nfrom .trading_calendar import (\n TradingCalendar,\n FRIDAY,\n HolidayCalendar)\n\n# Universal Confraternization (new years day)\nConfUniversal = Holiday(\n 'Dia da Confraternizacao Universal',\n month=1,\n day=1,\n)\n# Sao Paulo city birthday\nAniversarioSaoPaulo = Holiday(\n 'Aniversario de Sao Paulo',\n month=1,\n day=25,\n)\n# Carnival Monday\nCarnavalSegunda = Holiday(\n 'Carnaval Segunda',\n month=1,\n day=1,\n offset=[Easter(), Day(-48)]\n)\n# Carnival Tuesday\nCarnavalTerca = Holiday(\n 'Carnaval Terca',\n month=1,\n day=1,\n offset=[Easter(), Day(-47)]\n)\n# Ash Wednesday (short day)\nQuartaCinzas = Holiday(\n 'Quarta Cinzas',\n month=1,\n day=1,\n offset=[Easter(), Day(-46)]\n)\n# Good Friday\nSextaPaixao = GoodFriday\n# Feast of the Most Holy Body of Christ\nCorpusChristi = Holiday(\n 'Corpus Christi',\n month=1,\n day=1,\n offset=[Easter(), Day(60)]\n)\n# Tiradentes Memorial\nTiradentes = Holiday(\n 'Tiradentes',\n month=4,\n day=21,\n)\n# Labor Day\nDiaTrabalho = Holiday(\n 'Dia Trabalho',\n month=5,\n day=1,\n)\n# Constitutionalist Revolution\nConstitucionalista = Holiday(\n 'Constitucionalista',\n month=7,\n day=9,\n start_date='1997-01-01'\n)\n# Independence Day\nIndependencia = Holiday(\n 'Independencia',\n month=9,\n day=7,\n)\n# Our Lady of Aparecida\nAparecida = Holiday(\n 'Nossa Senhora de Aparecida',\n month=10,\n day=12,\n)\n# All Souls' Day\nFinados = Holiday(\n 'Dia dos Finados',\n month=11,\n day=2,\n)\n# Proclamation of the Republic\nProclamacaoRepublica = Holiday(\n 'Proclamacao da Republica',\n month=11,\n day=15,\n)\n# Day of Black Awareness\nConscienciaNegra = Holiday(\n 'Dia da Consciencia Negra',\n month=11,\n day=20,\n start_date='2004-01-01'\n)\n# Christmas Eve\nVesperaNatal = Holiday(\n 'Vespera Natal',\n month=12,\n day=24,\n)\n# Christmas\nNatal = Holiday(\n 'Natal',\n month=12,\n day=25,\n)\n# New Year's Eve\nAnoNovo = Holiday(\n 'Ano Novo',\n month=12,\n day=31,\n)\n# New Year's Eve falls on Saturday\nAnoNovoSabado = Holiday(\n 'Ano Novo Sabado',\n month=12,\n day=30,\n days_of_week=(FRIDAY,),\n)\n\n\nclass BMFExchangeCalendar(TradingCalendar):\n \"\"\"\n Exchange calendar for BM&F BOVESPA\n\n Open Time: 10:00 AM, Brazil/Sao Paulo\n Close Time: 4:00 PM, Brazil/Sao Paulo\n\n Regularly-Observed Holidays:\n - Universal Confraternization (New year's day, Jan 1)\n - Sao Paulo City Anniversary (Jan 25)\n - Carnaval Monday (48 days before Easter)\n - Carnaval Tuesday (47 days before Easter)\n - Passion of the Christ (Good Friday, 2 days before Easter)\n - Corpus Christi (60 days after Easter)\n - Tiradentes (April 21)\n - Labor day (May 1)\n - Constitutionalist Revolution (July 9 after 1997)\n - Independence Day (September 7)\n - Our Lady of Aparecida Feast (October 12)\n - All Souls' Day (November 2)\n - Proclamation of the Republic (November 15)\n - Day of Black Awareness (November 20 after 2004)\n - Christmas (December 24 and 25)\n - Day before New Year's Eve (December 30 if NYE falls on a Saturday)\n - New Year's Eve (December 31)\n \"\"\"\n\n @property\n def name(self):\n return \"BMF\"\n\n @property\n def tz(self):\n return timezone(\"America/Sao_Paulo\")\n\n @property\n def open_time(self):\n return time(10, 1)\n\n @property\n def close_time(self):\n return time(16)\n\n @property\n def regular_holidays(self):\n return HolidayCalendar([\n ConfUniversal,\n 
AniversarioSaoPaulo,\n CarnavalSegunda,\n CarnavalTerca,\n SextaPaixao,\n CorpusChristi,\n Tiradentes,\n DiaTrabalho,\n Constitucionalista,\n Independencia,\n Aparecida,\n Finados,\n ProclamacaoRepublica,\n ConscienciaNegra,\n VesperaNatal,\n Natal,\n AnoNovo,\n AnoNovoSabado,\n ])\n\n @property\n def special_opens(self):\n return [\n (time(13, 1), HolidayCalendar([QuartaCinzas]))\n ]\n" ]
[ [ "pandas.tslib.normalize_date", "pandas.isnull" ], [ "pandas.tseries.holiday.Easter", "pandas.tseries.holiday.Holiday", "pandas.tseries.holiday.Day" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "0.19", "0.24", "0.20", "1.0", "0.25" ], "scipy": [], "tensorflow": [] } ]
taroxd/mindspore
[ "9bb620ff2caaac7f1c53c4b104935f22352cb88f", "9bb620ff2caaac7f1c53c4b104935f22352cb88f", "9bb620ff2caaac7f1c53c4b104935f22352cb88f", "9bb620ff2caaac7f1c53c4b104935f22352cb88f", "9bb620ff2caaac7f1c53c4b104935f22352cb88f" ]
[ "model_zoo/official/cv/ssd/src/dataset.py", "model_zoo/official/nlp/lstm/eval.py", "tests/ut/python/pipeline/parse/test_grammar_constraints.py", "model_zoo/official/cv/mobilenetv2/export.py", "mindspore/dataset/engine/graphdata.py" ]
[ "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\n\"\"\"SSD dataset\"\"\"\n\nfrom __future__ import division\n\nimport os\nimport json\nimport xml.etree.ElementTree as et\nimport numpy as np\nimport cv2\n\nimport mindspore.dataset as de\nimport mindspore.dataset.vision.c_transforms as C\nfrom mindspore.mindrecord import FileWriter\nfrom .config import config\nfrom .box_utils import jaccard_numpy, ssd_bboxes_encode\n\n\ndef _rand(a=0., b=1.):\n \"\"\"Generate random.\"\"\"\n return np.random.rand() * (b - a) + a\n\n\ndef get_imageId_from_fileName(filename, id_iter):\n \"\"\"Get imageID from fileName if fileName is int, else return id_iter.\"\"\"\n filename = os.path.splitext(filename)[0]\n if filename.isdigit():\n return int(filename)\n return id_iter\n\n\ndef random_sample_crop(image, boxes):\n \"\"\"Random Crop the image and boxes\"\"\"\n height, width, _ = image.shape\n min_iou = np.random.choice([None, 0.1, 0.3, 0.5, 0.7, 0.9])\n\n if min_iou is None:\n return image, boxes\n\n # max trails (50)\n for _ in range(50):\n image_t = image\n\n w = _rand(0.3, 1.0) * width\n h = _rand(0.3, 1.0) * height\n\n # aspect ratio constraint b/t .5 & 2\n if h / w < 0.5 or h / w > 2:\n continue\n\n left = _rand() * (width - w)\n top = _rand() * (height - h)\n\n rect = np.array([int(top), int(left), int(top + h), int(left + w)])\n overlap = jaccard_numpy(boxes, rect)\n\n # dropout some boxes\n drop_mask = overlap > 0\n if not drop_mask.any():\n continue\n\n if overlap[drop_mask].min() < min_iou and overlap[drop_mask].max() > (min_iou + 0.2):\n continue\n\n image_t = image_t[rect[0]:rect[2], rect[1]:rect[3], :]\n\n centers = (boxes[:, :2] + boxes[:, 2:4]) / 2.0\n\n m1 = (rect[0] < centers[:, 0]) * (rect[1] < centers[:, 1])\n m2 = (rect[2] > centers[:, 0]) * (rect[3] > centers[:, 1])\n\n # mask in that both m1 and m2 are true\n mask = m1 * m2 * drop_mask\n\n # have any valid boxes? 
try again if not\n if not mask.any():\n continue\n\n # take only matching gt boxes\n boxes_t = boxes[mask, :].copy()\n\n boxes_t[:, :2] = np.maximum(boxes_t[:, :2], rect[:2])\n boxes_t[:, :2] -= rect[:2]\n boxes_t[:, 2:4] = np.minimum(boxes_t[:, 2:4], rect[2:4])\n boxes_t[:, 2:4] -= rect[:2]\n\n return image_t, boxes_t\n return image, boxes\n\n\ndef preprocess_fn(img_id, image, box, is_training):\n \"\"\"Preprocess function for dataset.\"\"\"\n cv2.setNumThreads(2)\n\n def _infer_data(image, input_shape):\n img_h, img_w, _ = image.shape\n input_h, input_w = input_shape\n\n image = cv2.resize(image, (input_w, input_h))\n\n # When the channels of image is 1\n if len(image.shape) == 2:\n image = np.expand_dims(image, axis=-1)\n image = np.concatenate([image, image, image], axis=-1)\n\n return img_id, image, np.array((img_h, img_w), np.float32)\n\n def _data_aug(image, box, is_training, image_size=(300, 300)):\n \"\"\"Data augmentation function.\"\"\"\n ih, iw, _ = image.shape\n w, h = image_size\n\n if not is_training:\n return _infer_data(image, image_size)\n\n # Random crop\n box = box.astype(np.float32)\n image, box = random_sample_crop(image, box)\n ih, iw, _ = image.shape\n\n # Resize image\n image = cv2.resize(image, (w, h))\n\n # Flip image or not\n flip = _rand() < .5\n if flip:\n image = cv2.flip(image, 1, dst=None)\n\n # When the channels of image is 1\n if len(image.shape) == 2:\n image = np.expand_dims(image, axis=-1)\n image = np.concatenate([image, image, image], axis=-1)\n\n box[:, [0, 2]] = box[:, [0, 2]] / ih\n box[:, [1, 3]] = box[:, [1, 3]] / iw\n\n if flip:\n box[:, [1, 3]] = 1 - box[:, [3, 1]]\n\n box, label, num_match = ssd_bboxes_encode(box)\n return image, box, label, num_match\n\n return _data_aug(image, box, is_training, image_size=config.img_shape)\n\n\ndef create_voc_label(is_training):\n \"\"\"Get image path and annotation from VOC.\"\"\"\n voc_root = config.voc_root\n cls_map = {name: i for i, name in enumerate(config.classes)}\n sub_dir = 'train' if is_training else 'eval'\n voc_dir = os.path.join(voc_root, sub_dir)\n if not os.path.isdir(voc_dir):\n raise ValueError(f'Cannot find {sub_dir} dataset path.')\n\n image_dir = anno_dir = voc_dir\n if os.path.isdir(os.path.join(voc_dir, 'Images')):\n image_dir = os.path.join(voc_dir, 'Images')\n if os.path.isdir(os.path.join(voc_dir, 'Annotations')):\n anno_dir = os.path.join(voc_dir, 'Annotations')\n\n if not is_training:\n json_file = os.path.join(config.voc_root, config.voc_json)\n file_dir = os.path.split(json_file)[0]\n if not os.path.isdir(file_dir):\n os.makedirs(file_dir)\n json_dict = {\"images\": [], \"type\": \"instances\", \"annotations\": [],\n \"categories\": []}\n bnd_id = 1\n\n image_files_dict = {}\n image_anno_dict = {}\n images = []\n id_iter = 0\n for anno_file in os.listdir(anno_dir):\n print(anno_file)\n if not anno_file.endswith('xml'):\n continue\n tree = et.parse(os.path.join(anno_dir, anno_file))\n root_node = tree.getroot()\n file_name = root_node.find('filename').text\n img_id = get_imageId_from_fileName(file_name, id_iter)\n id_iter += 1\n image_path = os.path.join(image_dir, file_name)\n print(image_path)\n if not os.path.isfile(image_path):\n print(f'Cannot find image {file_name} according to annotations.')\n continue\n\n labels = []\n for obj in root_node.iter('object'):\n cls_name = obj.find('name').text\n if cls_name not in cls_map:\n print(f'Label \"{cls_name}\" not in \"{config.classes}\"')\n continue\n bnd_box = obj.find('bndbox')\n x_min = int(bnd_box.find('xmin').text) - 1\n 
y_min = int(bnd_box.find('ymin').text) - 1\n x_max = int(bnd_box.find('xmax').text) - 1\n y_max = int(bnd_box.find('ymax').text) - 1\n labels.append([y_min, x_min, y_max, x_max, cls_map[cls_name]])\n\n if not is_training:\n o_width = abs(x_max - x_min)\n o_height = abs(y_max - y_min)\n ann = {'area': o_width * o_height, 'iscrowd': 0, 'image_id': \\\n img_id, 'bbox': [x_min, y_min, o_width, o_height], \\\n 'category_id': cls_map[cls_name], 'id': bnd_id, \\\n 'ignore': 0, \\\n 'segmentation': []}\n json_dict['annotations'].append(ann)\n bnd_id = bnd_id + 1\n\n if labels:\n images.append(img_id)\n image_files_dict[img_id] = image_path\n image_anno_dict[img_id] = np.array(labels)\n\n if not is_training:\n size = root_node.find(\"size\")\n width = int(size.find('width').text)\n height = int(size.find('height').text)\n image = {'file_name': file_name, 'height': height, 'width': width,\n 'id': img_id}\n json_dict['images'].append(image)\n\n if not is_training:\n for cls_name, cid in cls_map.items():\n cat = {'supercategory': 'none', 'id': cid, 'name': cls_name}\n json_dict['categories'].append(cat)\n json_fp = open(json_file, 'w')\n json_str = json.dumps(json_dict)\n json_fp.write(json_str)\n json_fp.close()\n\n return images, image_files_dict, image_anno_dict\n\n\ndef create_coco_label(is_training):\n \"\"\"Get image path and annotation from COCO.\"\"\"\n from pycocotools.coco import COCO\n\n coco_root = config.coco_root\n data_type = config.val_data_type\n if is_training:\n data_type = config.train_data_type\n\n # Classes need to train or test.\n train_cls = config.classes\n train_cls_dict = {}\n for i, cls in enumerate(train_cls):\n train_cls_dict[cls] = i\n\n anno_json = os.path.join(coco_root, config.instances_set.format(data_type))\n\n coco = COCO(anno_json)\n classs_dict = {}\n cat_ids = coco.loadCats(coco.getCatIds())\n for cat in cat_ids:\n classs_dict[cat[\"id\"]] = cat[\"name\"]\n\n image_ids = coco.getImgIds()\n images = []\n image_path_dict = {}\n image_anno_dict = {}\n\n for img_id in image_ids:\n image_info = coco.loadImgs(img_id)\n file_name = image_info[0][\"file_name\"]\n anno_ids = coco.getAnnIds(imgIds=img_id, iscrowd=None)\n anno = coco.loadAnns(anno_ids)\n image_path = os.path.join(coco_root, data_type, file_name)\n annos = []\n iscrowd = False\n for label in anno:\n bbox = label[\"bbox\"]\n class_name = classs_dict[label[\"category_id\"]]\n iscrowd = iscrowd or label[\"iscrowd\"]\n if class_name in train_cls:\n x_min, x_max = bbox[0], bbox[0] + bbox[2]\n y_min, y_max = bbox[1], bbox[1] + bbox[3]\n annos.append(list(map(round, [y_min, x_min, y_max, x_max])) + [train_cls_dict[class_name]])\n\n if not is_training and iscrowd:\n continue\n if len(annos) >= 1:\n images.append(img_id)\n image_path_dict[img_id] = image_path\n image_anno_dict[img_id] = np.array(annos)\n\n return images, image_path_dict, image_anno_dict\n\n\ndef anno_parser(annos_str):\n \"\"\"Parse annotation from string to list.\"\"\"\n annos = []\n for anno_str in annos_str:\n anno = list(map(int, anno_str.strip().split(',')))\n annos.append(anno)\n return annos\n\n\ndef filter_valid_data(image_dir, anno_path):\n \"\"\"Filter valid image file, which both in image_dir and anno_path.\"\"\"\n images = []\n image_path_dict = {}\n image_anno_dict = {}\n if not os.path.isdir(image_dir):\n raise RuntimeError(\"Path given is not valid.\")\n if not os.path.isfile(anno_path):\n raise RuntimeError(\"Annotation file is not valid.\")\n\n with open(anno_path, \"rb\") as f:\n lines = f.readlines()\n for img_id, line in 
enumerate(lines):\n line_str = line.decode(\"utf-8\").strip()\n line_split = str(line_str).split(' ')\n file_name = line_split[0]\n image_path = os.path.join(image_dir, file_name)\n if os.path.isfile(image_path):\n images.append(img_id)\n image_path_dict[img_id] = image_path\n image_anno_dict[img_id] = anno_parser(line_split[1:])\n\n return images, image_path_dict, image_anno_dict\n\n\ndef voc_data_to_mindrecord(mindrecord_dir, is_training, prefix=\"ssd.mindrecord\", file_num=8):\n \"\"\"Create MindRecord file by image_dir and anno_path.\"\"\"\n mindrecord_path = os.path.join(mindrecord_dir, prefix)\n writer = FileWriter(mindrecord_path, file_num)\n images, image_path_dict, image_anno_dict = create_voc_label(is_training)\n\n ssd_json = {\n \"img_id\": {\"type\": \"int32\", \"shape\": [1]},\n \"image\": {\"type\": \"bytes\"},\n \"annotation\": {\"type\": \"int32\", \"shape\": [-1, 5]},\n }\n writer.add_schema(ssd_json, \"ssd_json\")\n\n for img_id in images:\n image_path = image_path_dict[img_id]\n with open(image_path, 'rb') as f:\n img = f.read()\n annos = np.array(image_anno_dict[img_id], dtype=np.int32)\n img_id = np.array([img_id], dtype=np.int32)\n row = {\"img_id\": img_id, \"image\": img, \"annotation\": annos}\n writer.write_raw_data([row])\n writer.commit()\n\n\ndef data_to_mindrecord_byte_image(dataset=\"coco\", is_training=True, prefix=\"ssd.mindrecord\", file_num=8):\n \"\"\"Create MindRecord file.\"\"\"\n mindrecord_dir = config.mindrecord_dir\n mindrecord_path = os.path.join(mindrecord_dir, prefix)\n writer = FileWriter(mindrecord_path, file_num)\n if dataset == \"coco\":\n images, image_path_dict, image_anno_dict = create_coco_label(is_training)\n else:\n images, image_path_dict, image_anno_dict = filter_valid_data(config.image_dir, config.anno_path)\n\n ssd_json = {\n \"img_id\": {\"type\": \"int32\", \"shape\": [1]},\n \"image\": {\"type\": \"bytes\"},\n \"annotation\": {\"type\": \"int32\", \"shape\": [-1, 5]},\n }\n writer.add_schema(ssd_json, \"ssd_json\")\n\n for img_id in images:\n image_path = image_path_dict[img_id]\n with open(image_path, 'rb') as f:\n img = f.read()\n annos = np.array(image_anno_dict[img_id], dtype=np.int32)\n img_id = np.array([img_id], dtype=np.int32)\n row = {\"img_id\": img_id, \"image\": img, \"annotation\": annos}\n writer.write_raw_data([row])\n writer.commit()\n\n\ndef create_ssd_dataset(mindrecord_file, batch_size=32, repeat_num=10, device_num=1, rank=0,\n is_training=True, num_parallel_workers=4, use_multiprocessing=True):\n \"\"\"Create SSD dataset with MindDataset.\"\"\"\n ds = de.MindDataset(mindrecord_file, columns_list=[\"img_id\", \"image\", \"annotation\"], num_shards=device_num,\n shard_id=rank, num_parallel_workers=num_parallel_workers, shuffle=is_training)\n decode = C.Decode()\n ds = ds.map(operations=decode, input_columns=[\"image\"])\n change_swap_op = C.HWC2CHW()\n normalize_op = C.Normalize(mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],\n std=[0.229 * 255, 0.224 * 255, 0.225 * 255])\n color_adjust_op = C.RandomColorAdjust(brightness=0.4, contrast=0.4, saturation=0.4)\n compose_map_func = (lambda img_id, image, annotation: preprocess_fn(img_id, image, annotation, is_training))\n if is_training:\n output_columns = [\"image\", \"box\", \"label\", \"num_match\"]\n trans = [color_adjust_op, normalize_op, change_swap_op]\n else:\n output_columns = [\"img_id\", \"image\", \"image_shape\"]\n trans = [normalize_op, change_swap_op]\n ds = ds.map(operations=compose_map_func, input_columns=[\"img_id\", \"image\", \"annotation\"],\n 
output_columns=output_columns, column_order=output_columns,\n python_multiprocessing=use_multiprocessing,\n num_parallel_workers=num_parallel_workers)\n ds = ds.map(operations=trans, input_columns=[\"image\"], python_multiprocessing=use_multiprocessing,\n num_parallel_workers=num_parallel_workers)\n ds = ds.batch(batch_size, drop_remainder=True)\n ds = ds.repeat(repeat_num)\n return ds\n\n\ndef create_mindrecord(dataset=\"coco\", prefix=\"ssd.mindrecord\", is_training=True):\n print(\"Start create dataset!\")\n\n # It will generate mindrecord file in config.mindrecord_dir,\n # and the file name is ssd.mindrecord0, 1, ... file_num.\n\n mindrecord_dir = config.mindrecord_dir\n mindrecord_file = os.path.join(mindrecord_dir, prefix + \"0\")\n if not os.path.exists(mindrecord_file):\n if not os.path.isdir(mindrecord_dir):\n os.makedirs(mindrecord_dir)\n if dataset == \"coco\":\n if os.path.isdir(config.coco_root):\n print(\"Create Mindrecord.\")\n data_to_mindrecord_byte_image(\"coco\", is_training, prefix)\n print(\"Create Mindrecord Done, at {}\".format(mindrecord_dir))\n else:\n print(\"coco_root not exits.\")\n elif dataset == \"voc\":\n if os.path.isdir(config.voc_root):\n print(\"Create Mindrecord.\")\n voc_data_to_mindrecord(mindrecord_dir, is_training, prefix)\n print(\"Create Mindrecord Done, at {}\".format(mindrecord_dir))\n else:\n print(\"voc_root not exits.\")\n else:\n if os.path.isdir(config.image_dir) and os.path.exists(config.anno_path):\n print(\"Create Mindrecord.\")\n data_to_mindrecord_byte_image(\"other\", is_training, prefix)\n print(\"Create Mindrecord Done, at {}\".format(mindrecord_dir))\n else:\n print(\"image_dir or anno_path not exits.\")\n return mindrecord_file\n", "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"\n#################train lstm example on aclImdb########################\n\"\"\"\nimport argparse\nimport os\n\nimport numpy as np\n\nfrom src.config import lstm_cfg as cfg, lstm_cfg_ascend\nfrom src.dataset import lstm_create_dataset, convert_to_mindrecord\nfrom src.lr_schedule import get_lr\nfrom src.lstm import SentimentNet\nfrom mindspore import Tensor, nn, Model, context\nfrom mindspore.nn import Accuracy\nfrom mindspore.train.callback import LossMonitor\nfrom mindspore.train.serialization import load_checkpoint, load_param_into_net\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='MindSpore LSTM Example')\n parser.add_argument('--preprocess', type=str, default='false', choices=['true', 'false'],\n help='whether to preprocess data.')\n parser.add_argument('--aclimdb_path', type=str, default=\"./aclImdb\",\n help='path where the dataset is stored.')\n parser.add_argument('--glove_path', type=str, default=\"./glove\",\n help='path where the GloVe is stored.')\n parser.add_argument('--preprocess_path', type=str, default=\"./preprocess\",\n help='path where the pre-process data is stored.')\n 
parser.add_argument('--ckpt_path', type=str, default=None,\n help='the checkpoint file path used to evaluate model.')\n parser.add_argument('--device_target', type=str, default=\"Ascend\", choices=['GPU', 'CPU', 'Ascend'],\n help='the target device to run, support \"GPU\", \"CPU\". Default: \"Ascend\".')\n args = parser.parse_args()\n\n context.set_context(\n mode=context.GRAPH_MODE,\n save_graphs=False,\n device_target=args.device_target)\n\n if args.device_target == 'Ascend':\n cfg = lstm_cfg_ascend\n else:\n cfg = lstm_cfg\n\n if args.preprocess == \"true\":\n print(\"============== Starting Data Pre-processing ==============\")\n convert_to_mindrecord(cfg.embed_size, args.aclimdb_path, args.preprocess_path, args.glove_path)\n\n embedding_table = np.loadtxt(os.path.join(args.preprocess_path, \"weight.txt\")).astype(np.float32)\n # DynamicRNN in this network on Ascend platform only support the condition that the shape of input_size\n # and hiddle_size is multiples of 16, this problem will be solved later.\n if args.device_target == 'Ascend':\n pad_num = int(np.ceil(cfg.embed_size / 16) * 16 - cfg.embed_size)\n if pad_num > 0:\n embedding_table = np.pad(embedding_table, [(0, 0), (0, pad_num)], 'constant')\n cfg.embed_size = int(np.ceil(cfg.embed_size / 16) * 16)\n\n network = SentimentNet(vocab_size=embedding_table.shape[0],\n embed_size=cfg.embed_size,\n num_hiddens=cfg.num_hiddens,\n num_layers=cfg.num_layers,\n bidirectional=cfg.bidirectional,\n num_classes=cfg.num_classes,\n weight=Tensor(embedding_table),\n batch_size=cfg.batch_size)\n\n loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')\n ds_eval = lstm_create_dataset(args.preprocess_path, cfg.batch_size, training=False)\n if cfg.dynamic_lr:\n lr = Tensor(get_lr(global_step=cfg.global_step,\n lr_init=cfg.lr_init, lr_end=cfg.lr_end, lr_max=cfg.lr_max,\n warmup_epochs=cfg.warmup_epochs,\n total_epochs=cfg.num_epochs,\n steps_per_epoch=ds_eval.get_dataset_size(),\n lr_adjust_epoch=cfg.lr_adjust_epoch))\n else:\n lr = cfg.learning_rate\n\n opt = nn.Momentum(network.trainable_params(), lr, cfg.momentum)\n loss_cb = LossMonitor()\n\n model = Model(network, loss, opt, {'acc': Accuracy()})\n\n print(\"============== Starting Testing ==============\")\n param_dict = load_checkpoint(args.ckpt_path)\n load_param_into_net(network, param_dict)\n if args.device_target == \"CPU\":\n acc = model.eval(ds_eval, dataset_sink_mode=False)\n else:\n acc = model.eval(ds_eval)\n print(\"============== {} ==============\".format(acc))\n", "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"\ntest mindspore grammar constraints\n1. funtion must have return statement\n2. 
raise statement can not be used\n\"\"\"\n# pylint: disable=R1705, R1710, W0223\nimport numpy as np\nimport pytest\n\nimport mindspore.nn as nn\nfrom mindspore import Tensor\nfrom mindspore import context\nfrom mindspore import dtype as mstype\n\ncontext.set_context(mode=context.GRAPH_MODE)\n\ndef test_missing_return():\n class NetMissReturn(nn.Cell):\n def __init__(self):\n super(NetMissReturn, self).__init__()\n\n def construct(self, x, y, z):\n if x == 1:\n return 10\n elif x == 20:\n if y == 1:\n return 3\n elif y == 2:\n for i in range(z):\n return i + z\n i = 0\n while i < z:\n return i + z\n def g(u):\n return x + u\n # here method 'construct' misses a return statement\n g(y)\n else:\n return 7\n else:\n return 5\n\n net = NetMissReturn()\n x = Tensor(0, mstype.int32)\n y = Tensor(5, mstype.int32)\n z = Tensor(2, mstype.int32)\n with pytest.raises(TypeError) as er:\n net(x, y, z)\n assert \"Missing return statement in bound method 'construct'\" in str(er.value)\n\n\ndef test_nest_function_missing_return():\n class NetNestFuncMissReturn(nn.Cell):\n def __init__(self):\n super(NetNestFuncMissReturn, self).__init__()\n\n def construct(self, x, y, z):\n if x == 1:\n return 10\n elif x == 20:\n if y == 1:\n return 3\n elif y == 2:\n for i in range(z):\n return i + z\n i = 0\n while i < z:\n return i + z\n def g(u):\n x += u\n # nested function 'g' misses a return a statement\n return g(y)\n else:\n return 7\n else:\n return 5\n\n net = NetNestFuncMissReturn()\n x = Tensor(0, mstype.int32)\n y = Tensor(5, mstype.int32)\n z = Tensor(2, mstype.int32)\n with pytest.raises(TypeError) as er:\n net(x, y, z)\n assert \"Missing return statement in function 'g'\" in str(er.value)\n\n\ndef test_raise_in_method():\n class NetRaiseInMethod(nn.Cell):\n def __init__(self):\n super(NetRaiseInMethod, self).__init__()\n\n def construct(self, x, y, z):\n if x == 1:\n return 10\n elif x == 20:\n # add not support grammar 'raise' here\n raise ValueError('Illegal case')\n else:\n return y + z\n\n net = NetRaiseInMethod()\n x = Tensor(0, mstype.int32)\n y = Tensor(5, mstype.int32)\n z = Tensor(2, mstype.int32)\n with pytest.raises(RuntimeError) as er:\n net(x, y, z)\n assert \"Unsupported syntax 'Raise' at\" in str(er.value)\n\n\ndef test_raise_in_nested_function():\n class NetNestRaise(nn.Cell):\n def __init__(self):\n super(NetNestRaise, self).__init__()\n\n def construct(self, x, y, z):\n if x == 1:\n return 10\n elif x == 20:\n def nest_fn(u):\n if u > 0:\n # add not support grammar 'raise' here\n raise ValueError('Illegal case')\n return u + z + 1\n return nest_fn(y)\n else:\n return y + z\n\n net = NetNestRaise()\n x = Tensor(0, mstype.int32)\n y = Tensor(5, mstype.int32)\n z = Tensor(2, mstype.int32)\n with pytest.raises(RuntimeError) as er:\n net(x, y, z)\n assert \"Unsupported syntax 'Raise' at \" in str(er.value)\n\n\ndef test_nest_branch_with_return():\n class NetBranchWithReturn(nn.Cell):\n def __init__(self):\n super(NetBranchWithReturn, self).__init__()\n\n def construct(self, x, y, z):\n if x == 1:\n return 10\n else:\n return 5\n\n context.set_context(save_graphs=True)\n net = NetBranchWithReturn()\n x = Tensor(0, mstype.int32)\n y = Tensor(5, mstype.int32)\n z = Tensor(2, mstype.int32)\n net(x, y, z)\n\n\ndef test_any_with_no_return():\n class NetAnyNoReturn(nn.Cell):\n def __init__(self):\n super(NetAnyNoReturn, self).__init__()\n\n def construct(self, inp):\n result = inp.any()\n if result:\n return 6\n\n np_input = np.arange(2 * 3 * 4).reshape((2, 3, 4)).astype(np.bool_)\n tensor = 
Tensor(np_input)\n net = NetAnyNoReturn()\n with pytest.raises(TypeError) as er:\n net(tensor)\n assert \"Missing return statement in bound method 'construct'\" in str(er.value)\n\n\ndef test_missing_construct():\n class NetMissConstruct(nn.Cell):\n def __init__(self):\n super(NetMissConstruct, self).__init__()\n\n def construct1(self, inp):\n return 5\n\n np_input = np.arange(2 * 3 * 4).reshape((2, 3, 4)).astype(np.bool_)\n tensor = Tensor(np_input)\n net = NetMissConstruct()\n assert net(tensor) is None\n", "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"\nmobilenetv2 export file.\n\"\"\"\nimport argparse\nimport numpy as np\nfrom mindspore import Tensor, export, context\nfrom src.config import set_config\nfrom src.models import define_net, load_ckpt\nfrom src.utils import set_context\n\nparser = argparse.ArgumentParser(description=\"mobilenetv2 export\")\nparser.add_argument(\"--device_id\", type=int, default=0, help=\"Device id\")\nparser.add_argument(\"--batch_size\", type=int, default=1, help=\"batch size\")\nparser.add_argument(\"--ckpt_file\", type=str, required=True, help=\"Checkpoint file path.\")\nparser.add_argument(\"--file_name\", type=str, default=\"mobilenetv2\", help=\"output file name.\")\nparser.add_argument(\"--file_format\", type=str, choices=[\"AIR\", \"ONNX\", \"MINDIR\"], default=\"AIR\", help=\"file format\")\nparser.add_argument('--platform', type=str, default=\"Ascend\", choices=(\"Ascend\", \"GPU\", \"CPU\"),\n help='run platform, only support GPU, CPU and Ascend')\nargs = parser.parse_args()\nargs.is_training = False\nargs.run_distribute = False\n\ncontext.set_context(mode=context.GRAPH_MODE, device_target=args.platform, device_id=args.device_id)\n\nif __name__ == '__main__':\n cfg = set_config(args)\n set_context(cfg)\n _, _, net = define_net(cfg, args.is_training)\n\n load_ckpt(net, args.ckpt_file)\n input_shp = [args.batch_size, 3, cfg.image_height, cfg.image_width]\n input_array = Tensor(np.random.uniform(-1.0, 1.0, size=input_shp).astype(np.float32))\n export(net, input_array, file_name=args.file_name, file_format=args.file_format)\n", "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"\ngraphdata.py supports loading graph dataset for GNN network training,\nand provides operations related to 
graph data.\n\"\"\"\nimport atexit\nimport time\nimport numpy as np\nfrom mindspore._c_dataengine import GraphDataClient\nfrom mindspore._c_dataengine import GraphDataServer\nfrom mindspore._c_dataengine import Tensor\n\nfrom .validators import check_gnn_graphdata, check_gnn_get_all_nodes, check_gnn_get_all_edges, \\\n check_gnn_get_nodes_from_edges, check_gnn_get_all_neighbors, check_gnn_get_sampled_neighbors, \\\n check_gnn_get_neg_sampled_neighbors, check_gnn_get_node_feature, check_gnn_get_edge_feature, \\\n check_gnn_random_walk\n\n\nclass GraphData:\n \"\"\"\n Reads the graph dataset used for GNN training from the shared file and database.\n\n Args:\n dataset_file (str): One of file names in the dataset.\n num_parallel_workers (int, optional): Number of workers to process the dataset in parallel\n (default=None).\n working_mode (str, optional): Set working mode, now supports 'local'/'client'/'server' (default='local').\n\n - 'local', used in non-distributed training scenarios.\n\n - 'client', used in distributed training scenarios. The client does not load data,\n but obtains data from the server.\n\n - 'server', used in distributed training scenarios. The server loads the data\n and is available to the client.\n\n hostname (str, optional): Hostname of the graph data server. This parameter is only valid when\n working_mode is set to 'client' or 'server' (default='127.0.0.1').\n port (int, optional): Port of the graph data server. The range is 1024-65535. This parameter is\n only valid when working_mode is set to 'client' or 'server' (default=50051).\n num_client (int, optional): Maximum number of clients expected to connect to the server. The server will\n allocate resources according to this parameter. This parameter is only valid when working_mode\n is set to 'server' (default=1).\n auto_shutdown (bool, optional): Valid when working_mode is set to 'server',\n when the number of connected clients reaches num_client and no client is being connected,\n the server automatically exits (default=True).\n\n Examples:\n >>> import mindspore.dataset as ds\n >>>\n >>> data_graph = ds.GraphData('dataset_file', 2)\n >>> nodes = data_graph.get_all_nodes(0)\n >>> features = data_graph.get_node_feature(nodes, [1])\n \"\"\"\n\n @check_gnn_graphdata\n def __init__(self, dataset_file, num_parallel_workers=None, working_mode='local', hostname='127.0.0.1', port=50051,\n num_client=1, auto_shutdown=True):\n self._dataset_file = dataset_file\n self._working_mode = working_mode\n if num_parallel_workers is None:\n num_parallel_workers = 1\n\n def stop():\n self._graph_data.stop()\n\n if working_mode in ['local', 'client']:\n self._graph_data = GraphDataClient(dataset_file, num_parallel_workers, working_mode, hostname, port)\n atexit.register(stop)\n\n if working_mode == 'server':\n self._graph_data = GraphDataServer(\n dataset_file, num_parallel_workers, hostname, port, num_client, auto_shutdown)\n atexit.register(stop)\n try:\n while self._graph_data.is_stoped() is not True:\n time.sleep(1)\n except KeyboardInterrupt:\n raise Exception(\"Graph data server receives KeyboardInterrupt.\")\n\n @check_gnn_get_all_nodes\n def get_all_nodes(self, node_type):\n \"\"\"\n Get all nodes in the graph.\n\n Args:\n node_type (int): Specify the type of node.\n\n Returns:\n numpy.ndarray: Array of nodes.\n\n Examples:\n >>> import mindspore.dataset as ds\n >>>\n >>> data_graph = ds.GraphData('dataset_file', 2)\n >>> nodes = data_graph.get_all_nodes(0)\n\n Raises:\n TypeError: If `node_type` is not integer.\n \"\"\"\n if 
self._working_mode == 'server':\n raise Exception(\"This method is not supported when working mode is server.\")\n return self._graph_data.get_all_nodes(node_type).as_array()\n\n @check_gnn_get_all_edges\n def get_all_edges(self, edge_type):\n \"\"\"\n Get all edges in the graph.\n\n Args:\n edge_type (int): Specify the type of edge.\n\n Returns:\n numpy.ndarray: array of edges.\n\n Examples:\n >>> import mindspore.dataset as ds\n >>>\n >>> data_graph = ds.GraphData('dataset_file', 2)\n >>> nodes = data_graph.get_all_edges(0)\n\n Raises:\n TypeError: If `edge_type` is not integer.\n \"\"\"\n if self._working_mode == 'server':\n raise Exception(\"This method is not supported when working mode is server.\")\n return self._graph_data.get_all_edges(edge_type).as_array()\n\n @check_gnn_get_nodes_from_edges\n def get_nodes_from_edges(self, edge_list):\n \"\"\"\n Get nodes from the edges.\n\n Args:\n edge_list (Union[list, numpy.ndarray]): The given list of edges.\n\n Returns:\n numpy.ndarray: Array of nodes.\n\n Raises:\n TypeError: If `edge_list` is not list or ndarray.\n \"\"\"\n if self._working_mode == 'server':\n raise Exception(\"This method is not supported when working mode is server.\")\n return self._graph_data.get_nodes_from_edges(edge_list).as_array()\n\n @check_gnn_get_all_neighbors\n def get_all_neighbors(self, node_list, neighbor_type):\n \"\"\"\n Get `neighbor_type` neighbors of the nodes in `node_list`.\n\n Args:\n node_list (Union[list, numpy.ndarray]): The given list of nodes.\n neighbor_type (int): Specify the type of neighbor.\n\n Returns:\n numpy.ndarray: Array of nodes.\n\n Examples:\n >>> import mindspore.dataset as ds\n >>>\n >>> data_graph = ds.GraphData('dataset_file', 2)\n >>> nodes = data_graph.get_all_nodes(0)\n >>> neighbors = data_graph.get_all_neighbors(nodes, 0)\n\n Raises:\n TypeError: If `node_list` is not list or ndarray.\n TypeError: If `neighbor_type` is not integer.\n \"\"\"\n if self._working_mode == 'server':\n raise Exception(\"This method is not supported when working mode is server.\")\n return self._graph_data.get_all_neighbors(node_list, neighbor_type).as_array()\n\n @check_gnn_get_sampled_neighbors\n def get_sampled_neighbors(self, node_list, neighbor_nums, neighbor_types):\n \"\"\"\n Get sampled neighbor information.\n\n The api supports multi-hop neighbor sampling. That is, the previous sampling result is used as the input of\n next-hop sampling. 
A maximum of 6-hop are allowed.\n\n The sampling result is tiled into a list in the format of [input node, 1-hop sampling result,\n 2-hop samling result ...]\n\n Args:\n node_list (Union[list, numpy.ndarray]): The given list of nodes.\n neighbor_nums (Union[list, numpy.ndarray]): Number of neighbors sampled per hop.\n neighbor_types (Union[list, numpy.ndarray]): Neighbor type sampled per hop.\n\n Returns:\n numpy.ndarray: Array of nodes.\n\n Examples:\n >>> import mindspore.dataset as ds\n >>>\n >>> data_graph = ds.GraphData('dataset_file', 2)\n >>> nodes = data_graph.get_all_nodes(0)\n >>> neighbors = data_graph.get_sampled_neighbors(nodes, [2, 2], [0, 0])\n\n Raises:\n TypeError: If `node_list` is not list or ndarray.\n TypeError: If `neighbor_nums` is not list or ndarray.\n TypeError: If `neighbor_types` is not list or ndarray.\n \"\"\"\n if self._working_mode == 'server':\n raise Exception(\"This method is not supported when working mode is server.\")\n return self._graph_data.get_sampled_neighbors(\n node_list, neighbor_nums, neighbor_types).as_array()\n\n @check_gnn_get_neg_sampled_neighbors\n def get_neg_sampled_neighbors(self, node_list, neg_neighbor_num, neg_neighbor_type):\n \"\"\"\n Get `neg_neighbor_type` negative sampled neighbors of the nodes in `node_list`.\n\n Args:\n node_list (Union[list, numpy.ndarray]): The given list of nodes.\n neg_neighbor_num (int): Number of neighbors sampled.\n neg_neighbor_type (int): Specify the type of negative neighbor.\n\n Returns:\n numpy.ndarray: Array of nodes.\n\n Examples:\n >>> import mindspore.dataset as ds\n >>>\n >>> data_graph = ds.GraphData('dataset_file', 2)\n >>> nodes = data_graph.get_all_nodes(0)\n >>> neg_neighbors = data_graph.get_neg_sampled_neighbors(nodes, 5, 0)\n\n Raises:\n TypeError: If `node_list` is not list or ndarray.\n TypeError: If `neg_neighbor_num` is not integer.\n TypeError: If `neg_neighbor_type` is not integer.\n \"\"\"\n if self._working_mode == 'server':\n raise Exception(\"This method is not supported when working mode is server.\")\n return self._graph_data.get_neg_sampled_neighbors(\n node_list, neg_neighbor_num, neg_neighbor_type).as_array()\n\n @check_gnn_get_node_feature\n def get_node_feature(self, node_list, feature_types):\n \"\"\"\n Get `feature_types` feature of the nodes in `node_list`.\n\n Args:\n node_list (Union[list, numpy.ndarray]): The given list of nodes.\n feature_types (Union[list, numpy.ndarray]): The given list of feature types.\n\n Returns:\n numpy.ndarray: array of features.\n\n Examples:\n >>> import mindspore.dataset as ds\n >>>\n >>> data_graph = ds.GraphData('dataset_file', 2)\n >>> nodes = data_graph.get_all_nodes(0)\n >>> features = data_graph.get_node_feature(nodes, [1])\n\n Raises:\n TypeError: If `node_list` is not list or ndarray.\n TypeError: If `feature_types` is not list or ndarray.\n \"\"\"\n if self._working_mode == 'server':\n raise Exception(\"This method is not supported when working mode is server.\")\n if isinstance(node_list, list):\n node_list = np.array(node_list, dtype=np.int32)\n return [\n t.as_array() for t in self._graph_data.get_node_feature(\n Tensor(node_list),\n feature_types)]\n\n @check_gnn_get_edge_feature\n def get_edge_feature(self, edge_list, feature_types):\n \"\"\"\n Get `feature_types` feature of the edges in `edge_list`.\n\n Args:\n edge_list (Union[list, numpy.ndarray]): The given list of edges.\n feature_types (Union[list, numpy.ndarray]): The given list of feature types.\n\n Returns:\n numpy.ndarray: array of features.\n\n Examples:\n 
>>> import mindspore.dataset as ds\n >>>\n >>> data_graph = ds.GraphData('dataset_file', 2)\n >>> edges = data_graph.get_all_edges(0)\n >>> features = data_graph.get_edge_feature(edges, [1])\n\n Raises:\n TypeError: If `edge_list` is not list or ndarray.\n TypeError: If `feature_types` is not list or ndarray.\n \"\"\"\n if self._working_mode == 'server':\n raise Exception(\"This method is not supported when working mode is server.\")\n if isinstance(edge_list, list):\n edge_list = np.array(edge_list, dtype=np.int32)\n return [\n t.as_array() for t in self._graph_data.get_edge_feature(\n Tensor(edge_list),\n feature_types)]\n\n def graph_info(self):\n \"\"\"\n Get the meta information of the graph, including the number of nodes, the type of nodes,\n the feature information of nodes, the number of edges, the type of edges, and the feature information of edges.\n\n Returns:\n dict: Meta information of the graph. The key is node_type, edge_type, node_num, edge_num,\n node_feature_type and edge_feature_type.\n \"\"\"\n if self._working_mode == 'server':\n raise Exception(\"This method is not supported when working mode is server.\")\n return self._graph_data.graph_info()\n\n @check_gnn_random_walk\n def random_walk(\n self,\n target_nodes,\n meta_path,\n step_home_param=1.0,\n step_away_param=1.0,\n default_node=-1):\n \"\"\"\n Random walk in nodes.\n\n Args:\n target_nodes (list[int]): Start node list in random walk\n meta_path (list[int]): node type for each walk step\n step_home_param (float, optional): return hyper parameter in node2vec algorithm (Default = 1.0).\n step_away_param (float, optional): inout hyper parameter in node2vec algorithm (Default = 1.0).\n default_node (int, optional): default node if no more neighbors found (Default = -1).\n A default value of -1 indicates that no node is given.\n\n Returns:\n numpy.ndarray: Array of nodes.\n\n Examples:\n >>> import mindspore.dataset as ds\n >>>\n >>> data_graph = ds.GraphData('dataset_file', 2)\n >>> nodes = data_graph.random_walk([1,2], [1,2,1,2,1])\n\n Raises:\n TypeError: If `target_nodes` is not list or ndarray.\n TypeError: If `meta_path` is not list or ndarray.\n \"\"\"\n if self._working_mode == 'server':\n raise Exception(\"This method is not supported when working mode is server.\")\n return self._graph_data.random_walk(target_nodes, meta_path, step_home_param, step_away_param,\n default_node).as_array()\n" ]
[ [ "numpy.expand_dims", "numpy.maximum", "numpy.minimum", "numpy.random.choice", "numpy.concatenate", "numpy.random.rand", "numpy.array" ], [ "numpy.ceil", "numpy.pad" ], [ "numpy.arange" ], [ "numpy.random.uniform" ], [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
doronbehar/lab4
[ "90af5a8fd562ba6a35b6ba90611122573e7de485" ]
[ "x2.ESR/ESRB.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nimport pint\n# Use the same registry\nfrom main import ureg\nureg.setup_matplotlib(True)\nfrom uncertainties import ufloat, umath, unumpy\nimport pandas as pd\nfrom scipy.signal import find_peaks\nfrom scipy.integrate import simpson\nfrom scipy.optimize import curve_fit\nplt.rcParams['text.usetex'] = True\n\namp = 700*ureg.mV\nR=ufloat(0.82, 0.82*0.1)*ureg.ohm\n\ndf = pd.read_csv(\"./ESRB.csv\")\n# The I0_modulation signal is horrible, the system was too noisy, so instead:\n#\n# I0_modulation = (unumpy.uarray(\n # df['V_modulation_raw'].values,\n # df['V_modulation_err'].values\n# )*ureg.mV/R).to('ampere')\n#\n# we regnerate it, assuming it should be linear, just as V_DC is.\nI0_modulation = (unumpy.uarray(np.linspace(\n df['V_modulation_raw'].min(),\n df['V_modulation_raw'].max(),\n len(df)\n), df['V_modulation_err'].mean())*ureg.mV/R).to('ampere')\n\nptp_Y = unumpy.uarray(\n df['ptp_Y_raw'].values*df['phase_sign'].values,\n df['ptp_Y_err'].values\n)*ureg.mV\nptp_X_modulation = ufloat(3.09, 0.01)*ureg.mV\n\nfig, ax = plt.subplots()\nI0_modulation_err = np.array([val.m.s for val in I0_modulation])\nI0_modulation_raw = np.array([val.m.n for val in I0_modulation])\nptp_ratio = ptp_Y/ptp_X_modulation\nabsorption_deriviative = ptp_ratio/max(ptp_ratio)\nabsorption_deriviative_raw = np.array([val.m.n for val in absorption_deriviative])\nabsorption_deriviative_err = np.array([val.m.s for val in absorption_deriviative])\nax.errorbar(\n I0_modulation_raw*ureg.ampere,\n absorption_deriviative_raw, # Dimensionless\n fmt='.',\n yerr=absorption_deriviative_err,\n # TODO: Mention in report that error is too big to be drafted\n #xerr=I_modulation_err,\n # TODO: Is this the correct label?\n label='Absorption Deriviative'\n)\n\ndef lorentzian_dif_fit(I, I0, gamma, amplitude):\n return amplitude*(-2*(gamma**2)*(I - I0))/ \\\n (gamma**2 + (I - I0)**2)**2\ndef lorentzian_fit(I, I0, gamma, amplitude):\n return amplitude*gamma**2/\\\n (gamma**2 + (I - I0)**2)**2\n##### By MATLAB:\n# Goodness of fit:\n# SSE: 0.197\n# R-square: 0.9845\n# Adjusted R-square: 0.9838\n# RMSE: 0.06769\n# I0 gamma amplitude\nmatlab_p0 = [0.5479, 0.03847, 0.05554]\nmatlab_bounds=((0.547, 0.03672, 0.05304),\n (0.5488, 0.04021, 0.05805))\nI_rf = ufloat(matlab_p0[0], abs(matlab_bounds[0][0] - matlab_p0[0]))*ureg.ampere\nI_hwhm = ufloat(matlab_p0[1], abs(matlab_bounds[0][1] - matlab_p0[1]))*ureg.ampere\n\nfrom main import g_times_bohr\n# TODO: Take this value from Itamar & Tomer\nH_RF = ufloat(34.914, 0.009)*ureg.gauss\nk = H_RF/I_rf\n# Converts current I To frequency f using all of the constants\ndef I2f(I):\n return (I*k*g_times_bohr/ureg.planck_constant).to('megahertz')\n\nf0_modulation = I2f(I0_modulation)\nf_rf = I2f(I_rf)\nf_hwhm = I2f(I_hwhm)\nT2 = (1/f_hwhm).to('nanosecond')\n\n##### A failing Python fit attempt - I consider it as a failure because it hits\n##### the bounds :/\n# popt, pcov = curve_fit(\n # lorentzian_dif_fit, absorption_deriviative_raw, I0_modulation_raw,\n # p0=matlab_p0, bounds=matlab_bounds\n# )\n# lorentzian_dif_fit_points = lorentzian_dif_fit(I0_modulation_raw, *popt)\n# ax.plot(\n # I0_modulation_raw*ureg.ampere,\n # lorentzian_dif_fit_points,\n # label=\"Python fit\"\n# )\n\nI0_modulation_seq = np.linspace(\n I0_modulation.min().m.n,\n I0_modulation.max().m.n,\n len(I0_modulation)*100\n)\nax.plot(\n I0_modulation_seq*ureg.ampere,\n lorentzian_dif_fit(I0_modulation_seq, I_rf.m.n, I_hwhm.m.n, matlab_p0[2]),\n label=\"Matlab 
fit\"\n)\nax.set_yticks([])\naxt = ax.twiny()\naxt.grid(linestyle='--')\naxt.set_yticks([])\nf0_modulation_seq = np.linspace(\n f0_modulation.min().m.n,\n f0_modulation.max().m.n,\n len(f0_modulation)*100\n)\ndef lorentzian_wrapper(f0):\n # From some reason this need to be amplified by a factor of 800 so it will\n # look good.\n return lorentzian_fit(f0, f_rf.m.n, f_hwhm.m.n, matlab_p0[2]*800)\naxt.plot(\n f0_modulation_seq*ureg.megahertz,\n lorentzian_wrapper(f0_modulation_seq),\n label = \"Lorenzian fit\", color='green'\n)\naxt.set_xticks(\n [(f_rf - f_hwhm).m.n, f_rf.m.n, (f_rf + f_hwhm).m.n],\n ['', '$f_{rf}$', '']\n)\naxt.set_xlabel('')\naxt.arrow(\n length_includes_head = True,\n x = (f_rf - f_hwhm).m.n*ureg.megahertz,\n y = lorentzian_wrapper((f_rf - f_hwhm).m.n),\n dx = 2*f_hwhm.m.n*ureg.megahertz,\n dy = 0,\n head_length = f_hwhm.m.n/10,\n head_width = matlab_p0[2],\n label=\"Full Width Half Max\",\n)\naxt.arrow(\n length_includes_head = True,\n x = (f_rf + f_hwhm).m.n*ureg.megahertz,\n y = lorentzian_wrapper((f_rf + f_hwhm).m.n),\n dx = -2*f_hwhm.m.n*ureg.megahertz,\n head_length = f_hwhm.m.n/10,\n head_width = matlab_p0[2],\n dy = 0,\n)\naxt.text(\n 0.5, 0.63,\n # (f_hwhm.m.n/10),\n # lorentzian_wrapper((f0 - f_hwhm).m.n)*2,\n \"FWHM\",\n transform=ax.transAxes,\n # fontsize=00\n)\nax.legend(loc='upper right')\n# axt.legend(loc='upper left')\nplt.show()\nfig.savefig(\"ESRB.pgf\")\nfig.savefig(\"ESRB.png\")\n\n# TODO: Integrate numerically / or fit to a laurenzian's differentiation\n\n# TODO: Scale the x axis to frequency and find the width of the laurenzian in\n# frequency scale\n" ]
[ [ "numpy.array", "pandas.read_csv", "matplotlib.pyplot.subplots", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
ayushkarnawat/profit
[ "f3c4d601078b52513af6832c3faf75ddafc59ac5" ]
[ "examples/gb1/train_oracle.py" ]
[ "\"\"\"Train (basic) densely-connected oracle.\"\"\"\n\nimport os\nimport time\nimport multiprocessing as mp\n\nimport pandas as pd\n\nimport torch\nfrom torch import optim\nfrom torch.utils.data import DataLoader, Subset, TensorDataset, WeightedRandomSampler\n\nfrom profit.dataset.splitters import split_method_dict\nfrom profit.models.torch import SequenceOracle\nfrom profit.utils.data_utils.tokenizers import AminoAcidTokenizer\nfrom profit.utils.training_utils.torch import losses as L\nfrom profit.utils.training_utils.torch.callbacks import ModelCheckpoint\nfrom profit.utils.training_utils.torch.callbacks import EarlyStopping\n\nfrom examples.gb1.data import load_dataset\n\n\ntimestep = time.strftime(\"%Y-%b-%d-%H:%M:%S\", time.gmtime())\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\ntensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.Tensor\nsplits = [\"train\", \"valid\"]\n\n# Preprocess + load the dataset\ndataset = load_dataset(\"lstm\", \"primary\", labels=\"Fitness\", num_data=-1,\n filetype=\"mdb\", as_numpy=False, vocab=\"aa20\")\n# Stratify train/val/test sets s.t. the target labels are equally represented in\n# each subset. Each subset will have the same ratio of low/mid/high variants in\n# each batch as the full dataset. See: https://discuss.pytorch.org/t/29907/2\n_dataset = dataset[:][\"arr_0\"]\n_labels = dataset[:][\"arr_1\"].view(-1)\n# # Remove samples below a certain threshold\n# high_idx = torch.where(_labels > _labels.mean())\n# dataset = Subset(dataset, sorted(high_idx))\n# _dataset = _dataset[high_idx]\n# _labels = _labels[high_idx]\n\n# Compute sample weights (each sample should get its own weight)\ndef sampler(labels: torch.Tensor,\n nbins: int = 10,\n stratify: bool = False) -> WeightedRandomSampler:\n discretize = pd.qcut if stratify else pd.cut\n bin_labels = torch.LongTensor(discretize(labels.tolist(), nbins,\n labels=False, duplicates=\"drop\"))\n class_sample_count = torch.LongTensor(\n [(bin_labels == t).sum() for t in torch.arange(nbins)])\n weight = 1. 
/ class_sample_count.float()\n sample_weights = torch.zeros_like(labels)\n for t in torch.unique(bin_labels):\n sample_weights[bin_labels == t] = weight[t]\n return WeightedRandomSampler(sample_weights, len(sample_weights))\n\n# Compute sample weights and add to original dataset\nweights = sampler(_labels, nbins=10, stratify=False).weights.type(torch.float)\ndataset = TensorDataset(*dataset[:].values(), weights)\n\n# Create subset indicies\nsubset_idx = split_method_dict[\"stratified\"]().train_valid_test_split(\n dataset=_dataset, labels=_labels.tolist(), frac_train=0.9,\n frac_valid=0.1, frac_test=0.0, return_idxs=True, n_bins=10)\nstratified = {split: Subset(dataset, sorted(idx))\n for split, idx in zip(splits, subset_idx)}\n\n# Create stratified sampler (only needed for training)\ntrain_sampler = sampler(stratified[\"train\"][:][1].view(-1), stratify=True)\n\n# Initialize model\ntokenizer = AminoAcidTokenizer(\"aa20\")\nvocab_size = tokenizer.vocab_size\nseqlen = stratified[\"train\"][0][0].size(0)\nmodel = SequenceOracle(seqlen, vocab_size, hidden_size=50, out_size=2)\n\n# Initialize callbacks\n# NOTE: Must set model (within save_clbk) to ensure weights get saved\nstop_clbk = EarlyStopping(patience=5, verbose=1)\nsave_clbk = ModelCheckpoint(os.path.join(\"bin/3gb1/oracle\", timestep),\n monitor=\"val_loss\",\n verbose=1,\n save_weights_only=True)\nsave_clbk.set_model(model)\n\n# Initialize callbacks\noptimizer = optim.AdamW(model.parameters(), lr=1e-3)\n\nepochs = 50\nfor epoch in range(1, epochs+1):\n for split in splits:\n summed_loss = 0\n data_loader = DataLoader(\n dataset=stratified[split],\n batch_size=32,\n sampler=train_sampler if split == \"train\" else None,\n num_workers=mp.cpu_count(),\n pin_memory=torch.cuda.is_available()\n )\n\n # Enable/disable dropout\n model.train() if split == \"train\" else model.eval()\n\n for it, batch in enumerate(data_loader):\n data = batch[0].long().to(device)\n target = batch[1].to(device)\n sample_weight = batch[2].to(device)\n # One-hot encode (see: https://discuss.pytorch.org/t/507/34)\n batch_size, seqlen = data.size()\n onehot = torch.zeros(batch_size, seqlen, vocab_size)\n onehot.scatter_(2, torch.unsqueeze(data, 2), 1)\n\n # Forward pass\n pred = model(onehot)\n # Loss calculation\n nll_loss = L.gaussian_nll_loss(pred, target, reduction=\"none\")\n # Reweight nll_loss w/ sample weights\n nll_loss = (nll_loss * sample_weight).sum()\n summed_loss += nll_loss.item()\n loss = nll_loss / batch_size\n # Compute gradients and update params/weights\n if split == \"train\":\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # Bookkeeping (batch)\n if it % 5 == 0 or it+1 == len(data_loader):\n print(\"{} Batch {:04d}/{:d} ({:.2f}%)\\tLoss: {:.4f}\".format(\n split.upper(), it+1, len(data_loader),\n 100. * ((it+1)/len(data_loader)), loss.item()))\n\n # Bookkeeping (epoch)\n avg_loss = summed_loss / len(data_loader.dataset)\n print(\"{} Epoch {}/{}, Average NLL loss: {:.4f}\".format(\n split.upper(), epoch, epochs, avg_loss))\n\n # Stop training (based off val loss) and save (top k) ckpts\n if split == \"valid\":\n save_clbk.on_epoch_end(epoch, logs={\"val_loss\": avg_loss})\n should_stop = stop_clbk.on_epoch_end(epoch, logs={\"val_loss\": avg_loss})\n if should_stop:\n break\n else:\n continue\n break\n" ]
[ [ "torch.zeros", "torch.zeros_like", "torch.unsqueeze", "torch.unique", "torch.cuda.is_available", "torch.arange" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Air-Factories-2-0/af2-hyperledger
[ "7aeeb831cf03fdf7fe64f9500da17c02688a0886", "7aeeb831cf03fdf7fe64f9500da17c02688a0886", "7aeeb831cf03fdf7fe64f9500da17c02688a0886", "7aeeb831cf03fdf7fe64f9500da17c02688a0886", "7aeeb831cf03fdf7fe64f9500da17c02688a0886", "7aeeb831cf03fdf7fe64f9500da17c02688a0886", "7aeeb831cf03fdf7fe64f9500da17c02688a0886" ]
[ "scripts/printingValidation/venv/lib/python3.9/site-packages/skimage/morphology/tests/test_max_tree.py", "scripts/printingValidation/venv/lib/python3.9/site-packages/skimage/measure/_marching_cubes_lewiner.py", "scripts/printingValidation/venv/lib/python3.9/site-packages/skimage/data/tests/test_data.py", "scripts/printingValidation/venv/lib/python3.9/site-packages/pymatting/util/boxfilter.py", "scripts/printingValidation/venv/lib/python3.9/site-packages/skimage/segmentation/boundaries.py", "scripts/printingValidation/venv/lib/python3.9/site-packages/skimage/feature/tests/test_texture.py", "scripts/printingValidation/venv/lib/python3.9/site-packages/skimage/filters/setup.py" ]
[ "import numpy as np\nfrom skimage.morphology import max_tree, area_closing, area_opening\nfrom skimage.morphology import max_tree_local_maxima, diameter_opening\nfrom skimage.morphology import diameter_closing\nfrom skimage.util import invert\n\nfrom skimage._shared.testing import assert_array_equal, TestCase\n\neps = 1e-12\n\n\ndef _full_type_test(img, param, expected, func, param_scale=False,\n **keywords):\n\n # images as they are\n out = func(img, param, **keywords)\n assert_array_equal(out, expected)\n\n # unsigned int\n for dt in [np.uint32, np.uint64]:\n img_cast = img.astype(dt)\n out = func(img_cast, param, **keywords)\n exp_cast = expected.astype(dt)\n assert_array_equal(out, exp_cast)\n\n # float\n data_float = img.astype(np.float64)\n data_float = data_float / 255.0\n expected_float = expected.astype(np.float64)\n expected_float = expected_float / 255.0\n if param_scale:\n param_cast = param / 255.0\n else:\n param_cast = param\n for dt in [np.float32, np.float64]:\n data_cast = data_float.astype(dt)\n out = func(data_cast, param_cast, **keywords)\n exp_cast = expected_float.astype(dt)\n error_img = 255.0 * exp_cast - 255.0 * out\n error = (error_img >= 1.0).sum()\n assert error < eps\n\n # signed images\n img_signed = img.astype(np.int16)\n img_signed = img_signed - 128\n exp_signed = expected.astype(np.int16)\n exp_signed = exp_signed - 128\n for dt in [np.int8, np.int16, np.int32, np.int64]:\n img_s = img_signed.astype(dt)\n out = func(img_s, param, **keywords)\n exp_s = exp_signed.astype(dt)\n assert_array_equal(out, exp_s)\n\n\nclass TestMaxtree(TestCase):\n\n def test_max_tree(self):\n \"Test for max tree\"\n img_type = np.uint8\n img = np.array([[10, 8, 8, 9],\n [7, 7, 9, 9],\n [8, 7, 10, 10],\n [9, 9, 10, 10]], dtype=img_type)\n\n P_exp = np.array([[1, 4, 1, 1],\n [4, 4, 3, 3],\n [1, 4, 3, 10],\n [3, 3, 10, 10]], dtype=np.int64)\n\n S_exp = np.array([4, 5, 9, 1, 2, 8, 3, 6, 7,\n 12, 13, 0, 10, 11, 14, 15],\n dtype=np.int64)\n\n for img_type in [np.uint8, np.uint16, np.uint32, np.uint64]:\n img = img.astype(img_type)\n P, S = max_tree(img, connectivity=2)\n assert_array_equal(P, P_exp)\n assert_array_equal(S, S_exp)\n\n for img_type in [np.int8, np.int16, np.int32, np.int64]:\n img = img.astype(img_type)\n img_shifted = img - 9\n P, S = max_tree(img_shifted, connectivity=2)\n assert_array_equal(P, P_exp)\n assert_array_equal(S, S_exp)\n\n img_float = img.astype(float)\n img_float = (img_float - 8) / 2.0\n for img_type in [np.float32, np.float64]:\n img_float = img_float.astype(img_type)\n P, S = max_tree(img_float, connectivity=2)\n assert_array_equal(P, P_exp)\n assert_array_equal(S, S_exp)\n\n return\n\n def test_area_closing(self):\n \"Test for Area Closing (2 thresholds, all types)\"\n\n # original image\n img = np.array(\n [[240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240],\n [240, 200, 200, 240, 200, 240, 200, 200, 240, 240, 200, 240],\n [240, 200, 40, 240, 240, 240, 240, 240, 240, 240, 40, 240],\n [240, 240, 240, 240, 100, 240, 100, 100, 240, 240, 200, 240],\n [240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240],\n [200, 200, 200, 200, 200, 200, 200, 240, 200, 200, 255, 255],\n [200, 255, 200, 200, 200, 255, 200, 240, 255, 255, 255, 40],\n [200, 200, 200, 100, 200, 200, 200, 240, 255, 255, 255, 255],\n [200, 200, 200, 100, 200, 200, 200, 240, 200, 200, 255, 255],\n [200, 200, 200, 200, 200, 40, 200, 240, 240, 100, 255, 255],\n [200, 40, 255, 255, 255, 40, 200, 255, 200, 200, 255, 255],\n [200, 200, 200, 200, 200, 200, 200, 255, 255, 255, 255, 
255]],\n dtype=np.uint8)\n\n # expected area closing with area 2\n expected_2 = np.array(\n [[240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240],\n [240, 200, 200, 240, 240, 240, 200, 200, 240, 240, 200, 240],\n [240, 200, 200, 240, 240, 240, 240, 240, 240, 240, 200, 240],\n [240, 240, 240, 240, 240, 240, 100, 100, 240, 240, 200, 240],\n [240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240],\n [200, 200, 200, 200, 200, 200, 200, 240, 200, 200, 255, 255],\n [200, 255, 200, 200, 200, 255, 200, 240, 255, 255, 255, 255],\n [200, 200, 200, 100, 200, 200, 200, 240, 255, 255, 255, 255],\n [200, 200, 200, 100, 200, 200, 200, 240, 200, 200, 255, 255],\n [200, 200, 200, 200, 200, 40, 200, 240, 240, 200, 255, 255],\n [200, 200, 255, 255, 255, 40, 200, 255, 200, 200, 255, 255],\n [200, 200, 200, 200, 200, 200, 200, 255, 255, 255, 255, 255]],\n dtype=np.uint8)\n\n # expected diameter closing with diameter 4\n expected_4 = np.array(\n [[240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240],\n [240, 200, 200, 240, 240, 240, 240, 240, 240, 240, 240, 240],\n [240, 200, 200, 240, 240, 240, 240, 240, 240, 240, 240, 240],\n [240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240],\n [240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240],\n [200, 200, 200, 200, 200, 200, 200, 240, 240, 240, 255, 255],\n [200, 255, 200, 200, 200, 255, 200, 240, 255, 255, 255, 255],\n [200, 200, 200, 200, 200, 200, 200, 240, 255, 255, 255, 255],\n [200, 200, 200, 200, 200, 200, 200, 240, 200, 200, 255, 255],\n [200, 200, 200, 200, 200, 200, 200, 240, 240, 200, 255, 255],\n [200, 200, 255, 255, 255, 200, 200, 255, 200, 200, 255, 255],\n [200, 200, 200, 200, 200, 200, 200, 255, 255, 255, 255, 255]],\n dtype=np.uint8)\n\n # _full_type_test makes a test with many image types.\n _full_type_test(img, 2, expected_2, area_closing, connectivity=2)\n _full_type_test(img, 4, expected_4, area_closing, connectivity=2)\n\n P, S = max_tree(invert(img), connectivity=2)\n _full_type_test(img, 4, expected_4, area_closing,\n parent=P, tree_traverser=S)\n\n def test_area_opening(self):\n \"Test for Area Opening (2 thresholds, all types)\"\n\n # original image\n img = np.array([[15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15],\n [15, 55, 55, 15, 55, 15, 55, 55, 15, 15, 55, 15],\n [15, 55, 215, 15, 15, 15, 15, 15, 15, 15, 215, 15],\n [15, 15, 15, 15, 155, 15, 155, 155, 15, 15, 55, 15],\n [15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15],\n [55, 55, 55, 55, 55, 55, 55, 15, 55, 55, 0, 0],\n [55, 0, 55, 55, 55, 0, 55, 15, 0, 0, 0, 215],\n [55, 55, 55, 155, 55, 55, 55, 15, 0, 0, 0, 0],\n [55, 55, 55, 155, 55, 55, 55, 15, 55, 55, 0, 0],\n [55, 55, 55, 55, 55, 215, 55, 15, 15, 155, 0, 0],\n [55, 215, 0, 0, 0, 215, 55, 0, 55, 55, 0, 0],\n [55, 55, 55, 55, 55, 55, 55, 0, 0, 0, 0, 0]],\n dtype=np.uint8)\n\n # expected area closing with area 2\n expected_2 = np.array([[15, 15, 15, 15, 15, 15, 15, 15, 15,\n 15, 15, 15],\n [15, 55, 55, 15, 15, 15, 55, 55, 15,\n 15, 55, 15],\n [15, 55, 55, 15, 15, 15, 15, 15, 15,\n 15, 55, 15],\n [15, 15, 15, 15, 15, 15, 155, 155, 15,\n 15, 55, 15],\n [15, 15, 15, 15, 15, 15, 15, 15, 15,\n 15, 15, 15],\n [55, 55, 55, 55, 55, 55, 55, 15, 55,\n 55, 0, 0],\n [55, 0, 55, 55, 55, 0, 55, 15, 0,\n 0, 0, 0],\n [55, 55, 55, 155, 55, 55, 55, 15, 0,\n 0, 0, 0],\n [55, 55, 55, 155, 55, 55, 55, 15, 55,\n 55, 0, 0],\n [55, 55, 55, 55, 55, 215, 55, 15, 15,\n 55, 0, 0],\n [55, 55, 0, 0, 0, 215, 55, 0, 55,\n 55, 0, 0],\n [55, 55, 55, 55, 55, 55, 55, 0, 0,\n 0, 0, 0]],\n dtype=np.uint8)\n\n # expected diameter 
closing with diameter 4\n expected_4 = np.array([[15, 15, 15, 15, 15, 15, 15, 15, 15,\n 15, 15, 15],\n [15, 55, 55, 15, 15, 15, 15, 15, 15,\n 15, 15, 15],\n [15, 55, 55, 15, 15, 15, 15, 15, 15,\n 15, 15, 15],\n [15, 15, 15, 15, 15, 15, 15, 15, 15,\n 15, 15, 15],\n [15, 15, 15, 15, 15, 15, 15, 15, 15,\n 15, 15, 15],\n [55, 55, 55, 55, 55, 55, 55, 15, 15,\n 15, 0, 0],\n [55, 0, 55, 55, 55, 0, 55, 15, 0,\n 0, 0, 0],\n [55, 55, 55, 55, 55, 55, 55, 15, 0,\n 0, 0, 0],\n [55, 55, 55, 55, 55, 55, 55, 15, 55,\n 55, 0, 0],\n [55, 55, 55, 55, 55, 55, 55, 15, 15,\n 55, 0, 0],\n [55, 55, 0, 0, 0, 55, 55, 0, 55,\n 55, 0, 0],\n [55, 55, 55, 55, 55, 55, 55, 0, 0,\n 0, 0, 0]],\n dtype=np.uint8)\n\n # _full_type_test makes a test with many image types.\n _full_type_test(img, 2, expected_2, area_opening, connectivity=2)\n _full_type_test(img, 4, expected_4, area_opening, connectivity=2)\n\n P, S = max_tree(img, connectivity=2)\n _full_type_test(img, 4, expected_4, area_opening,\n parent=P, tree_traverser=S)\n\n def test_diameter_closing(self):\n \"Test for Diameter Opening (2 thresholds, all types)\"\n img = np.array([[97, 95, 93, 92, 91, 90, 90, 90, 91, 92, 93, 95],\n [95, 93, 91, 89, 88, 88, 88, 88, 88, 89, 91, 93],\n [93, 63, 63, 63, 63, 86, 86, 86, 87, 43, 43, 91],\n [92, 89, 88, 86, 85, 85, 84, 85, 85, 43, 43, 89],\n [91, 88, 87, 85, 84, 84, 83, 84, 84, 85, 87, 88],\n [90, 88, 86, 85, 84, 83, 83, 83, 84, 85, 86, 88],\n [90, 88, 86, 84, 83, 83, 82, 83, 83, 84, 86, 88],\n [90, 88, 86, 85, 84, 83, 83, 83, 84, 85, 86, 88],\n [91, 88, 87, 85, 84, 84, 83, 84, 84, 85, 87, 88],\n [92, 89, 23, 23, 85, 85, 84, 85, 85, 3, 3, 89],\n [93, 91, 23, 23, 87, 86, 86, 86, 87, 88, 3, 91],\n [95, 93, 91, 89, 88, 88, 88, 88, 88, 89, 91, 93]],\n dtype=np.uint8)\n\n ex2 = np.array([[97, 95, 93, 92, 91, 90, 90, 90, 91, 92, 93, 95],\n [95, 93, 91, 89, 88, 88, 88, 88, 88, 89, 91, 93],\n [93, 63, 63, 63, 63, 86, 86, 86, 87, 43, 43, 91],\n [92, 89, 88, 86, 85, 85, 84, 85, 85, 43, 43, 89],\n [91, 88, 87, 85, 84, 84, 83, 84, 84, 85, 87, 88],\n [90, 88, 86, 85, 84, 83, 83, 83, 84, 85, 86, 88],\n [90, 88, 86, 84, 83, 83, 83, 83, 83, 84, 86, 88],\n [90, 88, 86, 85, 84, 83, 83, 83, 84, 85, 86, 88],\n [91, 88, 87, 85, 84, 84, 83, 84, 84, 85, 87, 88],\n [92, 89, 23, 23, 85, 85, 84, 85, 85, 3, 3, 89],\n [93, 91, 23, 23, 87, 86, 86, 86, 87, 88, 3, 91],\n [95, 93, 91, 89, 88, 88, 88, 88, 88, 89, 91, 93]],\n dtype=np.uint8)\n\n ex4 = np.array([[97, 95, 93, 92, 91, 90, 90, 90, 91, 92, 93, 95],\n [95, 93, 91, 89, 88, 88, 88, 88, 88, 89, 91, 93],\n [93, 63, 63, 63, 63, 86, 86, 86, 87, 84, 84, 91],\n [92, 89, 88, 86, 85, 85, 84, 85, 85, 84, 84, 89],\n [91, 88, 87, 85, 84, 84, 83, 84, 84, 85, 87, 88],\n [90, 88, 86, 85, 84, 83, 83, 83, 84, 85, 86, 88],\n [90, 88, 86, 84, 83, 83, 83, 83, 83, 84, 86, 88],\n [90, 88, 86, 85, 84, 83, 83, 83, 84, 85, 86, 88],\n [91, 88, 87, 85, 84, 84, 83, 84, 84, 85, 87, 88],\n [92, 89, 84, 84, 85, 85, 84, 85, 85, 84, 84, 89],\n [93, 91, 84, 84, 87, 86, 86, 86, 87, 88, 84, 91],\n [95, 93, 91, 89, 88, 88, 88, 88, 88, 89, 91, 93]],\n dtype=np.uint8)\n\n # _full_type_test makes a test with many image types.\n _full_type_test(img, 2, ex2, diameter_closing, connectivity=2)\n _full_type_test(img, 4, ex4, diameter_closing, connectivity=2)\n\n P, S = max_tree(invert(img), connectivity=2)\n _full_type_test(img, 4, ex4, diameter_opening,\n parent=P, tree_traverser=S)\n\n def test_diameter_opening(self):\n \"Test for Diameter Opening (2 thresholds, all types)\"\n img = np.array([[5, 7, 9, 11, 12, 12, 12, 12, 12, 11, 9, 7],\n [7, 
10, 11, 13, 14, 14, 15, 14, 14, 13, 11, 10],\n [9, 40, 40, 40, 40, 16, 16, 16, 16, 60, 60, 11],\n [11, 13, 15, 16, 17, 18, 18, 18, 17, 60, 60, 13],\n [12, 14, 16, 17, 18, 19, 19, 19, 18, 17, 16, 14],\n [12, 14, 16, 18, 19, 19, 19, 19, 19, 18, 16, 14],\n [12, 15, 16, 18, 19, 19, 20, 19, 19, 18, 16, 15],\n [12, 14, 16, 18, 19, 19, 19, 19, 19, 18, 16, 14],\n [12, 14, 16, 17, 18, 19, 19, 19, 18, 17, 16, 14],\n [11, 13, 80, 80, 17, 18, 18, 18, 17, 100, 100, 13],\n [9, 11, 80, 80, 16, 16, 16, 16, 16, 15, 100, 11],\n [7, 10, 11, 13, 14, 14, 15, 14, 14, 13, 11, 10]])\n\n ex2 = np.array([[5, 7, 9, 11, 12, 12, 12, 12, 12, 11, 9, 7],\n [7, 10, 11, 13, 14, 14, 15, 14, 14, 13, 11, 10],\n [9, 40, 40, 40, 40, 16, 16, 16, 16, 60, 60, 11],\n [11, 13, 15, 16, 17, 18, 18, 18, 17, 60, 60, 13],\n [12, 14, 16, 17, 18, 19, 19, 19, 18, 17, 16, 14],\n [12, 14, 16, 18, 19, 19, 19, 19, 19, 18, 16, 14],\n [12, 15, 16, 18, 19, 19, 19, 19, 19, 18, 16, 15],\n [12, 14, 16, 18, 19, 19, 19, 19, 19, 18, 16, 14],\n [12, 14, 16, 17, 18, 19, 19, 19, 18, 17, 16, 14],\n [11, 13, 80, 80, 17, 18, 18, 18, 17, 100, 100, 13],\n [9, 11, 80, 80, 16, 16, 16, 16, 16, 15, 100, 11],\n [7, 10, 11, 13, 14, 14, 15, 14, 14, 13, 11, 10]])\n\n ex4 = np.array([[5, 7, 9, 11, 12, 12, 12, 12, 12, 11, 9, 7],\n [7, 10, 11, 13, 14, 14, 15, 14, 14, 13, 11, 10],\n [9, 40, 40, 40, 40, 16, 16, 16, 16, 18, 18, 11],\n [11, 13, 15, 16, 17, 18, 18, 18, 17, 18, 18, 13],\n [12, 14, 16, 17, 18, 19, 19, 19, 18, 17, 16, 14],\n [12, 14, 16, 18, 19, 19, 19, 19, 19, 18, 16, 14],\n [12, 15, 16, 18, 19, 19, 19, 19, 19, 18, 16, 15],\n [12, 14, 16, 18, 19, 19, 19, 19, 19, 18, 16, 14],\n [12, 14, 16, 17, 18, 19, 19, 19, 18, 17, 16, 14],\n [11, 13, 18, 18, 17, 18, 18, 18, 17, 18, 18, 13],\n [9, 11, 18, 18, 16, 16, 16, 16, 16, 15, 18, 11],\n [7, 10, 11, 13, 14, 14, 15, 14, 14, 13, 11, 10]])\n\n # _full_type_test makes a test with many image types.\n _full_type_test(img, 2, ex2, diameter_opening, connectivity=2)\n _full_type_test(img, 4, ex4, diameter_opening, connectivity=2)\n\n P, S = max_tree(img, connectivity=2)\n _full_type_test(img, 4, ex4, diameter_opening,\n parent=P, tree_traverser=S)\n\n def test_local_maxima(self):\n \"local maxima for various data types\"\n data = np.array([[10, 11, 13, 14, 14, 15, 14, 14, 13, 11],\n [11, 13, 15, 16, 16, 16, 16, 16, 15, 13],\n [13, 15, 40, 40, 18, 18, 18, 60, 60, 15],\n [14, 16, 40, 40, 19, 19, 19, 60, 60, 16],\n [14, 16, 18, 19, 19, 19, 19, 19, 18, 16],\n [15, 16, 18, 19, 19, 20, 19, 19, 18, 16],\n [14, 16, 18, 19, 19, 19, 19, 19, 18, 16],\n [14, 16, 80, 80, 19, 19, 19, 100, 100, 16],\n [13, 15, 80, 80, 18, 18, 18, 100, 100, 15],\n [11, 13, 15, 16, 16, 16, 16, 16, 15, 13]],\n dtype=np.uint8)\n expected_result = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 1, 0, 0, 0, 1, 1, 0],\n [0, 0, 1, 1, 0, 0, 0, 1, 1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 1, 0, 0, 0, 1, 1, 0],\n [0, 0, 1, 1, 0, 0, 0, 1, 1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],\n dtype=np.uint64)\n for dtype in [np.uint8, np.uint64, np.int8, np.int64]:\n\n test_data = data.astype(dtype)\n out = max_tree_local_maxima(test_data, connectivity=1)\n out_bin = out > 0\n assert_array_equal(expected_result, out_bin)\n assert out.dtype == expected_result.dtype\n assert np.max(out) == 5\n\n P, S = max_tree(test_data)\n out = max_tree_local_maxima(test_data,\n parent=P,\n tree_traverser=S)\n\n assert_array_equal(expected_result, out_bin)\n\n assert out.dtype == 
expected_result.dtype\n assert np.max(out) == 5\n\n def test_extrema_float(self):\n \"specific tests for float type\"\n data = np.array([[0.10, 0.11, 0.13, 0.14, 0.14, 0.15, 0.14,\n 0.14, 0.13, 0.11],\n [0.11, 0.13, 0.15, 0.16, 0.16, 0.16, 0.16,\n 0.16, 0.15, 0.13],\n [0.13, 0.15, 0.40, 0.40, 0.18, 0.18, 0.18,\n 0.60, 0.60, 0.15],\n [0.14, 0.16, 0.40, 0.40, 0.19, 0.19, 0.19,\n 0.60, 0.60, 0.16],\n [0.14, 0.16, 0.18, 0.19, 0.19, 0.19, 0.19,\n 0.19, 0.18, 0.16],\n [0.15, 0.182, 0.18, 0.19, 0.204, 0.20, 0.19,\n 0.19, 0.18, 0.16],\n [0.14, 0.16, 0.18, 0.19, 0.19, 0.19, 0.19,\n 0.19, 0.18, 0.16],\n [0.14, 0.16, 0.80, 0.80, 0.19, 0.19, 0.19,\n 4.0, 1.0, 0.16],\n [0.13, 0.15, 0.80, 0.80, 0.18, 0.18, 0.18,\n 1.0, 1.0, 0.15],\n [0.11, 0.13, 0.15, 0.16, 0.16, 0.16, 0.16,\n 0.16, 0.15, 0.13]],\n dtype=np.float32)\n\n expected_result = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 1, 0, 0, 0, 1, 1, 0],\n [0, 0, 1, 1, 0, 0, 0, 1, 1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 1, 0, 0, 0, 1, 0, 0],\n [0, 0, 1, 1, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],\n dtype=np.uint8)\n\n # test for local maxima\n out = max_tree_local_maxima(data, connectivity=1)\n out_bin = out > 0\n assert_array_equal(expected_result, out_bin)\n assert np.max(out) == 6\n\n def test_3d(self):\n \"\"\"tests the detection of maxima in 3D.\"\"\"\n img = np.zeros((8, 8, 8), dtype=np.uint8)\n local_maxima = np.zeros((8, 8, 8), dtype=np.uint64)\n\n # first maximum: only one pixel\n img[1, 1:3, 1:3] = 100\n img[2, 2, 2] = 200\n img[3, 1:3, 1:3] = 100\n local_maxima[2, 2, 2] = 1\n\n # second maximum: three pixels in z-direction\n img[5:8, 1, 1] = 200\n local_maxima[5:8, 1, 1] = 1\n\n # third: two maxima in 0 and 3.\n img[0, 5:8, 5:8] = 200\n img[1, 6, 6] = 100\n img[2, 5:7, 5:7] = 200\n img[0:3, 5:8, 5:8] += 50\n local_maxima[0, 5:8, 5:8] = 1\n local_maxima[2, 5:7, 5:7] = 1\n\n # four : one maximum in the corner of the square\n img[6:8, 6:8, 6:8] = 200\n img[7, 7, 7] = 255\n local_maxima[7, 7, 7] = 1\n\n out = max_tree_local_maxima(img)\n out_bin = out > 0\n assert_array_equal(local_maxima, out_bin)\n assert np.max(out) == 5\n", "import base64\n\nimport numpy as np\n\nfrom . import _marching_cubes_lewiner_luts as mcluts\nfrom . import _marching_cubes_lewiner_cy\nfrom ._marching_cubes_classic import _marching_cubes_classic\n\n\ndef marching_cubes(volume, level=None, *, spacing=(1., 1., 1.),\n gradient_direction='descent', step_size=1,\n allow_degenerate=True, method='lewiner', mask=None):\n \"\"\"Marching cubes algorithm to find surfaces in 3d volumetric data.\n\n In contrast with Lorensen et al. approach [2]_, Lewiner et\n al. algorithm is faster, resolves ambiguities, and guarantees\n topologically correct results. Therefore, this algorithm generally\n a better choice.\n\n Parameters\n ----------\n volume : (M, N, P) array\n Input data volume to find isosurfaces. Will internally be\n converted to float32 if necessary.\n level : float, optional\n Contour value to search for isosurfaces in `volume`. 
If not\n given or None, the average of the min and max of vol is used.\n spacing : length-3 tuple of floats, optional\n Voxel spacing in spatial dimensions corresponding to numpy array\n indexing dimensions (M, N, P) as in `volume`.\n gradient_direction : string, optional\n Controls if the mesh was generated from an isosurface with gradient\n descent toward objects of interest (the default), or the opposite,\n considering the *left-hand* rule.\n The two options are:\n * descent : Object was greater than exterior\n * ascent : Exterior was greater than object\n step_size : int, optional\n Step size in voxels. Default 1. Larger steps yield faster but\n coarser results. The result will always be topologically correct\n though.\n allow_degenerate : bool, optional\n Whether to allow degenerate (i.e. zero-area) triangles in the\n end-result. Default True. If False, degenerate triangles are\n removed, at the cost of making the algorithm slower.\n method: str, optional\n One of 'lewiner', 'lorensen' or '_lorensen'. Specify which of\n Lewiner et al. or Lorensen et al. method will be used. The\n '_lorensen' flag correspond to an old implementation that will\n be deprecated in version 0.19.\n mask : (M, N, P) array, optional\n Boolean array. The marching cube algorithm will be computed only on\n True elements. This will save computational time when interfaces\n are located within certain region of the volume M, N, P-e.g. the top\n half of the cube-and also allow to compute finite surfaces-i.e. open\n surfaces that do not end at the border of the cube.\n\n Returns\n -------\n verts : (V, 3) array\n Spatial coordinates for V unique mesh vertices. Coordinate order\n matches input `volume` (M, N, P). If ``allow_degenerate`` is set to\n True, then the presence of degenerate triangles in the mesh can make\n this array have duplicate vertices.\n faces : (F, 3) array\n Define triangular faces via referencing vertex indices from ``verts``.\n This algorithm specifically outputs triangles, so each face has\n exactly three indices.\n normals : (V, 3) array\n The normal direction at each vertex, as calculated from the\n data.\n values : (V, ) array\n Gives a measure for the maximum value of the data in the local region\n near each vertex. This can be used by visualization tools to apply\n a colormap to the mesh.\n\n See Also\n --------\n skimage.measure.mesh_surface_area\n skimage.measure.find_contours\n\n Notes\n -----\n The algorithm [1]_ is an improved version of Chernyaev's Marching\n Cubes 33 algorithm. It is an efficient algorithm that relies on\n heavy use of lookup tables to handle the many different cases,\n keeping the algorithm relatively easy. 
This implementation is\n written in Cython, ported from Lewiner's C++ implementation.\n\n To quantify the area of an isosurface generated by this algorithm, pass\n verts and faces to `skimage.measure.mesh_surface_area`.\n\n Regarding visualization of algorithm output, to contour a volume\n named `myvolume` about the level 0.0, using the ``mayavi`` package::\n\n >>>\n >> from mayavi import mlab\n >> verts, faces, _, _ = marching_cubes(myvolume, 0.0)\n >> mlab.triangular_mesh([vert[0] for vert in verts],\n [vert[1] for vert in verts],\n [vert[2] for vert in verts],\n faces)\n >> mlab.show()\n\n Similarly using the ``visvis`` package::\n\n >>>\n >> import visvis as vv\n >> verts, faces, normals, values = marching_cubes(myvolume, 0.0)\n >> vv.mesh(np.fliplr(verts), faces, normals, values)\n >> vv.use().Run()\n\n To reduce the number of triangles in the mesh for better performance,\n see this `example\n <https://docs.enthought.com/mayavi/mayavi/auto/example_julia_set_decimation.html#example-julia-set-decimation>`_\n using the ``mayavi`` package.\n\n References\n ----------\n .. [1] Thomas Lewiner, Helio Lopes, Antonio Wilson Vieira and Geovan\n Tavares. Efficient implementation of Marching Cubes' cases with\n topological guarantees. Journal of Graphics Tools 8(2)\n pp. 1-15 (december 2003).\n :DOI:`10.1080/10867651.2003.10487582`\n .. [2] Lorensen, William and Harvey E. Cline. Marching Cubes: A High\n Resolution 3D Surface Construction Algorithm. Computer Graphics\n (SIGGRAPH 87 Proceedings) 21(4) July 1987, p. 163-170).\n :DOI:`10.1145/37401.37422`\n\n \"\"\"\n\n if method == 'lewiner':\n return _marching_cubes_lewiner(volume, level, spacing,\n gradient_direction, step_size,\n allow_degenerate, use_classic=False,\n mask=mask)\n elif method == 'lorensen':\n return _marching_cubes_lewiner(volume, level, spacing,\n gradient_direction, step_size,\n allow_degenerate, use_classic=True,\n mask=mask)\n elif method == '_lorensen':\n if mask is not None:\n raise NotImplementedError(\n 'Parameter `mask` is not implemented for method \"_lorensen\" '\n 'and will be ignored.'\n )\n return _marching_cubes_classic(volume, level, spacing,\n gradient_direction)\n else:\n raise ValueError(\"method should be one of 'lewiner', 'lorensen' or \"\n \"'_lorensen'.\")\n\n\ndef _marching_cubes_lewiner(volume, level, spacing, gradient_direction,\n step_size, allow_degenerate, use_classic, mask):\n \"\"\"Lewiner et al. algorithm for marching cubes. 
See\n marching_cubes_lewiner for documentation.\n\n \"\"\"\n\n # Check volume and ensure its in the format that the alg needs\n if not isinstance(volume, np.ndarray) or (volume.ndim != 3):\n raise ValueError('Input volume should be a 3D numpy array.')\n if volume.shape[0] < 2 or volume.shape[1] < 2 or volume.shape[2] < 2:\n raise ValueError(\"Input array must be at least 2x2x2.\")\n volume = np.ascontiguousarray(volume,\n np.float32) # no copy if not necessary\n\n # Check/convert other inputs:\n # level\n if level is None:\n level = 0.5 * (volume.min() + volume.max())\n else:\n level = float(level)\n if level < volume.min() or level > volume.max():\n raise ValueError(\"Surface level must be within volume data range.\")\n # spacing\n if len(spacing) != 3:\n raise ValueError(\"`spacing` must consist of three floats.\")\n # step_size\n step_size = int(step_size)\n if step_size < 1:\n raise ValueError('step_size must be at least one.')\n # use_classic\n use_classic = bool(use_classic)\n\n # Get LutProvider class (reuse if possible)\n L = _get_mc_luts()\n\n # Check if a mask array is passed\n if mask is not None:\n if not mask.shape == volume.shape:\n raise ValueError('volume and mask must have the same shape.')\n\n # Apply algorithm\n func = _marching_cubes_lewiner_cy.marching_cubes\n vertices, faces, normals, values = func(volume, level, L,\n step_size, use_classic, mask)\n\n if not len(vertices):\n raise RuntimeError('No surface found at the given iso value.')\n\n # Output in z-y-x order, as is common in skimage\n vertices = np.fliplr(vertices)\n normals = np.fliplr(normals)\n\n # Finishing touches to output\n faces.shape = -1, 3\n if gradient_direction == 'descent':\n # MC implementation is right-handed, but gradient_direction is\n # left-handed\n faces = np.fliplr(faces)\n elif not gradient_direction == 'ascent':\n raise ValueError(\"Incorrect input %s in `gradient_direction`, see \"\n \"docstring.\" % (gradient_direction))\n if not np.array_equal(spacing, (1, 1, 1)):\n vertices = vertices * np.r_[spacing]\n\n if allow_degenerate:\n return vertices, faces, normals, values\n else:\n fun = _marching_cubes_lewiner_cy.remove_degenerate_faces\n return fun(vertices.astype(np.float32), faces, normals, values)\n\n\ndef _to_array(args):\n shape, text = args\n byts = base64.decodebytes(text.encode('utf-8'))\n ar = np.frombuffer(byts, dtype='int8')\n ar.shape = shape\n return ar\n\n\n# Map an edge-index to two relative pixel positions. 
The ege index\n# represents a point that lies somewhere in between these pixels.\n# Linear interpolation should be used to determine where it is exactly.\n# 0\n# 3 1 -> 0x\n# 2 xx\nEDGETORELATIVEPOSX = np.array([ [0,1],[1,1],[1,0],[0,0], [0,1],[1,1],[1,0],[0,0], [0,0],[1,1],[1,1],[0,0] ], 'int8')\nEDGETORELATIVEPOSY = np.array([ [0,0],[0,1],[1,1],[1,0], [0,0],[0,1],[1,1],[1,0], [0,0],[0,0],[1,1],[1,1] ], 'int8')\nEDGETORELATIVEPOSZ = np.array([ [0,0],[0,0],[0,0],[0,0], [1,1],[1,1],[1,1],[1,1], [0,1],[0,1],[0,1],[0,1] ], 'int8')\n\n\ndef _get_mc_luts():\n \"\"\" Kind of lazy obtaining of the luts.\n \"\"\"\n if not hasattr(mcluts, 'THE_LUTS'):\n\n mcluts.THE_LUTS = _marching_cubes_lewiner_cy.LutProvider(\n EDGETORELATIVEPOSX, EDGETORELATIVEPOSY, EDGETORELATIVEPOSZ,\n\n _to_array(mcluts.CASESCLASSIC), _to_array(mcluts.CASES),\n\n _to_array(mcluts.TILING1), _to_array(mcluts.TILING2), _to_array(mcluts.TILING3_1), _to_array(mcluts.TILING3_2),\n _to_array(mcluts.TILING4_1), _to_array(mcluts.TILING4_2), _to_array(mcluts.TILING5), _to_array(mcluts.TILING6_1_1),\n _to_array(mcluts.TILING6_1_2), _to_array(mcluts.TILING6_2), _to_array(mcluts.TILING7_1),\n _to_array(mcluts.TILING7_2), _to_array(mcluts.TILING7_3), _to_array(mcluts.TILING7_4_1),\n _to_array(mcluts.TILING7_4_2), _to_array(mcluts.TILING8), _to_array(mcluts.TILING9),\n _to_array(mcluts.TILING10_1_1), _to_array(mcluts.TILING10_1_1_), _to_array(mcluts.TILING10_1_2),\n _to_array(mcluts.TILING10_2), _to_array(mcluts.TILING10_2_), _to_array(mcluts.TILING11),\n _to_array(mcluts.TILING12_1_1), _to_array(mcluts.TILING12_1_1_), _to_array(mcluts.TILING12_1_2),\n _to_array(mcluts.TILING12_2), _to_array(mcluts.TILING12_2_), _to_array(mcluts.TILING13_1),\n _to_array(mcluts.TILING13_1_), _to_array(mcluts.TILING13_2), _to_array(mcluts.TILING13_2_),\n _to_array(mcluts.TILING13_3), _to_array(mcluts.TILING13_3_), _to_array(mcluts.TILING13_4),\n _to_array(mcluts.TILING13_5_1), _to_array(mcluts.TILING13_5_2), _to_array(mcluts.TILING14),\n\n _to_array(mcluts.TEST3), _to_array(mcluts.TEST4), _to_array(mcluts.TEST6),\n _to_array(mcluts.TEST7), _to_array(mcluts.TEST10), _to_array(mcluts.TEST12),\n _to_array(mcluts.TEST13), _to_array(mcluts.SUBCONFIG13),\n )\n\n return mcluts.THE_LUTS\n", "import numpy as np\nimport skimage.data as data\nfrom skimage.data._fetchers import image_fetcher\nfrom skimage import io\nfrom skimage._shared.testing import assert_equal, assert_almost_equal, fetch\nimport os\nimport pytest\n\n\ndef test_data_dir():\n # data_dir should be a directory people can use as a standard directory\n # https://github.com/scikit-image/scikit-image/pull/3945#issuecomment-498141893\n data_dir = data.data_dir\n assert 'astronaut.png' in os.listdir(data_dir)\n\n\ndef test_download_all_with_pooch():\n # jni first wrote this test with the intention of\n # fully deleting the files in the data_dir,\n # then ensure that the data gets downloaded accordingly.\n # hmaarrfk raised the concern that this test wouldn't\n # play well with parallel testing since we\n # may be breaking the global state that certain other\n # tests require, especially in parallel testing\n\n # The second concern is that this test essentially uses\n # alot of bandwidth, which is not fun for developers on\n # lower speed connections.\n # https://github.com/scikit-image/scikit-image/pull/4666/files/26d5138b25b958da6e97ebf979e9bc36f32c3568#r422604863\n data_dir = data.data_dir\n if image_fetcher is not None:\n data.download_all()\n assert len(os.listdir(data_dir)) > 50\n else:\n with 
pytest.raises(ModuleNotFoundError):\n data.download_all()\n\n\ndef test_astronaut():\n \"\"\" Test that \"astronaut\" image can be loaded. \"\"\"\n astronaut = data.astronaut()\n assert_equal(astronaut.shape, (512, 512, 3))\n\n\ndef test_camera():\n \"\"\" Test that \"camera\" image can be loaded. \"\"\"\n cameraman = data.camera()\n assert_equal(cameraman.ndim, 2)\n\n\ndef test_checkerboard():\n \"\"\" Test that \"checkerboard\" image can be loaded. \"\"\"\n data.checkerboard()\n\n\ndef test_chelsea():\n \"\"\" Test that \"chelsea\" image can be loaded. \"\"\"\n data.chelsea()\n\n\ndef test_clock():\n \"\"\" Test that \"clock\" image can be loaded. \"\"\"\n data.clock()\n\n\ndef test_coffee():\n \"\"\" Test that \"coffee\" image can be loaded. \"\"\"\n data.coffee()\n\n\ndef test_eagle():\n \"\"\" Test that \"eagle\" image can be loaded. \"\"\"\n # Fetching the data through the testing module will\n # cause the test to skip if pooch isn't installed.\n fetch('data/eagle.png')\n eagle = data.eagle()\n assert_equal(eagle.ndim, 2)\n assert_equal(eagle.dtype, np.dtype('uint8'))\n\n\ndef test_horse():\n \"\"\" Test that \"horse\" image can be loaded. \"\"\"\n horse = data.horse()\n assert_equal(horse.ndim, 2)\n assert_equal(horse.dtype, np.dtype('bool'))\n\n\ndef test_hubble():\n \"\"\" Test that \"Hubble\" image can be loaded. \"\"\"\n data.hubble_deep_field()\n\n\ndef test_immunohistochemistry():\n \"\"\" Test that \"immunohistochemistry\" image can be loaded. \"\"\"\n data.immunohistochemistry()\n\n\ndef test_logo():\n \"\"\" Test that \"logo\" image can be loaded. \"\"\"\n logo = data.logo()\n assert_equal(logo.ndim, 3)\n assert_equal(logo.shape[2], 4)\n\n\ndef test_moon():\n \"\"\" Test that \"moon\" image can be loaded. \"\"\"\n data.moon()\n\n\ndef test_page():\n \"\"\" Test that \"page\" image can be loaded. \"\"\"\n data.page()\n\n\ndef test_rocket():\n \"\"\" Test that \"rocket\" image can be loaded. \"\"\"\n data.rocket()\n\n\ndef test_text():\n \"\"\" Test that \"text\" image can be loaded. \"\"\"\n data.text()\n\n\ndef test_stereo_motorcycle():\n \"\"\" Test that \"stereo_motorcycle\" image can be loaded. 
\"\"\"\n data.stereo_motorcycle()\n\n\ndef test_binary_blobs():\n blobs = data.binary_blobs(length=128)\n assert_almost_equal(blobs.mean(), 0.5, decimal=1)\n blobs = data.binary_blobs(length=128, volume_fraction=0.25)\n assert_almost_equal(blobs.mean(), 0.25, decimal=1)\n blobs = data.binary_blobs(length=32, volume_fraction=0.25, n_dim=3)\n assert_almost_equal(blobs.mean(), 0.25, decimal=1)\n other_realization = data.binary_blobs(length=32, volume_fraction=0.25,\n n_dim=3)\n assert not np.all(blobs == other_realization)\n\n\ndef test_lfw_subset():\n \"\"\" Test that \"lfw_subset\" can be loaded.\"\"\"\n data.lfw_subset()\n\n\ndef test_skin():\n \"\"\"Test that \"skin\" image can be loaded.\n\n Needs internet connection.\n \"\"\"\n skin = data.skin()\n assert skin.ndim == 3\n\n\ndef test_cell():\n \"\"\" Test that \"cell\" image can be loaded.\"\"\"\n data.cell()\n\n\ndef test_cells3d():\n \"\"\"Needs internet connection.\"\"\"\n path = fetch('data/cells3d.tif')\n image = io.imread(path)\n assert image.shape == (60, 2, 256, 256)\n\n\ndef test_brain_3d():\n \"\"\"Needs internet connection.\"\"\"\n path = fetch('data/brain.tiff')\n image = io.imread(path)\n assert image.shape == (10, 256, 256)\n\n\ndef test_kidney_3d_multichannel():\n \"\"\"Test that 3D multichannel image of kidney tissue can be loaded.\n\n Needs internet connection.\n \"\"\"\n fetch('data/kidney.tif')\n kidney = data.kidney()\n assert kidney.shape == (16, 512, 512, 3)\n\n\ndef test_lily_multichannel():\n \"\"\"Test that microscopy image of lily of the valley can be loaded.\n\n Needs internet connection.\n \"\"\"\n fetch('data/lily.tif')\n lily = data.lily()\n assert lily.shape == (922, 922, 4)\n\n\ndef test_vortex():\n fetch('data/pivchallenge-B-B001_1.tif')\n fetch('data/pivchallenge-B-B001_2.tif')\n image0, image1 = data.vortex()\n for image in [image0, image1]:\n assert image.shape == (512, 512)\n\n\[email protected](\n 'function_name', ['create_image_fetcher', 'file_hash', 'image_fetcher']\n)\ndef test_fetchers_are_public(function_name):\n # Check that the following functions that are only used indirectly in the\n # above tests are public.\n assert hasattr(data, function_name)\n", "from pymatting.util.util import apply_to_channels\nimport numpy as np\nfrom numba import njit, prange\n\n\n@njit(\"f8[:, :](f8[:, :], i8)\", cache=True, parallel=True)\ndef boxfilter_rows_valid(src, r):\n m, n = src.shape\n\n dst = np.zeros((m, n - 2 * r))\n\n for i in prange(m):\n for j_dst in range(1):\n s = 0.0\n for j_src in range(j_dst, j_dst + 2 * r + 1):\n s += src[i, j_src]\n dst[i, j_dst] = s\n\n for j_dst in range(1, dst.shape[1]):\n j_src = j_dst - 1\n s -= src[i, j_src]\n\n j_src = j_dst + 2 * r\n s += src[i, j_src]\n\n dst[i, j_dst] = s\n\n return dst\n\n\n@njit(\"f8[:, :](f8[:, :], i8)\", cache=True, parallel=True)\ndef boxfilter_rows_same(src, r):\n m, n = src.shape\n\n dst = np.zeros((m, n))\n\n for i in prange(m):\n for j_dst in range(1):\n s = 0.0\n for j_src in range(j_dst + r + 1):\n s += src[i, j_src]\n dst[i, j_dst] = s\n\n for j_dst in range(1, r + 1):\n s += src[i, j_dst + r]\n dst[i, j_dst] = s\n\n for j_dst in range(r + 1, n - r):\n s -= src[i, j_dst - r - 1]\n s += src[i, j_dst + r]\n dst[i, j_dst] = s\n\n for j_dst in range(n - r, n):\n s -= src[i, j_dst - r - 1]\n dst[i, j_dst] = s\n\n return dst\n\n\n@njit(\"f8[:, :](f8[:, :], i8)\", cache=True, parallel=True)\ndef boxfilter_rows_full(src, r):\n m, n = src.shape\n\n dst = np.zeros((m, n + 2 * r))\n\n for i in prange(m):\n for j_dst in range(1):\n s = 0.0\n for 
j_src in range(j_dst + r + 1 - r):\n s += src[i, j_src]\n dst[i, j_dst] = s\n\n for j_dst in range(1, 2 * r + 1):\n s += src[i, j_dst]\n dst[i, j_dst] = s\n\n for j_dst in range(2 * r + 1, dst.shape[1] - 2 * r):\n s -= src[i, j_dst - r - r - 1]\n s += src[i, j_dst]\n dst[i, j_dst] = s\n\n for j_dst in range(dst.shape[1] - 2 * r, dst.shape[1]):\n s -= src[i, j_dst - r - r - 1]\n dst[i, j_dst] = s\n\n return dst\n\n\n@apply_to_channels\ndef boxfilter(src, radius=3, mode=\"same\"):\n \"\"\"Computes the boxfilter (uniform blur, i.e. blur with kernel :code:`np.ones(radius, radius)`) of an input image.\n\n Depending on the mode, the input image of size :math:`(h, w)` is either of shape\n\n * :math:`(h - 2 r, w - 2 r)` in case of 'valid' mode\n * :math:`(h, w)` in case of 'same' mode\n * :math:`(h + 2 r, w + 2 r)` in case of 'full' mode\n\n .. image:: figures/padding.png\n\n Parameters\n ----------\n src: numpy.ndarray\n Input image having either shape :math:`h \\\\times w \\\\times d` or :math:`h \\\\times w`\n radius: int\n Radius of boxfilter, defaults to :math:`3`\n mode: str\n One of 'valid', 'same' or 'full', defaults to 'same'\n\n Returns\n -------\n dst: numpy.ndarray\n Blurred image\n\n Example\n -------\n >>> from pymatting import *\n >>> import numpy as np\n >>> boxfilter(np.eye(5), radius=2, mode=\"valid\")\n array([[5.]])\n >>> boxfilter(np.eye(5), radius=2, mode=\"same\")\n array([[3., 3., 3., 2., 1.],\n [3., 4., 4., 3., 2.],\n [3., 4., 5., 4., 3.],\n [2., 3., 4., 4., 3.],\n [1., 2., 3., 3., 3.]])\n >>> boxfilter(np.eye(5), radius=2, mode=\"full\")\n array([[1., 1., 1., 1., 1., 0., 0., 0., 0.],\n [1., 2., 2., 2., 2., 1., 0., 0., 0.],\n [1., 2., 3., 3., 3., 2., 1., 0., 0.],\n [1., 2., 3., 4., 4., 3., 2., 1., 0.],\n [1., 2., 3., 4., 5., 4., 3., 2., 1.],\n [0., 1., 2., 3., 4., 4., 3., 2., 1.],\n [0., 0., 1., 2., 3., 3., 3., 2., 1.],\n [0., 0., 0., 1., 2., 2., 2., 2., 1.],\n [0., 0., 0., 0., 1., 1., 1., 1., 1.]])\n\n \"\"\"\n assert radius > 0\n assert mode in [\"valid\", \"same\", \"full\"]\n assert src.shape[0] >= 2 * radius + 1\n assert src.shape[1] >= 2 * radius + 1\n\n boxfilter_rows = {\n \"valid\": boxfilter_rows_valid,\n \"same\": boxfilter_rows_same,\n \"full\": boxfilter_rows_full,\n }[mode]\n\n tmp = src.T\n tmp = boxfilter_rows(tmp, radius)\n tmp = tmp.T\n dst = boxfilter_rows(tmp, radius)\n\n return dst\n", "\nimport numpy as np\nfrom scipy import ndimage as ndi\n\nfrom .._shared.utils import _supported_float_type\nfrom ..morphology import dilation, erosion, square\nfrom ..util import img_as_float, view_as_windows\nfrom ..color import gray2rgb\n\n\ndef _find_boundaries_subpixel(label_img):\n \"\"\"See ``find_boundaries(..., mode='subpixel')``.\n\n Notes\n -----\n This function puts in an empty row and column between each *actual*\n row and column of the image, for a corresponding shape of ``2s - 1``\n for every image dimension of size ``s``. 
These \"interstitial\" rows\n and columns are filled as ``True`` if they separate two labels in\n `label_img`, ``False`` otherwise.\n\n I used ``view_as_windows`` to get the neighborhood of each pixel.\n Then I check whether there are two labels or more in that\n neighborhood.\n \"\"\"\n ndim = label_img.ndim\n max_label = np.iinfo(label_img.dtype).max\n\n label_img_expanded = np.zeros([(2 * s - 1) for s in label_img.shape],\n label_img.dtype)\n pixels = (slice(None, None, 2), ) * ndim\n label_img_expanded[pixels] = label_img\n\n edges = np.ones(label_img_expanded.shape, dtype=bool)\n edges[pixels] = False\n label_img_expanded[edges] = max_label\n windows = view_as_windows(np.pad(label_img_expanded, 1, mode='edge'),\n (3,) * ndim)\n\n boundaries = np.zeros_like(edges)\n for index in np.ndindex(label_img_expanded.shape):\n if edges[index]:\n values = np.unique(windows[index].ravel())\n if len(values) > 2: # single value and max_label\n boundaries[index] = True\n return boundaries\n\n\ndef find_boundaries(label_img, connectivity=1, mode='thick', background=0):\n \"\"\"Return bool array where boundaries between labeled regions are True.\n\n Parameters\n ----------\n label_img : array of int or bool\n An array in which different regions are labeled with either different\n integers or boolean values.\n connectivity : int in {1, ..., `label_img.ndim`}, optional\n A pixel is considered a boundary pixel if any of its neighbors\n has a different label. `connectivity` controls which pixels are\n considered neighbors. A connectivity of 1 (default) means\n pixels sharing an edge (in 2D) or a face (in 3D) will be\n considered neighbors. A connectivity of `label_img.ndim` means\n pixels sharing a corner will be considered neighbors.\n mode : string in {'thick', 'inner', 'outer', 'subpixel'}\n How to mark the boundaries:\n\n - thick: any pixel not completely surrounded by pixels of the\n same label (defined by `connectivity`) is marked as a boundary.\n This results in boundaries that are 2 pixels thick.\n - inner: outline the pixels *just inside* of objects, leaving\n background pixels untouched.\n - outer: outline pixels in the background around object\n boundaries. When two objects touch, their boundary is also\n marked.\n - subpixel: return a doubled image, with pixels *between* the\n original pixels marked as boundary where appropriate.\n background : int, optional\n For modes 'inner' and 'outer', a definition of a background\n label is required. See `mode` for descriptions of these two.\n\n Returns\n -------\n boundaries : array of bool, same shape as `label_img`\n A bool image where ``True`` represents a boundary pixel. For\n `mode` equal to 'subpixel', ``boundaries.shape[i]`` is equal\n to ``2 * label_img.shape[i] - 1`` for all ``i`` (a pixel is\n inserted in between all other pairs of pixels).\n\n Examples\n --------\n >>> labels = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n ... [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n ... [0, 0, 0, 0, 0, 5, 5, 5, 0, 0],\n ... [0, 0, 1, 1, 1, 5, 5, 5, 0, 0],\n ... [0, 0, 1, 1, 1, 5, 5, 5, 0, 0],\n ... [0, 0, 1, 1, 1, 5, 5, 5, 0, 0],\n ... [0, 0, 0, 0, 0, 5, 5, 5, 0, 0],\n ... [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n ... 
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.uint8)\n >>> find_boundaries(labels, mode='thick').astype(np.uint8)\n array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 1, 1, 0, 0],\n [0, 0, 1, 1, 1, 1, 1, 1, 1, 0],\n [0, 1, 1, 1, 1, 1, 0, 1, 1, 0],\n [0, 1, 1, 0, 1, 1, 0, 1, 1, 0],\n [0, 1, 1, 1, 1, 1, 0, 1, 1, 0],\n [0, 0, 1, 1, 1, 1, 1, 1, 1, 0],\n [0, 0, 0, 0, 0, 1, 1, 1, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)\n >>> find_boundaries(labels, mode='inner').astype(np.uint8)\n array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 1, 1, 0, 0],\n [0, 0, 1, 1, 1, 1, 0, 1, 0, 0],\n [0, 0, 1, 0, 1, 1, 0, 1, 0, 0],\n [0, 0, 1, 1, 1, 1, 0, 1, 0, 0],\n [0, 0, 0, 0, 0, 1, 1, 1, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)\n >>> find_boundaries(labels, mode='outer').astype(np.uint8)\n array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 1, 1, 0, 0],\n [0, 0, 1, 1, 1, 1, 0, 0, 1, 0],\n [0, 1, 0, 0, 1, 1, 0, 0, 1, 0],\n [0, 1, 0, 0, 1, 1, 0, 0, 1, 0],\n [0, 1, 0, 0, 1, 1, 0, 0, 1, 0],\n [0, 0, 1, 1, 1, 1, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 1, 1, 1, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)\n >>> labels_small = labels[::2, ::3]\n >>> labels_small\n array([[0, 0, 0, 0],\n [0, 0, 5, 0],\n [0, 1, 5, 0],\n [0, 0, 5, 0],\n [0, 0, 0, 0]], dtype=uint8)\n >>> find_boundaries(labels_small, mode='subpixel').astype(np.uint8)\n array([[0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, 1, 1, 0],\n [0, 0, 0, 1, 0, 1, 0],\n [0, 1, 1, 1, 0, 1, 0],\n [0, 1, 0, 1, 0, 1, 0],\n [0, 1, 1, 1, 0, 1, 0],\n [0, 0, 0, 1, 0, 1, 0],\n [0, 0, 0, 1, 1, 1, 0],\n [0, 0, 0, 0, 0, 0, 0]], dtype=uint8)\n >>> bool_image = np.array([[False, False, False, False, False],\n ... [False, False, False, False, False],\n ... [False, False, True, True, True],\n ... [False, False, True, True, True],\n ... [False, False, True, True, True]],\n ... 
dtype=bool)\n >>> find_boundaries(bool_image)\n array([[False, False, False, False, False],\n [False, False, True, True, True],\n [False, True, True, True, True],\n [False, True, True, False, False],\n [False, True, True, False, False]])\n \"\"\"\n if label_img.dtype == 'bool':\n label_img = label_img.astype(np.uint8)\n ndim = label_img.ndim\n footprint = ndi.generate_binary_structure(ndim, connectivity)\n if mode != 'subpixel':\n boundaries = (\n dilation(label_img, footprint) != erosion(label_img, footprint)\n )\n if mode == 'inner':\n foreground_image = (label_img != background)\n boundaries &= foreground_image\n elif mode == 'outer':\n max_label = np.iinfo(label_img.dtype).max\n background_image = (label_img == background)\n footprint = ndi.generate_binary_structure(ndim, ndim)\n inverted_background = np.array(label_img, copy=True)\n inverted_background[background_image] = max_label\n adjacent_objects = (\n (\n dilation(label_img, footprint)\n != erosion(inverted_background, footprint)\n )\n & ~background_image\n )\n boundaries &= (background_image | adjacent_objects)\n return boundaries\n else:\n boundaries = _find_boundaries_subpixel(label_img)\n return boundaries\n\n\ndef mark_boundaries(image, label_img, color=(1, 1, 0),\n outline_color=None, mode='outer', background_label=0):\n \"\"\"Return image with boundaries between labeled regions highlighted.\n\n Parameters\n ----------\n image : (M, N[, 3]) array\n Grayscale or RGB image.\n label_img : (M, N) array of int\n Label array where regions are marked by different integer values.\n color : length-3 sequence, optional\n RGB color of boundaries in the output image.\n outline_color : length-3 sequence, optional\n RGB color surrounding boundaries in the output image. If None, no\n outline is drawn.\n mode : string in {'thick', 'inner', 'outer', 'subpixel'}, optional\n The mode for finding boundaries.\n background_label : int, optional\n Which label to consider background (this is only useful for\n modes ``inner`` and ``outer``).\n\n Returns\n -------\n marked : (M, N, 3) array of float\n An image in which the boundaries between labels are\n superimposed on the original image.\n\n See Also\n --------\n find_boundaries\n \"\"\"\n float_dtype = _supported_float_type(image.dtype)\n marked = img_as_float(image, force_copy=True)\n marked = marked.astype(float_dtype, copy=False)\n if marked.ndim == 2:\n marked = gray2rgb(marked)\n if mode == 'subpixel':\n # Here, we want to interpose an extra line of pixels between\n # each original line - except for the last axis which holds\n # the RGB information. 
``ndi.zoom`` then performs the (cubic)\n # interpolation, filling in the values of the interposed pixels\n marked = ndi.zoom(marked, [2 - 1/s for s in marked.shape[:-1]] + [1],\n mode='mirror')\n boundaries = find_boundaries(label_img, mode=mode,\n background=background_label)\n if outline_color is not None:\n outlines = dilation(boundaries, square(3))\n marked[outlines] = outline_color\n marked[boundaries] = color\n return marked\n", "import numpy as np\nimport pytest\n\nfrom skimage._shared.testing import expected_warnings, test_parallel\nfrom skimage.feature import (graycomatrix,\n graycoprops,\n greycomatrix,\n greycoprops,\n local_binary_pattern,\n multiblock_lbp)\nfrom skimage.transform import integral_image\n\n\nclass TestGLCM():\n\n def setup(self):\n self.image = np.array([[0, 0, 1, 1],\n [0, 0, 1, 1],\n [0, 2, 2, 2],\n [2, 2, 3, 3]], dtype=np.uint8)\n\n @test_parallel()\n def test_output_angles(self):\n result = graycomatrix(\n self.image, [1], [0, np.pi / 4, np.pi / 2, 3 * np.pi / 4], 4\n )\n assert result.shape == (4, 4, 1, 4)\n expected1 = np.array([[2, 2, 1, 0],\n [0, 2, 0, 0],\n [0, 0, 3, 1],\n [0, 0, 0, 1]], dtype=np.uint32)\n np.testing.assert_array_equal(result[:, :, 0, 0], expected1)\n expected2 = np.array([[1, 1, 3, 0],\n [0, 1, 1, 0],\n [0, 0, 0, 2],\n [0, 0, 0, 0]], dtype=np.uint32)\n np.testing.assert_array_equal(result[:, :, 0, 1], expected2)\n expected3 = np.array([[3, 0, 2, 0],\n [0, 2, 2, 0],\n [0, 0, 1, 2],\n [0, 0, 0, 0]], dtype=np.uint32)\n np.testing.assert_array_equal(result[:, :, 0, 2], expected3)\n expected4 = np.array([[2, 0, 0, 0],\n [1, 1, 2, 0],\n [0, 0, 2, 1],\n [0, 0, 0, 0]], dtype=np.uint32)\n np.testing.assert_array_equal(result[:, :, 0, 3], expected4)\n\n def test_output_symmetric_1(self):\n result = graycomatrix(self.image, [1], [np.pi / 2], 4,\n symmetric=True)\n assert result.shape == (4, 4, 1, 1)\n expected = np.array([[6, 0, 2, 0],\n [0, 4, 2, 0],\n [2, 2, 2, 2],\n [0, 0, 2, 0]], dtype=np.uint32)\n np.testing.assert_array_equal(result[:, :, 0, 0], expected)\n\n def test_error_raise_float(self):\n for dtype in [\n float, np.double, np.float16, np.float32, np.float64\n ]:\n with pytest.raises(ValueError):\n graycomatrix(self.image.astype(dtype), [1], [np.pi], 4)\n\n def test_error_raise_int_types(self):\n for dtype in [\n np.int16, np.int32, np.int64, np.uint16, np.uint32, np.uint64\n ]:\n with pytest.raises(ValueError):\n graycomatrix(self.image.astype(dtype), [1], [np.pi])\n\n def test_error_raise_negative(self):\n with pytest.raises(ValueError):\n graycomatrix(self.image.astype(np.int16) - 1, [1], [np.pi], 4)\n\n def test_error_raise_levels_smaller_max(self):\n with pytest.raises(ValueError):\n graycomatrix(self.image - 1, [1], [np.pi], 3)\n\n def test_image_data_types(self):\n for dtype in [\n np.uint16, np.uint32, np.uint64, np.int16, np.int32, np.int64\n ]:\n img = self.image.astype(dtype)\n result = graycomatrix(img, [1], [np.pi / 2], 4,\n symmetric=True)\n assert result.shape == (4, 4, 1, 1)\n expected = np.array([[6, 0, 2, 0],\n [0, 4, 2, 0],\n [2, 2, 2, 2],\n [0, 0, 2, 0]], dtype=np.uint32)\n np.testing.assert_array_equal(result[:, :, 0, 0], expected)\n\n return\n\n def test_output_distance(self):\n im = np.array([[0, 0, 0, 0],\n [1, 0, 0, 1],\n [2, 0, 0, 2],\n [3, 0, 0, 3]], dtype=np.uint8)\n result = graycomatrix(im, [3], [0], 4, symmetric=False)\n expected = np.array([[1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]], dtype=np.uint32)\n np.testing.assert_array_equal(result[:, :, 0, 0], expected)\n\n def 
test_output_combo(self):\n im = np.array([[0],\n [1],\n [2],\n [3]], dtype=np.uint8)\n result = graycomatrix(im, [1, 2], [0, np.pi / 2], 4)\n assert result.shape == (4, 4, 2, 2)\n\n z = np.zeros((4, 4), dtype=np.uint32)\n e1 = np.array([[0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [0, 0, 0, 0]], dtype=np.uint32)\n e2 = np.array([[0, 0, 1, 0],\n [0, 0, 0, 1],\n [0, 0, 0, 0],\n [0, 0, 0, 0]], dtype=np.uint32)\n\n np.testing.assert_array_equal(result[:, :, 0, 0], z)\n np.testing.assert_array_equal(result[:, :, 1, 0], z)\n np.testing.assert_array_equal(result[:, :, 0, 1], e1)\n np.testing.assert_array_equal(result[:, :, 1, 1], e2)\n\n def test_output_empty(self):\n result = graycomatrix(self.image, [10], [0], 4)\n np.testing.assert_array_equal(result[:, :, 0, 0],\n np.zeros((4, 4), dtype=np.uint32))\n result = graycomatrix(self.image, [10], [0], 4, normed=True)\n np.testing.assert_array_equal(result[:, :, 0, 0],\n np.zeros((4, 4), dtype=np.uint32))\n\n def test_normed_symmetric(self):\n result = graycomatrix(self.image, [1, 2, 3],\n [0, np.pi / 2, np.pi], 4,\n normed=True, symmetric=True)\n for d in range(result.shape[2]):\n for a in range(result.shape[3]):\n np.testing.assert_almost_equal(result[:, :, d, a].sum(),\n 1.0)\n np.testing.assert_array_equal(result[:, :, d, a],\n result[:, :, d, a].transpose())\n\n def test_contrast(self):\n result = graycomatrix(self.image, [1, 2], [0], 4,\n normed=True, symmetric=True)\n result = np.round(result, 3)\n contrast = graycoprops(result, 'contrast')\n np.testing.assert_almost_equal(contrast[0, 0], 0.585, decimal=3)\n\n def test_dissimilarity(self):\n result = graycomatrix(self.image, [1], [0, np.pi / 2], 4,\n normed=True, symmetric=True)\n result = np.round(result, 3)\n dissimilarity = graycoprops(result, 'dissimilarity')\n np.testing.assert_almost_equal(dissimilarity[0, 0], 0.418, decimal=3)\n\n def test_greycomatrix_and_greycoprops_deprecations(self):\n expected = graycomatrix(self.image, [1], [0, np.pi / 2], 4,\n normed=True, symmetric=True)\n with expected_warnings([\"Function ``greycomatrix``\"]):\n result = greycomatrix(self.image, [1], [0, np.pi / 2], 4,\n normed=True, symmetric=True)\n np.testing.assert_array_equal(expected, result)\n\n result = np.round(result, 3)\n dissimilarity_expected = graycoprops(result, 'dissimilarity')\n with expected_warnings([\"Function ``greycoprops``\"]):\n dissimilarity_result = greycoprops(result, 'dissimilarity')\n np.testing.assert_array_equal(\n dissimilarity_expected, dissimilarity_result\n )\n\n def test_dissimilarity_2(self):\n result = graycomatrix(self.image, [1, 3], [np.pi / 2], 4,\n normed=True, symmetric=True)\n result = np.round(result, 3)\n dissimilarity = graycoprops(result, 'dissimilarity')[0, 0]\n np.testing.assert_almost_equal(dissimilarity, 0.665, decimal=3)\n\n def test_non_normalized_glcm(self):\n img = (np.random.random((100, 100)) * 8).astype(np.uint8)\n p = graycomatrix(img, [1, 2, 4, 5], [0, 0.25, 1, 1.5], levels=8)\n np.testing.assert_(np.max(graycoprops(p, 'correlation')) < 1.0)\n\n def test_invalid_property(self):\n result = graycomatrix(self.image, [1], [0], 4)\n with pytest.raises(ValueError):\n graycoprops(result, 'ABC')\n\n def test_homogeneity(self):\n result = graycomatrix(self.image, [1], [0, 6], 4, normed=True,\n symmetric=True)\n homogeneity = graycoprops(result, 'homogeneity')[0, 0]\n np.testing.assert_almost_equal(homogeneity, 0.80833333)\n\n def test_energy(self):\n result = graycomatrix(self.image, [1], [0, 4], 4, normed=True,\n symmetric=True)\n energy = 
graycoprops(result, 'energy')[0, 0]\n np.testing.assert_almost_equal(energy, 0.38188131)\n\n def test_correlation(self):\n result = graycomatrix(self.image, [1, 2], [0], 4, normed=True,\n symmetric=True)\n energy = graycoprops(result, 'correlation')\n np.testing.assert_almost_equal(energy[0, 0], 0.71953255)\n np.testing.assert_almost_equal(energy[1, 0], 0.41176470)\n\n def test_uniform_properties(self):\n im = np.ones((4, 4), dtype=np.uint8)\n result = graycomatrix(im, [1, 2, 8], [0, np.pi / 2], 4, normed=True,\n symmetric=True)\n for prop in ['contrast', 'dissimilarity', 'homogeneity',\n 'energy', 'correlation', 'ASM']:\n graycoprops(result, prop)\n\n\nclass TestLBP():\n\n def setup(self):\n self.image = np.array([[255, 6, 255, 0, 141, 0],\n [ 48, 250, 204, 166, 223, 63],\n [ 8, 0, 159, 50, 255, 30],\n [167, 255, 63, 40, 128, 255],\n [ 0, 255, 30, 34, 255, 24],\n [146, 241, 255, 0, 189, 126]],\n dtype='double')\n\n @test_parallel()\n def test_default(self):\n lbp = local_binary_pattern(self.image, 8, 1, 'default')\n ref = np.array([[ 0, 251, 0, 255, 96, 255],\n [143, 0, 20, 153, 64, 56],\n [238, 255, 12, 191, 0, 252],\n [129, 64., 62, 159, 199, 0],\n [255, 4, 255, 175, 0, 254],\n [ 3, 5, 0, 255, 4, 24]])\n np.testing.assert_array_equal(lbp, ref)\n\n def test_ror(self):\n lbp = local_binary_pattern(self.image, 8, 1, 'ror')\n ref = np.array([[ 0, 127, 0, 255, 3, 255],\n [ 31, 0, 5, 51, 1, 7],\n [119, 255, 3, 127, 0, 63],\n [ 3, 1, 31, 63, 31, 0],\n [255, 1, 255, 95, 0, 127],\n [ 3, 5, 0, 255, 1, 3]])\n np.testing.assert_array_equal(lbp, ref)\n\n def test_uniform(self):\n lbp = local_binary_pattern(self.image, 8, 1, 'uniform')\n ref = np.array([[0, 7, 0, 8, 2, 8],\n [5, 0, 9, 9, 1, 3],\n [9, 8, 2, 7, 0, 6],\n [2, 1, 5, 6, 5, 0],\n [8, 1, 8, 9, 0, 7],\n [2, 9, 0, 8, 1, 2]])\n np.testing.assert_array_equal(lbp, ref)\n\n def test_var(self):\n # Test idea: mean of variance is estimate of overall variance.\n\n # Fix random seed for test stability.\n np.random.seed(13141516)\n\n # Create random image with known variance.\n image = np.random.rand(500, 500)\n target_std = 0.3\n image = image / image.std() * target_std\n\n # Use P=4 to avoid interpolation effects\n P, R = 4, 1\n lbp = local_binary_pattern(image, P, R, 'var')\n\n # Take central part to avoid border effect.\n lbp = lbp[5:-5, 5:-5]\n\n # The LBP variance is biased (ddof=0), correct for that.\n expected = target_std**2 * (P-1)/P\n\n np.testing.assert_almost_equal(lbp.mean(), expected, 4)\n\n def test_nri_uniform(self):\n lbp = local_binary_pattern(self.image, 8, 1, 'nri_uniform')\n ref = np.array([[ 0, 54, 0, 57, 12, 57],\n [34, 0, 58, 58, 3, 22],\n [58, 57, 15, 50, 0, 47],\n [10, 3, 40, 42, 35, 0],\n [57, 7, 57, 58, 0, 56],\n [ 9, 58, 0, 57, 7, 14]])\n np.testing.assert_array_almost_equal(lbp, ref)\n\n\nclass TestMBLBP():\n\n def test_single_mblbp(self):\n\n # Create dummy matrix where first and fifth rectangles have greater\n # value than the central one. Therefore, the following bits\n # should be 1.\n test_img = np.zeros((9, 9), dtype='uint8')\n test_img[3:6, 3:6] = 1\n test_img[:3, :3] = 255\n test_img[6:, 6:] = 255\n\n # MB-LBP is filled in reverse order. 
So the first and fifth bits from\n # the end should be filled.\n correct_answer = 0b10001000\n\n int_img = integral_image(test_img)\n\n lbp_code = multiblock_lbp(int_img, 0, 0, 3, 3)\n\n np.testing.assert_equal(lbp_code, correct_answer)\n", "#!/usr/bin/env python\n\nimport os\nfrom skimage._build import cython\n\nbase_path = os.path.abspath(os.path.dirname(__file__))\n\n\ndef configuration(parent_package='', top_path=None):\n from numpy.distutils.misc_util import Configuration, get_numpy_include_dirs\n\n config = Configuration('filters', parent_package, top_path)\n\n cython(['rank/core_cy.pyx',\n 'rank/core_cy_3d.pyx',\n 'rank/generic_cy.pyx',\n 'rank/percentile_cy.pyx',\n 'rank/bilateral_cy.pyx',\n '_multiotsu.pyx'], working_path=base_path)\n\n config.add_extension('rank.core_cy', sources=['rank/core_cy.c'],\n include_dirs=[get_numpy_include_dirs()])\n config.add_extension('rank.core_cy_3d', sources=['rank/core_cy_3d.c'],\n include_dirs=[get_numpy_include_dirs()])\n config.add_extension('_multiotsu', sources=['_multiotsu.c'],\n include_dirs=[get_numpy_include_dirs()])\n config.add_extension('rank.generic_cy', sources=['rank/generic_cy.c'],\n include_dirs=[get_numpy_include_dirs()])\n config.add_extension(\n 'rank.percentile_cy', sources=['rank/percentile_cy.c'],\n include_dirs=[get_numpy_include_dirs()])\n config.add_extension(\n 'rank.bilateral_cy', sources=['rank/bilateral_cy.c'],\n include_dirs=[get_numpy_include_dirs()])\n\n return config\n\n\nif __name__ == '__main__':\n from numpy.distutils.core import setup\n setup(maintainer='scikit-image Developers',\n author='scikit-image Developers',\n maintainer_email='[email protected]',\n description='Filters',\n url='https://github.com/scikit-image/scikit-image',\n license='SciPy License (BSD Style)',\n **(configuration(top_path='').todict())\n )\n" ]
[ [ "numpy.max", "numpy.array", "numpy.zeros" ], [ "numpy.array_equal", "numpy.ascontiguousarray", "numpy.fliplr", "numpy.frombuffer", "numpy.array" ], [ "numpy.all", "numpy.dtype" ], [ "numpy.zeros" ], [ "numpy.pad", "scipy.ndimage.generate_binary_structure", "scipy.ndimage.zoom", "numpy.ones", "numpy.zeros_like", "numpy.iinfo", "numpy.ndindex", "numpy.array", "numpy.zeros" ], [ "numpy.testing.assert_equal", "numpy.random.random", "numpy.random.seed", "numpy.ones", "numpy.testing.assert_array_equal", "numpy.round", "numpy.testing.assert_almost_equal", "numpy.random.rand", "numpy.array", "numpy.zeros", "numpy.testing.assert_array_almost_equal" ], [ "numpy.distutils.misc_util.get_numpy_include_dirs", "numpy.distutils.misc_util.Configuration" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [ "1.11", "1.19", "1.24", "1.16", "1.23", "1.20", "1.7", "1.12", "1.21", "1.22", "1.14", "1.6", "1.13", "1.9", "1.17", "1.10", "1.18", "1.15", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] } ]
hw07216/imaginaire
[ "d82c87aced50afd44fd162491ba5b59056b74034", "d82c87aced50afd44fd162491ba5b59056b74034", "d82c87aced50afd44fd162491ba5b59056b74034" ]
[ "imaginaire/losses/feature_matching.py", "imaginaire/evaluation/fid.py", "imaginaire/evaluation/caption/clip.py" ]
[ "# Copyright (C) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.\n#\n# This work is made available under the Nvidia Source Code License-NC.\n# To view a copy of this license, check out LICENSE.md\nimport torch.nn as nn\n\n\nclass FeatureMatchingLoss(nn.Module):\n r\"\"\"Compute feature matching loss\"\"\"\n def __init__(self, criterion='l1'):\n super(FeatureMatchingLoss, self).__init__()\n if criterion == 'l1':\n self.criterion = nn.L1Loss()\n elif criterion == 'l2' or criterion == 'mse':\n self.criterion = nn.MSELoss()\n else:\n raise ValueError('Criterion %s is not recognized' % criterion)\n\n def forward(self, fake_features, real_features):\n r\"\"\"Return the target vector for the binary cross entropy loss\n computation.\n\n Args:\n fake_features (list of lists): Discriminator features of fake images.\n real_features (list of lists): Discriminator features of real images.\n\n Returns:\n (tensor): Loss value.\n \"\"\"\n num_d = len(fake_features)\n dis_weight = 1.0 / num_d\n loss = fake_features[0][0].new_tensor(0)\n for i in range(num_d):\n for j in range(len(fake_features[i])):\n tmp_loss = self.criterion(fake_features[i][j],\n real_features[i][j].detach())\n loss += dis_weight * tmp_loss\n return loss\n", "# Copyright (C) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.\n#\n# This work is made available under the Nvidia Source Code License-NC.\n# To view a copy of this license, check out LICENSE.md\nimport os\nimport numpy as np\nimport torch\nfrom scipy import linalg\n\nfrom imaginaire.evaluation.common import load_or_compute_activations\nfrom imaginaire.utils.distributed import is_master\nfrom imaginaire.utils.distributed import master_only_print as print\n\n\[email protected]_grad()\ndef compute_fid(fid_path, data_loader, net_G,\n key_real='images', key_fake='fake_images',\n sample_size=None, preprocess=None, return_act=False,\n is_video=False, few_shot_video=False, **kwargs):\n r\"\"\"Compute the fid score.\n\n Args:\n fid_path (str): Location for the numpy file to store or to load the\n statistics.\n data_loader (obj): PyTorch dataloader object.\n net_G (obj): For image generation modes, net_G is the generator network.\n For video generation models, net_G is the trainer.\n key_real (str): Dictionary key value for the real data.\n key_fake (str): Dictionary key value for the fake data.\n sample_size (int or tuple): How many samples to be used.\n preprocess (func): The preprocess function to be applied to the data.\n return_act (bool): If ``True``, also returns feature activations of\n real and fake data.\n is_video (bool): Whether we are handling video sequences.\n few_shot_video (bool): If ``True``, uses few-shot video synthesis.\n Returns:\n (float): FID value.\n \"\"\"\n print('Computing FID.')\n act_path = os.path.join(os.path.dirname(fid_path),\n 'activations_real.npy')\n # Get the fake mean and covariance.\n fake_act = load_or_compute_activations(\n None, data_loader, key_real, key_fake, net_G,\n sample_size, preprocess, is_video=is_video,\n few_shot_video=few_shot_video, **kwargs\n )\n\n # Get the ground truth mean and covariance.\n real_act = load_or_compute_activations(\n act_path, data_loader, key_real, key_fake, None,\n sample_size, preprocess, is_video=is_video,\n few_shot_video=few_shot_video, **kwargs\n )\n\n if is_master():\n fid = _calculate_frechet_distance(\n fake_act, real_act)[\"FID\"]\n if return_act:\n return fid, real_act, fake_act\n else:\n return fid\n elif return_act:\n return None, None, None\n else:\n return None\n\n\[email 
protected]_grad()\ndef compute_fid_data(fid_path, data_loader_a, data_loader_b,\n key_a='images', key_b='images', sample_size=None,\n is_video=False, few_shot_video=False, **kwargs):\n r\"\"\"Compute the fid score between two datasets.\n\n Args:\n fid_path (str): Location for the numpy file to store or to load the\n statistics.\n data_loader_a (obj): PyTorch dataloader object for dataset a.\n data_loader_b (obj): PyTorch dataloader object for dataset b.\n key_a (str): Dictionary key value for images in the dataset a.\n key_b (str): Dictionary key value for images in the dataset b.\n sample_size (int): How many samples to be used for computing the FID.\n is_video (bool): Whether we are handling video sequences.\n few_shot_video (bool): If ``True``, uses few-shot video synthesis.\n Returns:\n (float): FID value.\n \"\"\"\n print('Computing FID.')\n path_a = os.path.join(os.path.dirname(fid_path),\n 'activations_a.npy')\n min_data_size = min(len(data_loader_a.dataset),\n len(data_loader_b.dataset))\n if sample_size is None:\n sample_size = min_data_size\n else:\n sample_size = min(sample_size, min_data_size)\n\n act_a = load_or_compute_activations(\n path_a, data_loader_a, key_a, key_b, None,\n sample_size=sample_size, is_video=is_video,\n few_shot_video=few_shot_video, **kwargs\n )\n act_b = load_or_compute_activations(\n None, data_loader_b, key_a, key_b, None,\n sample_size=sample_size, is_video=is_video,\n few_shot_video=few_shot_video, **kwargs\n )\n\n if is_master():\n return _calculate_frechet_distance(act_a, act_b)[\"FID\"]\n\n\ndef _calculate_frechet_distance(act_1, act_2, eps=1e-6):\n mu1 = np.mean(act_1.cpu().numpy(), axis=0)\n sigma1 = np.cov(act_1.cpu().numpy(), rowvar=False)\n mu2 = np.mean(act_2.cpu().numpy(), axis=0)\n sigma2 = np.cov(act_2.cpu().numpy(), rowvar=False)\n mu1 = np.atleast_1d(mu1)\n mu2 = np.atleast_1d(mu2)\n sigma1 = np.atleast_2d(sigma1)\n sigma2 = np.atleast_2d(sigma2)\n assert mu1.shape == mu2.shape, 'Training and test mean vectors have different lengths'\n assert sigma1.shape == sigma2.shape, 'Training and test covariances have different dimensions'\n diff = mu1 - mu2\n # Product might be almost singular\n covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)\n if not np.isfinite(covmean).all():\n msg = ('fid calculation produces singular product; '\n 'adding %s to diagonal of cov estimates') % eps\n print(msg)\n offset = np.eye(sigma1.shape[0]) * eps\n covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))\n\n # Numerical error might give slight imaginary component\n if np.iscomplexobj(covmean):\n if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):\n m = np.max(np.abs(covmean.imag))\n print('Imaginary component {}'.format(m))\n # raise ValueError('Imaginary component {}'.format(m))\n covmean = covmean.real\n tr_covmean = np.trace(covmean)\n return {\"FID\": (diff.dot(diff) + np.trace(sigma1) + np.trace(\n sigma2) - 2 * tr_covmean)}\n", "# Copyright (C) 2021 NVIDIA CORPORATION & AFFILIATES. 
All rights reserved.\n#\n# This work is made available under the Nvidia Source Code License-NC.\n# To view a copy of this license, check out LICENSE.md\n# flake8: noqa\n# https://github.com/openai/CLIP\nimport hashlib\nimport os\nimport urllib\nimport warnings\nfrom time import sleep\nfrom typing import Union, List\n\n\nfrom collections import OrderedDict\nfrom typing import Tuple, Union\n\nimport torch\nimport numpy as np\nimport torch.nn.functional as F\nfrom torch import nn\nfrom PIL import Image\nfrom torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, \\\n Normalize\nfrom tqdm import tqdm\n\n__all__ = [\"available_models\", \"load\", 'build_model']\n\nfrom imaginaire.utils.io import download_file_from_google_drive\n\n_MODELS = {\n \"RN50\": \"https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt\",\n \"RN101\": \"https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt\",\n \"RN50x4\": \"https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt\",\n \"ViT-B/32\": \"https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt\",\n}\n\n\ndef _download(url: str, root: str = os.path.expanduser(\"~/.cache/clip\")):\n os.makedirs(root, exist_ok=True)\n filename = os.path.basename(url)\n\n expected_sha256 = url.split(\"/\")[-2]\n download_target = os.path.join(root, filename)\n\n if os.path.exists(download_target) and not os.path.isfile(download_target):\n raise RuntimeError(\n f\"{download_target} exists and is not a regular file\")\n\n if os.path.isfile(download_target):\n if hashlib.sha256(open(download_target,\n \"rb\").read()).hexdigest() == expected_sha256:\n return download_target\n else:\n warnings.warn(\n f\"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file\")\n\n with urllib.request.urlopen(url) as source, open(download_target,\n \"wb\") as output:\n with tqdm(total=int(source.info().get(\"Content-Length\")), ncols=80,\n unit='iB', unit_scale=True) as loop:\n while True:\n buffer = source.read(8192)\n if not buffer:\n break\n\n output.write(buffer)\n loop.update(len(buffer))\n\n if hashlib.sha256(\n open(download_target, \"rb\").read()).hexdigest() != expected_sha256:\n raise RuntimeError(\n f\"Model has been downloaded but the SHA256 checksum does not not match\")\n\n return download_target\n\n\ndef _transform(n_px):\n return Compose([\n Resize(n_px, interpolation=Image.BICUBIC),\n CenterCrop(n_px),\n lambda image: image.convert(\"RGB\"),\n ToTensor(),\n Normalize((0.48145466, 0.4578275, 0.40821073),\n (0.26862954, 0.26130258, 0.27577711)),\n ])\n\n\ndef available_models() -> List[str]:\n \"\"\"Returns the names of available CLIP models\"\"\"\n return list(_MODELS.keys())\n\n\ndef load(model_path):\n if not os.path.exists(model_path):\n downloaded = False\n while not downloaded:\n try:\n download_file_from_google_drive(\"1Ri5APYM34A_IjG4F3Admutsf2oUwDjfW\", model_path)\n downloaded = True\n except Exception as e:\n print(e)\n sleep(30)\n continue\n model = torch.load(model_path, map_location='cpu')\n model = build_model(model).cuda()\n return model, _transform(model.visual.input_resolution)\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1):\n super().__init__()\n\n # all conv layers have stride 1. 
an avgpool is performed after the second convolution when stride > 1\n self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n\n self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n\n self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()\n\n self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)\n self.bn3 = nn.BatchNorm2d(planes * self.expansion)\n\n self.relu = nn.ReLU(inplace=True)\n self.downsample = None\n self.stride = stride\n\n if stride > 1 or inplanes != planes * Bottleneck.expansion:\n # downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1\n self.downsample = nn.Sequential(OrderedDict([\n (\"-1\", nn.AvgPool2d(stride)),\n (\"0\", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1,\n bias=False)),\n (\"1\", nn.BatchNorm2d(planes * self.expansion))\n ]))\n\n def forward(self, x: torch.Tensor):\n identity = x\n\n out = self.relu(self.bn1(self.conv1(x)))\n out = self.relu(self.bn2(self.conv2(out)))\n out = self.avgpool(out)\n out = self.bn3(self.conv3(out))\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu(out)\n return out\n\n\nclass AttentionPool2d(nn.Module):\n def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int,\n output_dim: int = None):\n super().__init__()\n self.positional_embedding = nn.Parameter(\n torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5)\n self.k_proj = nn.Linear(embed_dim, embed_dim)\n self.q_proj = nn.Linear(embed_dim, embed_dim)\n self.v_proj = nn.Linear(embed_dim, embed_dim)\n self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)\n self.num_heads = num_heads\n\n def forward(self, x):\n x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(\n 2, 0, 1) # NCHW -> (HW)NC\n x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC\n x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC\n x, _ = F.multi_head_attention_forward(\n query=x, key=x, value=x,\n embed_dim_to_check=x.shape[-1],\n num_heads=self.num_heads,\n q_proj_weight=self.q_proj.weight,\n k_proj_weight=self.k_proj.weight,\n v_proj_weight=self.v_proj.weight,\n in_proj_weight=None,\n in_proj_bias=torch.cat(\n [self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),\n bias_k=None,\n bias_v=None,\n add_zero_attn=False,\n dropout_p=0,\n out_proj_weight=self.c_proj.weight,\n out_proj_bias=self.c_proj.bias,\n use_separate_proj_weight=True,\n training=self.training,\n need_weights=False\n )\n\n return x[0]\n\n\nclass ModifiedResNet(nn.Module):\n \"\"\"\n A ResNet class that is similar to torchvision's but contains the following changes:\n - There are now 3 \"stem\" convolutions as opposed to 1, with an average pool instead of a max pool.\n - Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1\n - The final pooling layer is a QKV attention instead of an average pool\n \"\"\"\n\n def __init__(self, layers, output_dim, heads, input_resolution=224,\n width=64):\n super().__init__()\n self.output_dim = output_dim\n self.input_resolution = input_resolution\n\n # the 3-layer stem\n self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2,\n padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(width // 2)\n self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1,\n bias=False)\n self.bn2 = nn.BatchNorm2d(width // 2)\n self.conv3 = nn.Conv2d(width // 2, 
width, kernel_size=3, padding=1,\n bias=False)\n self.bn3 = nn.BatchNorm2d(width)\n self.avgpool = nn.AvgPool2d(2)\n self.relu = nn.ReLU(inplace=True)\n\n # residual layers\n self._inplanes = width # this is a *mutable* variable used during construction\n self.layer1 = self._make_layer(width, layers[0])\n self.layer2 = self._make_layer(width * 2, layers[1], stride=2)\n self.layer3 = self._make_layer(width * 4, layers[2], stride=2)\n self.layer4 = self._make_layer(width * 8, layers[3], stride=2)\n\n embed_dim = width * 32 # the ResNet feature dimension\n self.attnpool = AttentionPool2d(input_resolution // 32, embed_dim,\n heads, output_dim)\n\n def _make_layer(self, planes, blocks, stride=1):\n layers = [Bottleneck(self._inplanes, planes, stride)]\n\n self._inplanes = planes * Bottleneck.expansion\n for _ in range(1, blocks):\n layers.append(Bottleneck(self._inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n def stem(x):\n for conv, bn in [(self.conv1, self.bn1), (self.conv2, self.bn2),\n (self.conv3, self.bn3)]:\n x = self.relu(bn(conv(x)))\n x = self.avgpool(x)\n return x\n\n x = x.type(self.conv1.weight.dtype)\n x = stem(x)\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n x = self.attnpool(x)\n\n return x\n\n\nclass LayerNorm(nn.LayerNorm):\n \"\"\"Subclass torch's LayerNorm to handle fp16.\"\"\"\n\n def forward(self, x: torch.Tensor):\n orig_type = x.dtype\n ret = super().forward(x.type(torch.float32))\n return ret.type(orig_type)\n\n\nclass QuickGELU(nn.Module):\n def forward(self, x: torch.Tensor):\n return x * torch.sigmoid(1.702 * x)\n\n\nclass ResidualAttentionBlock(nn.Module):\n def __init__(self, d_model: int, n_head: int,\n attn_mask: torch.Tensor = None):\n super().__init__()\n\n self.attn = nn.MultiheadAttention(d_model, n_head)\n self.ln_1 = LayerNorm(d_model)\n self.mlp = nn.Sequential(OrderedDict([\n (\"c_fc\", nn.Linear(d_model, d_model * 4)),\n (\"gelu\", QuickGELU()),\n (\"c_proj\", nn.Linear(d_model * 4, d_model))\n ]))\n self.ln_2 = LayerNorm(d_model)\n self.attn_mask = attn_mask\n\n def attention(self, x: torch.Tensor):\n self.attn_mask = self.attn_mask.to(dtype=x.dtype,\n device=x.device) if self.attn_mask is not None else None\n return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[\n 0]\n\n def forward(self, x: torch.Tensor):\n x = x + self.attention(self.ln_1(x))\n x = x + self.mlp(self.ln_2(x))\n return x\n\n\nclass Transformer(nn.Module):\n def __init__(self, width: int, layers: int, heads: int,\n attn_mask: torch.Tensor = None):\n super().__init__()\n self.width = width\n self.layers = layers\n self.resblocks = nn.Sequential(\n *[ResidualAttentionBlock(width, heads, attn_mask) for _ in\n range(layers)])\n\n def forward(self, x: torch.Tensor):\n return self.resblocks(x)\n\n\nclass VisualTransformer(nn.Module):\n def __init__(self, input_resolution: int, patch_size: int, width: int,\n layers: int, heads: int, output_dim: int):\n super().__init__()\n self.input_resolution = input_resolution\n self.output_dim = output_dim\n self.conv1 = nn.Conv2d(in_channels=3, out_channels=width,\n kernel_size=patch_size, stride=patch_size,\n bias=False)\n\n scale = width ** -0.5\n self.class_embedding = nn.Parameter(scale * torch.randn(width))\n self.positional_embedding = nn.Parameter(\n scale * torch.randn((input_resolution // patch_size) ** 2 + 1,\n width))\n self.ln_pre = LayerNorm(width)\n\n self.transformer = Transformer(width, layers, heads)\n\n self.ln_post = LayerNorm(width)\n self.proj 
= nn.Parameter(scale * torch.randn(width, output_dim))\n\n def forward(self, x: torch.Tensor):\n x = self.conv1(x) # shape = [*, width, grid, grid]\n x = x.reshape(x.shape[0], x.shape[1],\n -1) # shape = [*, width, grid ** 2]\n x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]\n x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(\n x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x],\n dim=1) # shape = [*, grid ** 2 + 1, width]\n x = x + self.positional_embedding.to(x.dtype)\n x = self.ln_pre(x)\n\n x = x.permute(1, 0, 2) # NLD -> LND\n x = self.transformer(x)\n x = x.permute(1, 0, 2) # LND -> NLD\n\n x = self.ln_post(x[:, 0, :])\n\n if self.proj is not None:\n x = x @ self.proj\n\n return x\n\n\nclass CLIP(nn.Module):\n def __init__(self,\n embed_dim: int,\n # vision\n image_resolution: int,\n vision_layers: Union[Tuple[int, int, int, int], int],\n vision_width: int,\n vision_patch_size: int,\n # text\n context_length: int,\n vocab_size: int,\n transformer_width: int,\n transformer_heads: int,\n transformer_layers: int\n ):\n super().__init__()\n\n self.context_length = context_length\n\n if isinstance(vision_layers, (tuple, list)):\n vision_heads = vision_width * 32 // 64\n self.visual = ModifiedResNet(\n layers=vision_layers,\n output_dim=embed_dim,\n heads=vision_heads,\n input_resolution=image_resolution,\n width=vision_width\n )\n else:\n vision_heads = vision_width // 64\n self.visual = VisualTransformer(\n input_resolution=image_resolution,\n patch_size=vision_patch_size,\n width=vision_width,\n layers=vision_layers,\n heads=vision_heads,\n output_dim=embed_dim\n )\n\n self.transformer = Transformer(\n width=transformer_width,\n layers=transformer_layers,\n heads=transformer_heads,\n attn_mask=self.build_attention_mask()\n )\n\n self.vocab_size = vocab_size\n self.token_embedding = nn.Embedding(vocab_size, transformer_width)\n self.positional_embedding = nn.Parameter(\n torch.empty(self.context_length, transformer_width))\n self.ln_final = LayerNorm(transformer_width)\n\n self.text_projection = nn.Parameter(\n torch.empty(transformer_width, embed_dim))\n self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))\n\n self.initialize_parameters()\n\n def initialize_parameters(self):\n nn.init.normal_(self.token_embedding.weight, std=0.02)\n nn.init.normal_(self.positional_embedding, std=0.01)\n\n if isinstance(self.visual, ModifiedResNet):\n if self.visual.attnpool is not None:\n std = self.visual.attnpool.c_proj.in_features ** -0.5\n nn.init.normal_(self.visual.attnpool.q_proj.weight, std=std)\n nn.init.normal_(self.visual.attnpool.k_proj.weight, std=std)\n nn.init.normal_(self.visual.attnpool.v_proj.weight, std=std)\n nn.init.normal_(self.visual.attnpool.c_proj.weight, std=std)\n\n for resnet_block in [self.visual.layer1, self.visual.layer2,\n self.visual.layer3, self.visual.layer4]:\n for name, param in resnet_block.named_parameters():\n if name.endswith(\"bn3.weight\"):\n nn.init.zeros_(param)\n\n proj_std = (self.transformer.width ** -0.5) * (\n (2 * self.transformer.layers) ** -0.5)\n attn_std = self.transformer.width ** -0.5\n fc_std = (2 * self.transformer.width) ** -0.5\n for block in self.transformer.resblocks:\n nn.init.normal_(block.attn.in_proj_weight, std=attn_std)\n nn.init.normal_(block.attn.out_proj.weight, std=proj_std)\n nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)\n nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)\n\n if self.text_projection is not None:\n nn.init.normal_(self.text_projection,\n 
std=self.transformer.width ** -0.5)\n\n def build_attention_mask(self):\n # lazily create causal attention mask, with full attention between the vision tokens\n # pytorch uses additive attention mask; fill with -inf\n mask = torch.empty(self.context_length, self.context_length)\n mask.fill_(float(\"-inf\"))\n mask.triu_(1) # zero out the lower diagonal\n return mask\n\n @property\n def dtype(self):\n return self.visual.conv1.weight.dtype\n\n def encode_image(self, image):\n return self.visual(image.type(self.dtype))\n\n def encode_text(self, text):\n x = self.token_embedding(text).type(\n self.dtype) # [batch_size, n_ctx, d_model]\n\n x = x + self.positional_embedding.type(self.dtype)\n x = x.permute(1, 0, 2) # NLD -> LND\n x = self.transformer(x)\n x = x.permute(1, 0, 2) # LND -> NLD\n x = self.ln_final(x).type(self.dtype)\n\n # x.shape = [batch_size, n_ctx, transformer.width]\n # take features from the eot embedding (eot_token is the highest number in each sequence)\n x = x[torch.arange(x.shape[0]), text.argmax(\n dim=-1)] @ self.text_projection\n\n return x\n\n def forward(self, image, text):\n image_features = self.encode_image(image)\n text_features = self.encode_text(text)\n\n # normalized features\n image_features = image_features / image_features.norm(dim=-1,\n keepdim=True)\n text_features = text_features / text_features.norm(dim=-1, keepdim=True)\n\n # cosine similarity as logits\n logit_scale = self.logit_scale.exp()\n logits_per_image = logit_scale * image_features @ text_features.t()\n logits_per_text = logit_scale * text_features @ image_features.t()\n\n # shape = [global_batch_size, global_batch_size]\n return logits_per_image, logits_per_text\n\n\ndef convert_weights(model: nn.Module):\n \"\"\"Convert applicable model parameters to fp16\"\"\"\n\n def _convert_weights_to_fp16(l):\n if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):\n l.weight.data = l.weight.data.half()\n if l.bias is not None:\n l.bias.data = l.bias.data.half()\n\n if isinstance(l, nn.MultiheadAttention):\n for attr in [*[f\"{s}_proj_weight\" for s in [\"in\", \"q\", \"k\", \"v\"]],\n \"in_proj_bias\", \"bias_k\", \"bias_v\"]:\n tensor = getattr(l, attr)\n if tensor is not None:\n tensor.data = tensor.data.half()\n\n for name in [\"text_projection\", \"proj\"]:\n if hasattr(l, name):\n attr = getattr(l, name)\n if attr is not None:\n attr.data = attr.data.half()\n\n model.apply(_convert_weights_to_fp16)\n\n\ndef build_model(state_dict: dict):\n vit = \"visual.proj\" in state_dict\n\n if vit:\n vision_width = state_dict[\"visual.conv1.weight\"].shape[0]\n vision_layers = len([k for k in state_dict.keys() if\n k.startswith(\"visual.\") and k.endswith(\n \".attn.in_proj_weight\")])\n vision_patch_size = state_dict[\"visual.conv1.weight\"].shape[-1]\n grid_size = round(\n (state_dict[\"visual.positional_embedding\"].shape[0] - 1) ** 0.5)\n image_resolution = vision_patch_size * grid_size\n else:\n counts: list = [len(set(k.split(\".\")[2] for k in state_dict if\n k.startswith(f\"visual.layer{b}\"))) for b in\n [1, 2, 3, 4]]\n vision_layers = tuple(counts)\n vision_width = state_dict[\"visual.layer1.0.conv1.weight\"].shape[0]\n output_width = round((state_dict[\n \"visual.attnpool.positional_embedding\"].shape[\n 0] - 1) ** 0.5)\n vision_patch_size = None\n assert output_width ** 2 + 1 == \\\n state_dict[\"visual.attnpool.positional_embedding\"].shape[0]\n image_resolution = output_width * 32\n\n embed_dim = state_dict[\"text_projection\"].shape[1]\n context_length = 
state_dict[\"positional_embedding\"].shape[0]\n vocab_size = state_dict[\"token_embedding.weight\"].shape[0]\n transformer_width = state_dict[\"ln_final.weight\"].shape[0]\n transformer_heads = transformer_width // 64\n transformer_layers = len(set(k.split(\".\")[2] for k in state_dict if\n k.startswith(f\"transformer.resblocks\")))\n\n model = CLIP(\n embed_dim,\n image_resolution, vision_layers, vision_width, vision_patch_size,\n context_length, vocab_size, transformer_width, transformer_heads,\n transformer_layers\n )\n\n for key in [\"input_resolution\", \"context_length\", \"vocab_size\"]:\n if key in state_dict:\n del state_dict[key]\n\n convert_weights(model)\n model.load_state_dict(state_dict)\n return model.eval()\n" ]
[ [ "torch.nn.MSELoss", "torch.nn.L1Loss" ], [ "numpy.abs", "numpy.isfinite", "numpy.eye", "numpy.atleast_1d", "numpy.atleast_2d", "torch.no_grad", "numpy.iscomplexobj", "numpy.diagonal", "numpy.trace" ], [ "torch.load", "torch.cat", "torch.zeros", "torch.nn.Embedding", "torch.ones", "torch.nn.MultiheadAttention", "torch.randn", "torch.arange", "torch.nn.Sequential", "torch.sigmoid", "numpy.log", "torch.empty", "torch.nn.Conv2d", "torch.nn.Linear", "torch.nn.AvgPool2d", "torch.nn.init.normal_", "torch.nn.init.zeros_", "torch.nn.BatchNorm2d", "torch.nn.Identity", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
GZHoffie/analytics-zoo
[ "d0258aa113ffd1a5c4927376fb32b09fb0baf73c", "d0258aa113ffd1a5c4927376fb32b09fb0baf73c", "d0258aa113ffd1a5c4927376fb32b09fb0baf73c" ]
[ "pyzoo/zoo/zouwu/model/Seq2Seq.py", "pyzoo/zoo/zouwu/model/MTNet_keras.py", "pyzoo/zoo/examples/orca/automl/autoestimator/autoestimator_pytorch.py" ]
[ "#\n# Copyright 2018 Analytics Zoo Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Input, LSTM, Dense\nimport tensorflow.keras as keras\n\nfrom zoo.automl.model.abstract import BaseModel\nfrom zoo.automl.common.util import *\nfrom zoo.automl.common.metrics import Evaluator\n\n\nclass LSTMSeq2Seq(BaseModel):\n\n def __init__(self, check_optional_config=True, future_seq_len=2):\n \"\"\"\n Constructor of LSTM Seq2Seq model\n \"\"\"\n self.model = None\n self.past_seq_len = None\n self.future_seq_len = future_seq_len\n self.feature_num = None\n self.target_col_num = None\n self.metric = None\n self.latent_dim = None\n self.batch_size = None\n self.check_optional_config = check_optional_config\n\n def _build_train(self, mc=False, **config):\n \"\"\"\n build LSTM Seq2Seq model\n :param config:\n :return:\n \"\"\"\n super()._check_config(**config)\n self.metric = config.get('metric', 'mean_squared_error')\n self.latent_dim = config.get('latent_dim', 128)\n self.dropout = config.get('dropout', 0.2)\n self.lr = config.get('lr', 0.001)\n # for restore in continuous training\n self.batch_size = config.get('batch_size', 64)\n training = True if mc else None\n\n # Define an input sequence and process it.\n self.encoder_inputs = Input(shape=(None, self.feature_num), name=\"encoder_inputs\")\n encoder = LSTM(units=self.latent_dim,\n dropout=self.dropout,\n return_state=True,\n name=\"encoder_lstm\")\n encoder_outputs, state_h, state_c = encoder(self.encoder_inputs, training=training)\n # We discard `encoder_outputs` and only keep the states.\n self.encoder_states = [state_h, state_c]\n\n # Set up the decoder, using `encoder_states` as initial state.\n self.decoder_inputs = Input(shape=(None, self.target_col_num), name=\"decoder_inputs\")\n # We set up our decoder to return full output sequences,\n # and to return internal states as well. 
We don't use the\n # return states in the training model, but we will use them in inference.\n self.decoder_lstm = LSTM(self.latent_dim,\n dropout=self.dropout,\n return_sequences=True,\n return_state=True,\n name=\"decoder_lstm\")\n decoder_outputs, _, _ = self.decoder_lstm(self.decoder_inputs,\n training=training,\n initial_state=self.encoder_states)\n\n self.decoder_dense = Dense(self.target_col_num, name=\"decoder_dense\")\n decoder_outputs = self.decoder_dense(decoder_outputs)\n\n # Define the model that will turn\n # `encoder_input_data` & `decoder_input_data` into `decoder_target_data`\n self.model = Model([self.encoder_inputs, self.decoder_inputs], decoder_outputs)\n self.model.compile(loss='mse',\n metrics=[self.metric],\n optimizer=keras.optimizers.RMSprop(lr=self.lr))\n return self.model\n\n def _restore_model(self):\n self.encoder_inputs = self.model.input[0] # input_1\n encoder_outputs, state_h_enc, state_c_enc = self.model.layers[2].output # lstm_1\n self.encoder_states = [state_h_enc, state_c_enc]\n\n self.decoder_inputs = self.model.input[1] # input_2\n self.decoder_lstm = self.model.layers[3]\n\n self.decoder_dense = self.model.layers[4]\n\n def _build_inference(self, mc=False):\n training = True if mc else None\n # from our previous model - mapping encoder sequence to state vectors\n encoder_model = Model(self.encoder_inputs, self.encoder_states)\n\n # A modified version of the decoding stage that takes in predicted target inputs\n # and encoded state vectors, returning predicted target outputs and decoder state vectors.\n # We need to hang onto these state vectors to run the next step of the inference loop.\n decoder_state_input_h = Input(shape=(self.latent_dim,))\n decoder_state_input_c = Input(shape=(self.latent_dim,))\n decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]\n\n decoder_outputs, state_h, state_c = self.decoder_lstm(self.decoder_inputs,\n training=training,\n initial_state=decoder_states_inputs)\n decoder_states = [state_h, state_c]\n\n decoder_outputs = self.decoder_dense(decoder_outputs)\n decoder_model = Model([self.decoder_inputs] + decoder_states_inputs,\n [decoder_outputs] + decoder_states)\n return encoder_model, decoder_model\n\n def _decode_sequence(self, input_seq, mc=False):\n encoder_model, decoder_model = self._build_inference(mc=mc)\n # Encode the input as state vectors.\n states_value = encoder_model.predict(input_seq)\n\n # Generate empty target sequence of length 1.\n target_seq = np.zeros((len(input_seq), 1, self.target_col_num))\n\n # Populate the first target sequence with end of encoding series value\n target_seq[:, 0] = input_seq[:, -1, :self.target_col_num]\n\n # Sampling loop for a batch of sequences - we will fill decoded_seq with predictions\n # (to simplify, here we assume a batch of size 1).\n\n decoded_seq = np.zeros((len(input_seq), self.future_seq_len, self.target_col_num))\n\n for i in range(self.future_seq_len):\n output, h, c = decoder_model.predict([target_seq] + states_value)\n\n decoded_seq[:, i] = output[:, 0]\n\n # Update the target sequence (of length 1).\n target_seq = np.zeros((len(input_seq), 1, self.target_col_num))\n target_seq[:, 0] = output[:, 0]\n\n # Update states\n states_value = [h, c]\n\n return decoded_seq\n\n def _get_decoder_inputs(self, x, y):\n \"\"\"\n lagged target series for teacher forcing\n decoder_input data is one timestamp ahead of y\n :param x: 3-d array in format of (sample_num, past_sequence_len, feature_num)\n :param y: 3-d array in format of (sample_num, 
future_sequence_len, target_col_num)\n Need to expand dimension if y is a 2-d array with one target col\n :return: 3-d array of decoder inputs\n \"\"\"\n decoder_input_data = np.zeros(y.shape)\n decoder_input_data[1:, ] = y[:-1, ]\n decoder_input_data[0, 0] = x[-1, -1, :self.target_col_num]\n decoder_input_data[0, 1:] = y[0, :-1]\n\n return decoder_input_data\n\n def _get_len(self, x, y):\n self.past_seq_len = x.shape[1]\n self.feature_num = x.shape[2]\n # self.future_seq_len = y.shape[1]\n self.target_col_num = y.shape[2]\n\n def _expand_y(self, y):\n \"\"\"\n expand dims for y.\n :param y:\n :return:\n \"\"\"\n while len(y.shape) < 3:\n y = np.expand_dims(y, axis=2)\n return y\n\n def _pre_processing(self, x, y, validation_data):\n \"\"\"\n pre_process input data.\n 1. expand dims for y and val_y\n 2. get decoder inputs for train data\n 3. get decoder inputs for validation data\n :param x: train_x\n :param y: train_y\n :param validation_data:\n :return: network input\n \"\"\"\n y = self._expand_y(y)\n self._get_len(x, y)\n decoder_input_data = self._get_decoder_inputs(x, y)\n if validation_data is not None:\n val_x, val_y = validation_data\n val_y = self._expand_y(val_y)\n val_decoder_input = self._get_decoder_inputs(val_x, val_y)\n validation_data = ([val_x, val_decoder_input], val_y)\n return x, y, decoder_input_data, validation_data\n\n def fit_eval(self, data, validation_data=None, mc=False, verbose=0, **config):\n \"\"\"\n fit for one iteration\n :param data: could be a tuple with numpy ndarray with form (x, y)\n x: 3-d array in format (no. of samples, past sequence length, 2+feature length),\n in the last dimension, the 1st col is the time index (data type needs to be numpy datetime\n type, e.g. \"datetime64\"),\n the 2nd col is the target value (data type should be numeric)\n y: 2-d numpy array in format (no. of samples, future sequence length)\n if future sequence length > 1,\n or 1-d numpy array in format (no. 
of samples, ) if future sequence length = 1\n :param validation_data: tuple in format (x_test,y_test), data used for validation.\n If this is specified, validation result will be the optimization target for automl.\n Otherwise, train metric will be the optimization target.\n :param config: optimization hyper parameters\n :return: the resulting metric\n \"\"\"\n x, y = data[0], data[1]\n x, y, decoder_input_data, validation_data = self._pre_processing(x, y, validation_data)\n\n # if model is not initialized, __build the model\n if self.model is None:\n self._build_train(mc=mc, **config)\n\n # batch_size = config.get('batch_size', 64)\n # lr = self.lr\n # name = \"seq2seq-batch_size-{}-epochs-{}-lr-{}-time-{}\"\\\n # .format(batch_size, epochs, lr, time())\n # tensorboard = TensorBoard(log_dir=\"logs/\" + name)\n\n hist = self.model.fit([x, decoder_input_data], y,\n validation_data=validation_data,\n batch_size=self.batch_size,\n epochs=config.get(\"epochs\", 10),\n verbose=verbose,\n # callbacks=[tensorboard]\n )\n # print(hist.history)\n\n if validation_data is None:\n # get train metrics\n # results = self.model.evaluate(x, y)\n result = hist.history.get(self.metric)[-1]\n else:\n result = hist.history.get('val_' + str(self.metric))[-1]\n return result\n\n def evaluate(self, x, y, metric=['mse']):\n \"\"\"\n Evaluate on x, y\n :param x: input\n :param y: target\n :param metric: a list of metrics in string format\n :return: a list of metric evaluation results\n \"\"\"\n y_pred = self.predict(x)\n # y = np.squeeze(y, axis=2)\n if self.target_col_num == 1:\n return [Evaluator.evaluate(m, y, y_pred) for m in metric]\n else:\n return [np.array([Evaluator.evaluate(m, y[:, i, :], y_pred[:, i, :])\n for i in range(self.future_seq_len)])\n for m in metric]\n\n def predict(self, x, mc=False):\n \"\"\"\n Prediction on x.\n :param x: input\n :return: predicted y (expected dimension = 2)\n \"\"\"\n y_pred = self._decode_sequence(x, mc=mc)\n if self.target_col_num == 1:\n y_pred = np.squeeze(y_pred, axis=2)\n return y_pred\n\n def predict_with_uncertainty(self, x, n_iter=100):\n result = np.array([self.predict(x, mc=True) for i in range(n_iter)])\n prediction = result.mean(axis=0)\n uncertainty = result.var(axis=0)\n return prediction, uncertainty\n\n def save(self, model_path, config_path):\n \"\"\"\n save model to file.\n :param model_path: the model file path to be saved to.\n :param config_path: the config file path to be saved to.\n :return:\n \"\"\"\n\n self.model.save(model_path)\n\n config_to_save = {\"past_seq_len\": self.past_seq_len,\n \"feature_num\": self.feature_num,\n \"future_seq_len\": self.future_seq_len,\n \"target_col_num\": self.target_col_num,\n \"metric\": self.metric,\n \"latent_dim\": self.latent_dim,\n \"batch_size\": self.batch_size}\n save_config(config_path, config_to_save)\n\n def restore(self, model_path, **config):\n \"\"\"\n restore model from file\n :param model_path: the model file\n :param config: the trial config\n :return: the restored model\n \"\"\"\n\n self.past_seq_len = config[\"past_seq_len\"]\n self.feature_num = config[\"feature_num\"]\n self.future_seq_len = config[\"future_seq_len\"]\n self.target_col_num = config[\"target_col_num\"]\n self.metric = config[\"metric\"]\n self.latent_dim = config[\"latent_dim\"]\n self.batch_size = config[\"batch_size\"]\n\n self.model = keras.models.load_model(model_path)\n self._restore_model()\n # self.model.load_weights(file_path)\n\n def _get_required_parameters(self):\n return {\n # 'input_shape_x',\n # 
'input_shape_y',\n # 'out_units'\n }\n\n def _get_optional_parameters(self):\n return {\n 'past_seq_len'\n 'latent_dim'\n 'dropout',\n 'metric',\n 'lr',\n 'epochs',\n 'batch_size'\n }\n", "# Copyright 2018 Analytics Zoo Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n#\n# MIT License\n#\n# Copyright (c) 2018 Roland Zimmermann\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\nimport numpy as np\nimport time\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import *\nfrom tensorflow.keras.initializers import TruncatedNormal, Constant\nimport tensorflow.keras.backend as K\n\nimport tensorflow as tf\nfrom zoo.automl.common.metrics import Evaluator\nfrom zoo.automl.model.abstract import BaseModel\nfrom zoo.automl.common.util import save_config\n\n\nclass AttentionRNNWrapper(Wrapper):\n \"\"\"\n This class is modified based on\n https://github.com/zimmerrol/keras-utility-layer-collection/blob/master/kulc/attention.py.\n The idea of the implementation is based on the paper:\n \"Effective Approaches to Attention-based Neural Machine Translation\" by Luong et al.\n This layer is an attention layer, which can be wrapped around arbitrary RNN layers.\n This way, after each time step an attention vector is calculated\n based on the current output of the LSTM and the entire input time series.\n This attention vector is then used as a weight vector to choose special values\n from the input data. This data is then finally concatenated to the next input time step's\n data. 
On this a linear transformation in the same space as the input data's space\n is performed before the data is fed into the RNN cell again.\n This technique is similar to the input-feeding method described in the paper cited\n \"\"\"\n\n def __init__(self, layer, weight_initializer=\"glorot_uniform\", **kwargs):\n assert isinstance(layer, RNN)\n self.layer = layer\n self.supports_masking = True\n self.weight_initializer = weight_initializer\n\n super(AttentionRNNWrapper, self).__init__(layer, **kwargs)\n\n def _validate_input_shape(self, input_shape):\n if len(input_shape) != 3:\n raise ValueError(\n \"Layer received an input with shape {0} but expected a Tensor of rank 3.\".format(\n input_shape[0]))\n\n def build(self, input_shape):\n self._validate_input_shape(input_shape)\n\n self.input_spec = InputSpec(shape=input_shape)\n\n if not self.layer.built:\n self.layer.build(input_shape)\n self.layer.built = True\n\n input_dim = input_shape[-1]\n\n if self.layer.return_sequences:\n output_dim = self.layer.compute_output_shape(input_shape)[0][-1]\n else:\n output_dim = self.layer.compute_output_shape(input_shape)[-1]\n\n input_dim = input_dim.value\n output_dim = output_dim.value\n\n self._W1 = self.add_weight(shape=(input_dim, input_dim), name=\"{}_W1\".format(self.name),\n initializer=self.weight_initializer)\n self._W2 = self.add_weight(shape=(output_dim, input_dim), name=\"{}_W2\".format(self.name),\n initializer=self.weight_initializer)\n self._W3 = self.add_weight(shape=(2 * input_dim, input_dim), name=\"{}_W3\".format(self.name),\n initializer=self.weight_initializer)\n self._b2 = self.add_weight(shape=(input_dim,), name=\"{}_b2\".format(self.name),\n initializer=self.weight_initializer)\n self._b3 = self.add_weight(shape=(input_dim,), name=\"{}_b3\".format(self.name),\n initializer=self.weight_initializer)\n self._V = self.add_weight(shape=(input_dim, 1), name=\"{}_V\".format(self.name),\n initializer=self.weight_initializer)\n\n super(AttentionRNNWrapper, self).build()\n\n def compute_output_shape(self, input_shape):\n self._validate_input_shape(input_shape)\n\n return self.layer.compute_output_shape(input_shape)\n\n @property\n def trainable_weights(self):\n return self._trainable_weights + self.layer.trainable_weights\n\n @property\n def non_trainable_weights(self):\n return self._non_trainable_weights + self.layer.non_trainable_weights\n\n def step(self, x, states):\n h = states[1]\n # states[1] necessary?\n\n # equals K.dot(X, self._W1) + self._b2 with X.shape=[bs, T, input_dim]\n total_x_prod = states[-1]\n # comes from the constants (equals the input sequence)\n X = states[-2]\n\n # expand dims to add the vector which is only valid for this time step\n # to total_x_prod which is valid for all time steps\n hw = K.expand_dims(K.dot(h, self._W2), 1)\n additive_atn = total_x_prod + hw\n attention = K.softmax(K.dot(additive_atn, self._V), axis=1)\n x_weighted = K.sum(attention * X, [1])\n\n x = K.dot(K.concatenate([x, x_weighted], 1), self._W3) + self._b3\n\n h, new_states = self.layer.cell.call(x, states[:-2])\n\n return h, new_states\n\n def call(self, x, constants=None, mask=None, initial_state=None):\n # input shape: (n_samples, time (padded with zeros), input_dim)\n input_shape = self.input_spec.shape\n\n if self.layer.stateful:\n initial_states = self.layer.states\n elif initial_state is not None:\n initial_states = initial_state\n if not isinstance(initial_states, (list, tuple)):\n initial_states = [initial_states]\n\n base_initial_state = self.layer.get_initial_state(x)\n if 
len(base_initial_state) != len(initial_states):\n raise ValueError(\n \"initial_state does not have the correct length. Received length {0} \"\n \"but expected {1}\".format(len(initial_states), len(base_initial_state)))\n else:\n # check the state' shape\n for i in range(len(initial_states)):\n # initial_states[i][j] != base_initial_state[i][j]:\n if not initial_states[i].shape.is_compatible_with(base_initial_state[i].shape):\n raise ValueError(\n \"initial_state does not match the default base state of the layer. \"\n \"Received {0} but expected {1}\".format(\n [x.shape for x in initial_states],\n [x.shape for x in base_initial_state]))\n else:\n initial_states = self.layer.get_initial_state(x)\n\n # print(initial_states)\n\n if not constants:\n constants = []\n\n constants += self.get_constants(x)\n\n last_output, outputs, states = K.rnn(\n self.step,\n x,\n initial_states,\n go_backwards=self.layer.go_backwards,\n mask=mask,\n constants=constants,\n unroll=self.layer.unroll,\n input_length=input_shape[1]\n )\n\n if self.layer.stateful:\n self.updates = []\n for i in range(len(states)):\n self.updates.append((self.layer.states[i], states[i]))\n\n if self.layer.return_sequences:\n output = outputs\n else:\n output = last_output\n\n # Properly set learning phase\n if getattr(last_output, '_uses_learning_phase', False):\n output._uses_learning_phase = True\n for state in states:\n state._uses_learning_phase = True\n\n if self.layer.return_state:\n if not isinstance(states, (list, tuple)):\n states = [states]\n else:\n states = list(states)\n return [output] + states\n else:\n return output\n\n def get_constants(self, x):\n # add constants to speed up calculation\n constants = [x, K.dot(x, self._W1) + self._b2]\n\n return constants\n\n def get_config(self):\n config = {'weight_initializer': self.weight_initializer}\n base_config = super(AttentionRNNWrapper, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass MTNetKeras(BaseModel):\n\n def __init__(self, check_optional_config=False, future_seq_len=1):\n\n \"\"\"\n Constructor of MTNet model\n \"\"\"\n self.check_optional_config = check_optional_config\n self.config = None\n # config parameter\n self.time_step = None # timestep\n self.cnn_height = None # convolution window size (convolution filter height)` ?\n self.long_num = None # the number of the long-term memory series\n self.ar_window = None # the window size of ar model\n self.feature_num = None # input's variable dimension (convolution filter width)\n self.output_dim = None # output's variable dimension\n self.cnn_hid_size = None\n # last size is equal to en_conv_hidden_size, should be a list\n self.rnn_hid_sizes = None\n self.last_rnn_size = None\n self.cnn_dropout = None\n self.rnn_dropout = None\n self.lr = None\n self.batch_size = None\n self.loss = None\n\n self.saved_configs = {\"cnn_height\", \"long_num\", \"time_step\", \"ar_window\",\n \"cnn_hid_size\", \"rnn_hid_sizes\", \"cnn_dropout\",\n \"rnn_dropout\", \"lr\", \"batch_size\",\n \"epochs\", \"metrics\", \"mc\",\n \"feature_num\", \"output_dim\", \"loss\"}\n self.model = None\n self.metrics = None\n self.mc = None\n self.epochs = None\n\n def apply_config(self, rs=False, config=None):\n super()._check_config(**config)\n if rs:\n config_names = set(config.keys())\n assert config_names.issuperset(self.saved_configs)\n # assert config_names.issuperset(self.lr_decay_configs) or \\\n # config_names.issuperset(self.lr_configs)\n self.epochs = config.get(\"epochs\")\n self.metrics = 
config.get(\"metrics\", [\"mean_squared_error\"])\n self.mc = config.get(\"mc\")\n self.feature_num = config[\"feature_num\"]\n self.output_dim = config[\"output_dim\"]\n self.time_step = config.get(\"time_step\", 1)\n self.long_num = config.get(\"long_num\", 7)\n self.ar_window = config.get(\"ar_window\", 1)\n self.cnn_height = config.get(\"cnn_height\", 1)\n self.cnn_hid_size = config.get(\"cnn_hid_size\", 32)\n self.rnn_hid_sizes = config.get(\"rnn_hid_sizes\", [16, 32])\n self.last_rnn_size = self.rnn_hid_sizes[-1]\n self.rnn_dropout = config.get(\"rnn_dropout\", 0.2)\n self.cnn_dropout = config.get(\"cnn_dropout\", 0.2)\n self.loss = config.get('loss', \"mae\")\n self.batch_size = config.get(\"batch_size\", 64)\n self.lr = config.get('lr', 0.001)\n self._check_configs()\n\n def _check_configs(self):\n assert self.time_step >= 1, \\\n \"Invalid configuration value. 'time_step' must be larger than 1\"\n assert self.time_step >= self.ar_window, \\\n \"Invalid configuration value. 'ar_window' must not exceed 'time_step'\"\n assert isinstance(self.rnn_hid_sizes, list), \\\n \"Invalid configuration value. 'rnn_hid_sizes' must be a list of integers\"\n # assert self.cnn_hid_size == self.last_rnn_size,\\\n # \"Invalid configuration value. 'cnn_hid_size' must be equal to the last element of \" \\\n # \"'rnn_hid_sizes'\"\n\n def build(self):\n \"\"\"\n build MTNet model\n :param config:\n :return:\n \"\"\"\n training = True if self.mc else None\n # long-term time series historical data inputs\n long_input = Input(shape=(self.long_num, self.time_step, self.feature_num))\n # short-term time series historical data\n short_input = Input(shape=(self.time_step, self.feature_num))\n\n # ------- no-linear component----------------\n # memory and context : (batch, long_num, last_rnn_size)\n memory = self.__encoder(long_input, num=self.long_num, name='memory', training=training)\n # memory = memory_model(long_input)\n context = self.__encoder(long_input, num=self.long_num, name='context', training=training)\n # context = context_model(long_input)\n # query: (batch, 1, last_rnn_size)\n query_input = Reshape((1, self.time_step, self.feature_num),\n name='reshape_query')(short_input)\n query = self.__encoder(query_input, num=1, name='query', training=training)\n # query = query_model(query_input)\n\n # prob = memory * query.T, shape is (long_num, 1)\n query_t = Permute((2, 1))(query)\n prob = Lambda(lambda xy: tf.matmul(xy[0], xy[1]))([memory, query_t])\n prob = Softmax(axis=-1)(prob)\n # out is of the same shape of context: (batch, long_num, last_rnn_size)\n out = multiply([context, prob])\n # concat: (batch, long_num + 1, last_rnn_size)\n\n pred_x = concatenate([out, query], axis=1)\n reshaped_pred_x = Reshape((self.last_rnn_size * (self.long_num + 1),),\n name=\"reshape_pred_x\")(pred_x)\n nonlinear_pred = Dense(units=self.output_dim,\n kernel_initializer=TruncatedNormal(stddev=0.1),\n bias_initializer=Constant(0.1),)(reshaped_pred_x)\n\n # ------------ ar component ------------\n if self.ar_window > 0:\n ar_pred_x = Reshape((self.ar_window * self.feature_num,),\n name=\"reshape_ar\")(short_input[:, -self.ar_window:])\n linear_pred = Dense(units=self.output_dim,\n kernel_initializer=TruncatedNormal(stddev=0.1),\n bias_initializer=Constant(0.1),)(ar_pred_x)\n else:\n linear_pred = 0\n y_pred = Add()([nonlinear_pred, linear_pred])\n self.model = Model(inputs=[long_input, short_input], outputs=y_pred)\n # lr decay\n # def lr_scheduler(epoch, r):\n # max_lr = 0.03\n # min_lr = 0.0001\n # lr = min_lr + 
(max_lr - min_lr) * math.exp(-epoch / 60)\n # return lr\n # callbacks = [tf.keras.callbacks.LearningRateScheduler(lr_scheduler, verbose=1)]\n # initial_lr = 0.003\n # rate = math.exp(-1 / 60)\n # lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(\n # initial_lr,\n # decay_steps=249,\n # decay_rate=rate,\n # staircase=True\n # )\n #\n # self.model.compile(loss=\"mae\",\n # metrics=metrics,\n # optimizer=tf.keras.optimizers.Adam(learning_rate=lr_schedule))\n\n self.model.compile(loss=self.loss,\n metrics=self.metrics,\n optimizer=tf.keras.optimizers.Adam(lr=self.lr))\n\n return self.model\n\n def __encoder(self, input, num, name='Encoder', training=None):\n \"\"\"\n Treat batch_size dimension and num dimension as one batch_size dimension\n (batch_size * num).\n :param input: <batch_size, num, time_step, input_dim>\n :param num: the number of input time series data. For short term data, the num is 1.\n :return: the embedded of the input <batch_size, num, last_rnn_hid_size>\n \"\"\"\n # input = Input(shape=(num, self.time_step, self.feature_num))\n batch_size_new = self.batch_size * num\n Tc = self.time_step - self.cnn_height + 1\n\n # CNN\n # reshaped input: (batch_size_new, time_step, feature_num, 1)\n reshaped_input = Lambda(lambda x:\n K.reshape(x, (-1, self.time_step, self.feature_num, 1),),\n name=name+'reshape_cnn')(input)\n # output: <batch_size_new, conv_out, 1, en_conv_hidden_size>\n cnn_out = Conv2D(filters=self.cnn_hid_size,\n kernel_size=(self.cnn_height, self.feature_num),\n padding=\"valid\",\n kernel_initializer=TruncatedNormal(stddev=0.1),\n bias_initializer=Constant(0.1),\n activation=\"relu\")(reshaped_input)\n cnn_out = Dropout(self.cnn_dropout)(cnn_out, training=training)\n\n rnn_input = Lambda(lambda x:\n K.reshape(x, (-1, num, Tc, self.cnn_hid_size)),)(cnn_out)\n\n # use AttentionRNNWrapper\n rnn_cells = [GRUCell(h_size, activation=\"relu\", dropout=self.rnn_dropout)\n for h_size in self.rnn_hid_sizes]\n\n attention_rnn = AttentionRNNWrapper(RNN(rnn_cells),\n weight_initializer=TruncatedNormal(stddev=0.1))\n\n outputs = []\n for i in range(num):\n input_i = rnn_input[:, i]\n # input_i = (batch, conv_hid_size, Tc)\n input_i = Permute((2, 1), input_shape=[Tc, self.cnn_hid_size])(input_i)\n # output = (batch, last_rnn_hid_size)\n output_i = attention_rnn(input_i, training=training)\n # output = (batch, 1, last_rnn_hid_size)\n output_i = Reshape((1, -1))(output_i)\n outputs.append(output_i)\n if len(outputs) > 1:\n output = Lambda(lambda x: concatenate(x, axis=1))(outputs)\n else:\n output = outputs[0]\n return output\n\n def _reshape_input_x(self, x):\n long_term = np.reshape(x[:, : self.time_step * self.long_num],\n [-1, self.long_num, self.time_step, x.shape[-1]])\n short_term = np.reshape(x[:, self.time_step * self.long_num:],\n [-1, self.time_step, x.shape[-1]])\n return long_term, short_term\n\n def _pre_processing(self, x, validation_data=None):\n long_term, short_term = self._reshape_input_x(x)\n if validation_data:\n val_x, val_y = validation_data\n long_val, short_val = self._reshape_input_x(val_x)\n validation_data = ([long_val, short_val], val_y)\n return [long_term, short_term], validation_data\n\n def _add_config_attributes(self, config, **new_attributes):\n # new_attributes are among [\"metrics\", \"epochs\", \"mc\", \"feature_num\", \"output_dim\"]\n if self.config is None:\n self.config = config\n else:\n if config:\n raise ValueError(\"You can only pass new configuations for 'mc', 'epochs' and \"\n \"'metrics' during incremental fitting. 
\"\n \"Additional configs passed are {}\".format(config))\n\n if new_attributes[\"metrics\"] is None:\n del new_attributes[\"metrics\"]\n self.config.update(new_attributes)\n\n def _check_input(self, x, y):\n input_feature_num = x.shape[-1]\n input_output_dim = y.shape[-1]\n if input_feature_num is None:\n raise ValueError(\"input x is None!\")\n if input_output_dim is None:\n raise ValueError(\"input y is None!\")\n\n if self.feature_num is not None and self.feature_num != input_feature_num:\n raise ValueError(\"input x has different feature number (the shape of last dimension) \"\n \"{} with the fitted model, which is {}.\"\n .format(input_feature_num, self.feature_num))\n if self.output_dim is not None and self.output_dim != input_output_dim:\n raise ValueError(\"input y has different prediction size (the shape of last dimension) \"\n \"of {} with the fitted model, which is {}.\"\n .format(input_output_dim, self.output_dim))\n return input_feature_num, input_output_dim\n\n def fit_eval(self, data, validation_data=None, mc=False, metrics=None,\n epochs=10, verbose=0, **config):\n x, y = data[0], data[1]\n feature_num, output_dim = self._check_input(x, y)\n self._add_config_attributes(config, epochs=epochs, mc=mc, metrics=metrics,\n feature_num=feature_num, output_dim=output_dim)\n self.apply_config(config=self.config)\n processed_x, processed_validation_data = self._pre_processing(x, validation_data)\n\n # if model is not initialized, __build the model\n if self.model is None:\n st = time.time()\n self.build()\n end = time.time()\n if verbose == 1:\n print(\"Build model took {}s\".format(end - st))\n\n st = time.time()\n hist = self.model.fit(processed_x, y, validation_data=processed_validation_data,\n batch_size=self.batch_size,\n epochs=self.epochs,\n verbose=verbose)\n\n if verbose == 1:\n print(\"Fit model took {}s\".format(time.time() - st))\n if validation_data is None:\n # get train metrics\n # results = self.model.evaluate(x, y)\n result = hist.history.get(self.metrics[0])[-1]\n else:\n result = hist.history.get('val_' + str(self.metrics[0]))[-1]\n return result\n\n def evaluate(self, x, y, metrics=['mse']):\n \"\"\"\n Evaluate on x, y\n :param x: input\n :param y: target\n :param metric: a list of metrics in string format\n :return: a list of metric evaluation results\n \"\"\"\n y_pred = self.predict(x)\n if y_pred.shape[1] == 1:\n multioutput = 'uniform_average'\n else:\n multioutput = 'raw_values'\n # y = np.squeeze(y, axis=2)\n return [Evaluator.evaluate(m, y, y_pred, multioutput=multioutput) for m in metrics]\n\n def predict(self, x, mc=False):\n input_x = self._reshape_input_x(x)\n return self.model.predict(input_x)\n\n def predict_with_uncertainty(self, x, n_iter=100):\n result = np.zeros((n_iter,) + (x.shape[0], self.output_dim))\n\n for i in range(n_iter):\n result[i, :, :] = self.predict(x, mc=True)\n\n prediction = result.mean(axis=0)\n uncertainty = result.std(axis=0)\n return prediction, uncertainty\n\n def save(self, model_path, config_path):\n self.model.save_weights(model_path)\n config_to_save = {\"cnn_height\": self.cnn_height,\n \"long_num\": self.long_num,\n \"time_step\": self.time_step,\n \"ar_window\": self.ar_window,\n \"cnn_hid_size\": self.cnn_hid_size,\n \"rnn_hid_sizes\": self.rnn_hid_sizes,\n \"cnn_dropout\": self.cnn_dropout,\n \"rnn_dropout\": self.rnn_dropout,\n \"lr\": self.lr,\n \"batch_size\": self.batch_size,\n # for fit eval\n \"epochs\": self.epochs,\n # todo: can not serialize metrics unless all elements are str\n \"metrics\": 
self.metrics,\n \"mc\": self.mc,\n \"feature_num\": self.feature_num,\n \"output_dim\": self.output_dim,\n \"loss\": self.loss\n }\n assert set(config_to_save.keys()) == self.saved_configs, \\\n \"The keys in config_to_save is not the same as self.saved_configs.\" \\\n \"Please keep them consistent\"\n # if self.decay_epochs > 0:\n # lr_decay_configs = {\"min_lr\": self.min_lr,\n # \"max_lr\": self.max_lr}\n # assert set(lr_decay_configs.keys()) == self.lr_decay_configs, \\\n # \"The keys in lr_decay_configs is not the same as self.lr_decay_configs.\" \\\n # \"Please keep them consistent\"\n # config_to_save.update(lr_decay_configs)\n # else:\n # lr_configs = {\"lr\": self.lr_value}\n # assert set(lr_configs.keys()) == self.lr_configs, \\\n # \"The keys in lr_configs is not the same as self.lr_configs.\" \\\n # \"Please keep them consistent\"\n # config_to_save.update(lr_configs)\n\n save_config(config_path, config_to_save)\n\n def restore(self, model_path, **config):\n \"\"\"\n restore model from file\n :param model_path: the model file\n :param config: the trial config\n \"\"\"\n self.config = config\n self.apply_config(rs=True, config=config)\n self.build()\n self.model.load_weights(model_path)\n\n def _get_optional_parameters(self):\n return {\n \"batch_size\",\n \"cnn_dropout\",\n \"rnn_dropout\",\n \"time_step\",\n \"cnn_height\",\n \"long_num\",\n \"ar_size\",\n \"loss\",\n \"cnn_hid_size\",\n \"rnn_hid_sizes\",\n \"lr\"\n }\n\n def _get_required_parameters(self):\n return {\n \"feature_num\",\n \"output_dim\"\n }\n", "#\n# Copyright 2018 Analytics Zoo Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport os\nfrom unittest import TestCase\n\nimport numpy as np\nimport pytest\nimport argparse\n\nimport torch\nimport torch.nn as nn\nfrom zoo.orca.automl.auto_estimator import AutoEstimator\nfrom zoo.automl.recipe.base import Recipe\nfrom zoo.orca.automl.pytorch_utils import LR_NAME\nfrom zoo.orca import init_orca_context, stop_orca_context\nfrom abc import abstractmethod\n\n\nclass Net(nn.Module):\n def __init__(self, dropout, fc1_size, fc2_size):\n super().__init__()\n self.fc1 = nn.Linear(50, fc1_size)\n self.relu1 = nn.ReLU()\n self.dout = nn.Dropout(dropout)\n self.fc2 = nn.Linear(fc1_size, fc2_size)\n self.prelu = nn.PReLU(1)\n self.out = nn.Linear(fc2_size, 1)\n self.out_act = nn.Sigmoid()\n\n def forward(self, input_):\n a1 = self.fc1(input_)\n h1 = self.relu1(a1)\n dout = self.dout(h1)\n a2 = self.fc2(dout)\n h2 = self.prelu(a2)\n a3 = self.out(h2)\n y = self.out_act(a3)\n return y\n\n\ndef model_creator(config):\n return Net(dropout=config[\"dropout\"],\n fc1_size=config[\"fc1_size\"],\n fc2_size=config[\"fc2_size\"])\n\n\ndef get_optimizer(model, config):\n return torch.optim.SGD(model.parameters(), lr=config[\"lr\"])\n\n\ndef get_train_val_data():\n def get_x_y(size):\n input_size = 50\n x1 = np.random.randn(size // 2, input_size)\n x2 = np.random.randn(size // 2, input_size) + 1.5\n x = np.concatenate([x1, x2], axis=0)\n y1 = np.zeros((size // 2, 1))\n y2 = 
np.ones((size // 2, 1))\n y = np.concatenate([y1, y2], axis=0)\n return x, y\n\n train_data = get_x_y(size=1000)\n val_data = get_x_y(size=400)\n return train_data, val_data\n\n\nclass LinearRecipe(Recipe):\n def __init__(self, training_iteration, num_samples):\n super().__init__()\n self.training_iteration = training_iteration\n self.num_samples = num_samples\n\n def search_space(self):\n from zoo.orca.automl import hp\n return {\n \"dropout\": hp.uniform(0.2, 0.3),\n \"fc1_size\": hp.choice([50, 64]),\n \"fc2_size\": hp.choice([100, 128]),\n LR_NAME: hp.choice([0.001, 0.003, 0.01]),\n \"batch_size\": hp.choice([32, 64])\n }\n\n\ndef train_example(args):\n auto_est = AutoEstimator.from_torch(\n model_creator=model_creator,\n optimizer=\"Adam\",\n loss=\"BCELoss\",\n logs_dir=\"/tmp/zoo_automl_logs\",\n resources_per_trial={\"cpu\": args.cpus_per_trial},\n name=\"test_fit\")\n train_data, val_data = get_train_val_data()\n recipe = LinearRecipe(training_iteration=args.epochs,\n num_samples=args.trials)\n auto_est.fit(data=train_data,\n validation_data=val_data,\n recipe=recipe,\n metric=\"accuracy\")\n # Choose the best model\n best_model = auto_est.get_best_model()\n best_model_accuracy = best_model.evaluate(x=val_data[0],\n y=val_data[1],\n metrics=['accuracy'])\n print(f'model accuracy is {best_model_accuracy[0]}')\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n prog='Autoestimator_pytorch',\n description='Automatically fit the model and return the best model.')\n parser.add_argument('--cluster_mode',\n type=str,\n default=\"local\",\n help='The mode for the Spark cluster.')\n parser.add_argument(\n \"--num_nodes\",\n type=int,\n default=1,\n help=\"The number of nodes to be used in the cluster. \"\n \"You can change it depending on your own cluster setting.\")\n parser.add_argument(\n \"--cores\",\n type=int,\n default=4,\n help=\"The number of cpu cores you want to use on each node. \"\n \"You can change it depending on your own cluster setting.\")\n parser.add_argument(\n \"--memory\",\n type=str,\n default=\"10g\",\n help=\"The memory you want to use on each node. \"\n \"You can change it depending on your own cluster setting.\")\n parser.add_argument(\"--workers_per_node\",\n type=int,\n default=2,\n help=\"The number of workers to run on each node\")\n parser.add_argument('--k8s_master',\n type=str,\n default=\"\",\n help=\"The k8s master. \"\n \"It should be k8s://https://<k8s-apiserver-host>: \"\n \"<k8s-apiserver-port>.\")\n parser.add_argument(\"--container_image\",\n type=str,\n default=\"\",\n help=\"The runtime k8s image. \"\n \"You can change it with your k8s image.\")\n parser.add_argument('--k8s_driver_host',\n type=str,\n default=\"\",\n help=\"The k8s driver localhost. 
\")\n parser.add_argument('--k8s_driver_port',\n type=str,\n default=\"\",\n help=\"The k8s driver port.\")\n parser.add_argument(\n '--cpus_per_trial',\n type=int,\n default=2,\n help=\"The number of cores you want to allocate for each trial.\")\n parser.add_argument('--epochs',\n type=int,\n default=1,\n help=\"The number of epochs in each trial.\")\n parser.add_argument('--trials',\n type=int,\n default=4,\n help=\"The number of searching trials.\")\n\n args = parser.parse_args()\n if args.cluster_mode == \"local\":\n init_orca_context(cluster_mode=\"local\",\n cores=args.cores,\n num_nodes=args.num_nodes,\n memory=args.memory,\n init_ray_on_spark=True)\n elif args.cluster_mode == \"yarn\":\n init_orca_context(cluster_mode=\"yarn-client\",\n cores=args.cores,\n memory=args.memory,\n init_ray_on_spark=True)\n elif args.cluster_mode == \"k8s\":\n if not args.k8s_master or not args.container_image \\\n or not args.k8s_driver_host or not args.k8s_driver_port:\n parser.print_help()\n parser.error('k8s_master, container_image,'\n 'k8s_driver_host/port are required not to be empty')\n init_orca_context(cluster_mode=\"k8s\",\n master=args.k8s_master,\n container_image=args.container_image,\n cores=args.cores,\n init_ray_on_spark=True,\n conf={\n \"spark.driver.host\": args.k8s_driver_host,\n \"spark.driver.port\": args.k8s_driver_port\n })\n\n train_example(args)\n stop_orca_context()\n" ]
[ [ "tensorflow.keras.models.load_model", "tensorflow.keras.models.Model", "tensorflow.keras.layers.Dense", "tensorflow.keras.optimizers.RMSprop", "tensorflow.keras.layers.LSTM", "tensorflow.keras.layers.Input" ], [ "tensorflow.keras.initializers.Constant", "tensorflow.matmul", "tensorflow.keras.models.Model", "numpy.reshape", "tensorflow.keras.backend.dot", "tensorflow.keras.backend.sum", "tensorflow.keras.backend.concatenate", "tensorflow.keras.backend.rnn", "tensorflow.keras.backend.reshape", "tensorflow.keras.optimizers.Adam", "tensorflow.keras.initializers.TruncatedNormal", "numpy.zeros" ], [ "torch.nn.Dropout", "torch.nn.PReLU", "torch.nn.Sigmoid", "numpy.concatenate", "torch.nn.Linear", "numpy.ones", "numpy.random.randn", "torch.nn.ReLU", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ILoveRedEd55/AIML_Detection_System
[ "b2fdd8475f069884060f7bb31f41953bae057d7b", "b2fdd8475f069884060f7bb31f41953bae057d7b" ]
[ "lib/src/layers/RNN.py", "src/net/ViolenceDetector.py" ]
[ "from src.layers.LayerHelper import *\nfrom settings import LayerSettings as layerSettings\nimport tensorflow as tf\nimport os\nCUDA_VISIBLE_DEVICES=0\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\" # set gpu number\n\ndef LSTM(name_, inputTensor_, numberOfOutputs_, isTraining_, dropoutProb_=None):\n\twith tf.name_scope(name_):\n\t\tcell = tf.nn.rnn_cell.LSTMCell(num_units=numberOfOutputs_,\n\t\t\t\t\t\t use_peepholes=True,\n\t\t\t\t\t\t initializer=layerSettings.LSTM_INITIALIZER,\n\t\t\t\t\t\t forget_bias=1.0,\n\t\t\t\t\t\t state_is_tuple=True,\n\t\t\t\t\t\t activation=tf.nn.tanh,\n\t\t\t\t\t\t name=name_+\"_cell\")\n\n\t\tif dropoutProb_ != None:\n\t\t\tdropoutProbTensor = tf.cond(isTraining_, lambda: 0.5, lambda: 1.0)\n\t\t\tcell = tf.nn.rnn_cell.DropoutWrapper(cell,\n\t\t\t\t\t\t\t input_keep_prob=dropoutProbTensor,\n\t\t\t\t\t\t\t output_keep_prob=dropoutProbTensor)\n\n\t\tstatePlaceHolder = tf.nn.rnn_cell.LSTMStateTuple( tf.placeholder(layerSettings.FLOAT_TYPE, [None, numberOfOutputs_]),\n\t\t\t\t\t\t\t\t tf.placeholder(layerSettings.FLOAT_TYPE, [None, numberOfOutputs_]) )\n\n\t\toutputTensor, stateTensor = tf.nn.dynamic_rnn(\tcell=cell,\n\t\t\t\t\t\t\t\tinitial_state=statePlaceHolder,\n\t\t\t\t\t\t\t\tinputs=inputTensor_)\n\n\t\t# Add Regularization Loss\n\t\tfor eachVariable in tf.trainable_variables():\n\t\t\tif name_ in eachVariable.name:\n\t\t\t\tif ('bias' not in eachVariable.name)and(layerSettings.REGULARIZER_WEIGHTS_DECAY != None):\n\t\t\t\t\tregularizationLoss = L2_Regularizer(eachVariable)\n\t\t\t\t\ttf.losses.add_loss(regularizationLoss, loss_collection=tf.GraphKeys.REGULARIZATION_LOSSES)\n\t\t\t\t\t\n\n\treturn outputTensor, stateTensor, statePlaceHolder\n\n", "import tensorflow as tf\nimport os\nCUDA_VISIBLE_DEVICES=0\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\" # set gpu number\nimport numpy as np\nfrom net import DeploySettings as deploySettings\nfrom net import DataSettings as dataSettings\nfrom net import NetSettings as netSettings\n\nclass OutputSmoother:\n\tdef __init__(self):\n\t\tself._previousPrediction = False\n\t\tself._previousOutput = False\n\t\tself._countOfNeighborResult = 0\n\n\tdef Smooth(self, isFighting_):\n\t\tif isFighting_ != self._previousPrediction:\n\t\t\tself._countOfNeighborResult = 1\n\t\t\tself._previousPrediction = isFighting_\n\n\t\telif isFighting_ == self._previousPrediction:\n\t\t\tself._countOfNeighborResult += 1\n\t\t\tif self._countOfNeighborResult >= deploySettings.CHANGE_JUDGEMENT_THRESHOLD:\n\t\t\t\tself._previousOutput = isFighting_\n\t\t\t\n\n\t\treturn self._previousOutput\n\n\nclass ViolenceDetector:\n\tdef __init__(self):\n\t\t# PlaceHolders\n\t\tself._inputPlaceholder = tf.placeholder(dtype=dataSettings.FLOAT_TYPE,\n\t\t\t\t\t\t\tshape=[\t1, 1, dataSettings.GROUPED_SIZE,\n\t\t\t\t\t\t\t\tdataSettings.IMAGE_SIZE,\n\t\t\t\t\t\t\t\tdataSettings.IMAGE_SIZE,\n\t\t\t\t\t\t\t\tdataSettings.IMAGE_CHANNELS] )\n\t\tself._batchSizePlaceholder = tf.placeholder(tf.int32)\n\t\tself._unrolledSizePlaceholder = tf.placeholder(tf.int32)\n\t\tself._isTrainingPlaceholder = tf.placeholder(tf.bool)\n\t\tself._trainingStepPlaceholder = tf.placeholder(tf.int64)\n\n\t\t# Previous Frames Holder\n\t\tself._listOfPreviousFrames = []\n\t\tself._groupedInput = None\n\n\t\t# Net\n\t\tself._net = 
netSettings.GetNetwork(\tself._inputPlaceholder,\n\t\t\t\t\t\t\tself._batchSizePlaceholder,\n\t\t\t\t\t\t\tself._unrolledSizePlaceholder,\n\t\t\t\t\t\t\tself._isTrainingPlaceholder,\n\t\t\t\t\t\t\tself._trainingStepPlaceholder)\n\t\tself._net.Build()\n\t\tself._predictionOp = tf.nn.softmax(self._net.logitsOp, axis=-1, name=\"tf.nn.softmax\")\n\t\tself._listOfPreviousCellState = None\n\n\t\t# Session\n\t\tself.session = tf.Session()\n\t\tinit = tf.global_variables_initializer()\n\t\tself.session.run(init)\n\t\tself._recoverModelFromCheckpoints()\n\n\t\t# Output\n\t\tself._unsmoothedResults = []\n\t\tself._outputSmoother = OutputSmoother()\n\n\tdef __del__(self):\n\t\tself.session.close()\n\n\tdef Detect(self, netInputImage_):\n\t\t'''\n\t\t The argument 'netInputImage_' should be shape of:\n\t\t [dataSettings.IMAGE_SIZE, dataSettings.IMAGE_SIZE, dataSettings.IMAGE_CHANNELS]\n\t\t And the value of each pixel should be in the range of [-1, 1].\n\t\t Note, if you use OpenCV to read images or videos, you should convert the Color from\n\t\t BGR to RGB. Moreover, the value should also be converted from [0, 255] to [-1, 1].\n\t\t'''\n\t\tif dataSettings.GROUPED_SIZE == 1:\n\t\t\tself._groupedInput = netInputImage_.reshape(self._inputPlaceholder.shape)\n\n\t\telse:\n\t\t\tself._updateGroupedInputImages(netInputImage_)\n\n\t\tinputFeedDict = { self._inputPlaceholder : self._groupedInput,\n\t\t\t\t self._batchSizePlaceholder : 1,\n\t\t\t\t self._unrolledSizePlaceholder : 1,\n\t\t\t\t self._isTrainingPlaceholder : False,\n\t\t\t\t self._trainingStepPlaceholder : 0 }\n\t\tcellStateFeedDict = self._net.GetFeedDictOfLSTM(1, self._listOfPreviousCellState)\n\n\t\tinputFeedDict.update(cellStateFeedDict)\n\n\t\ttupleOfOutputs = self.session.run( [self._predictionOp] + self._net.GetListOfStatesTensorInLSTMs(),\n\t\t\t \t\t\t feed_dict = inputFeedDict )\n\t\tlistOfOutputs = list(tupleOfOutputs)\n\t\tprediction = listOfOutputs.pop(0)\n\t\tself._listOfPreviousCellState = listOfOutputs\n\n\t\tisFighting = np.equal(np.argmax(prediction), np.argmax(dataSettings.FIGHT_LABEL))\n\t\tself._unsmoothedResults.append(isFighting)\n\n\t\tsmoothedOutput = self._outputSmoother.Smooth(isFighting)\n\n\t\treturn smoothedOutput\n\n\t@property\n\tdef unsmoothedResults(self):\n\t\treturn self._unsmoothedResults\n\n\tdef _updateGroupedInputImages(self, newInputImage_):\n\t\tif len(self._listOfPreviousFrames) == dataSettings.GROUPED_SIZE:\n\t\t\t# Abandon the unsed frame\n\t\t\tself._listOfPreviousFrames.pop(0)\n\t\t\tself._listOfPreviousFrames.append(newInputImage_)\n\n\t\telse:\n\t\t\tblackFrame = np.full( shape=[dataSettings.IMAGE_SIZE, dataSettings.IMAGE_SIZE, dataSettings.IMAGE_CHANNELS],\n\t\t\t\t\t fill_value=-1.0,\n\t\t\t\t\t dtype=dataSettings.FLOAT_TYPE)\n\t\t\tfor i in range(dataSettings.GROUPED_SIZE-1):\n\t\t\t\tself._listOfPreviousFrames.append(blackFrame)\n\n\t\t\tself._listOfPreviousFrames.append(newInputImage_)\n\n\n\t\tself._groupedInput = np.concatenate(self._listOfPreviousFrames)\n\t\tself._groupedInput = self._groupedInput.reshape(self._inputPlaceholder.shape)\n\t\t\t\n\t\t\t\n\n\tdef _recoverModelFromCheckpoints(self):\n\t\tprint(\"Load Model from: \", deploySettings.PATH_TO_MODEL_CHECKPOINTS)\n\t\tmodelLoader = tf.train.Saver()\n\t\tmodelLoader.restore(self.session, deploySettings.PATH_TO_MODEL_CHECKPOINTS)\n\n\n" ]
[ [ "tensorflow.cond", "tensorflow.nn.dynamic_rnn", "tensorflow.nn.rnn_cell.DropoutWrapper", "tensorflow.nn.rnn_cell.LSTMCell", "tensorflow.placeholder", "tensorflow.name_scope", "tensorflow.trainable_variables", "tensorflow.losses.add_loss" ], [ "tensorflow.nn.softmax", "tensorflow.placeholder", "numpy.full", "numpy.concatenate", "tensorflow.global_variables_initializer", "numpy.argmax", "tensorflow.Session", "tensorflow.train.Saver" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
tadasdanielius/P5-Vehicle-Detection-And-Tracking
[ "38513e91d863f7fff50703349aacbe5d5bbfae39" ]
[ "sdc/detection/cnn_classifier.py" ]
[ "from keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential\nfrom keras.layers import Convolution2D, MaxPooling2D\nfrom keras.layers import Activation, Dropout, Flatten, Dense, Lambda, ELU\nfrom keras.optimizers import Adam\nfrom sklearn.model_selection import train_test_split\nfrom keras.models import model_from_json\nfrom sklearn.preprocessing import normalize\nimport cv2\nimport numpy as np\nimport glob\nimport json\nfrom keras.layers import merge\nfrom keras.layers.core import Lambda\nfrom keras.models import Model\n\nimport tensorflow as tf\n\n\ndef make_parallel(model, gpu_count):\n def get_slice(data, idx, parts):\n shape = tf.shape(data)\n size = tf.concat(0, [shape[:1] // parts, shape[1:]])\n stride = tf.concat(0, [shape[:1] // parts, shape[1:] * 0])\n start = stride * idx\n return tf.slice(data, start, size)\n\n outputs_all = []\n for i in range(len(model.outputs)):\n outputs_all.append([])\n\n # Place a copy of the model on each GPU, each getting a slice of the batch\n for i in range(gpu_count):\n with tf.device('/gpu:%d' % i):\n with tf.name_scope('tower_%d' % i) as scope:\n\n inputs = []\n # Slice each input into a piece for processing on this GPU\n for x in model.inputs:\n input_shape = tuple(x.get_shape().as_list())[1:]\n slice_n = Lambda(get_slice, output_shape=input_shape, arguments={'idx': i, 'parts': gpu_count})(x)\n inputs.append(slice_n)\n\n outputs = model(inputs)\n\n if not isinstance(outputs, list):\n outputs = [outputs]\n\n # Save all the outputs for merging back together later\n for l in range(len(outputs)):\n outputs_all[l].append(outputs[l])\n\n # merge outputs on CPU\n with tf.device('/cpu:0'):\n merged = []\n for outputs in outputs_all:\n merged.append(merge(outputs, mode='concat', concat_axis=0))\n\n return Model(input=model.inputs, output=merged)\n\n\nclass CNNClassifier:\n def __init__(self):\n self.classifier = None\n\n def get_model(self, parallel=False):\n model = Sequential()\n #model.add(Lambda(lambda x: x / 127.5 - 1., input_shape=(64, 64, 3)))\n model.add(Convolution2D(8, 8, 8, subsample=(4, 4), border_mode=\"same\", activation='elu', name='Conv1'))\n model.add(Convolution2D(16, 5, 5, subsample=(2, 2), border_mode=\"same\", activation='elu', name='Conv2'))\n model.add(Convolution2D(32, 5, 5, subsample=(2, 2), border_mode=\"same\", activation='elu', name='Conv3'))\n model.add(Flatten())\n model.add(ELU())\n model.add(Dense(1024, activation='elu'))\n model.add(Dropout(.5))\n model.add(ELU())\n model.add(Dense(512, activation='elu'))\n model.add(Dropout(.5))\n model.add(Dense(1, name='output'))\n model.add(Activation('sigmoid'))\n if parallel:\n model = make_parallel(model, 2)\n #model.compile(optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])\n self.model = model\n return model\n\n def _model(self):\n img_width, img_height = 64, 64\n model = Sequential()\n model.add(Convolution2D(8, 3, 3, input_shape=(img_width, img_height, 3)))\n model.add(Activation('elu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n #model.add(Convolution2D(16, 3, 3))\n #model.add(Activation('elu'))\n #model.add(MaxPooling2D(pool_size=(2, 2)))\n\n #model.add(Convolution2D(32, 3, 3))\n #model.add(Activation('elu'))\n #model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Flatten())\n model.add(Dense(512))\n model.add(Dropout(0.5))\n model.add(Dense(1, activation='sigmoid'))\n #model = make_parallel(model, 2)\n self.model = model\n\n def compile(self):\n self.model.compile(loss='binary_crossentropy',\n optimizer='rmsprop', 
class_mode='binary',\n metrics=['accuracy'])\n\n def save(self):\n model_json = self.model.to_json()\n with open(\"./model.json\", \"w\") as json_file:\n json.dump(model_json, json_file)\n self.model.save_weights(\"./model.h5\")\n print(\"Saved model to disk\")\n\n def load(self):\n with open('./model.json', 'r') as jfile:\n self.model = model_from_json(json.load(jfile))\n\n self.compile()\n self.model.load_weights('./model.h5')\n\n def get_list(self):\n vehicles = np.array(glob.glob('training_data/vehicles/*/*'))\n y_vehicles = np.zeros(vehicles.shape) + 1\n non_vehicles = np.array(glob.glob('training_data/non-vehicles/*/*'))\n y_non_vehicles = np.zeros(non_vehicles.shape)\n X_data = np.concatenate((vehicles, non_vehicles))\n Y_data = np.concatenate((y_vehicles, y_non_vehicles))\n return X_data, Y_data\n\n def predict(self, image):\n #img = np.copy(image)\n #img = cv2.resize(img, (64, 64))\n x = image[None, :, :, :]\n result = self.model.predict(x, 1)\n return result\n\n def train(self, file_list, labels, test_size=0.2, nb_epoch=30, batch_size=128):\n X_train, X_test, Y_train, Y_test = train_test_split(file_list, labels, test_size=test_size, random_state=100)\n\n test_images = build_images(X_test)\n train_images = build_images(X_train)\n\n train_datagen = ImageDataGenerator(\n rescale=1. / 255,\n shear_range=0.05,\n zoom_range=0.05,\n width_shift_range=0.1,\n height_shift_range=0.1,\n rotation_range=5,\n horizontal_flip=True)\n test_datagen = ImageDataGenerator(rescale=1. / 255)\n train_generator = train_datagen.flow(train_images, Y_train, batch_size)\n test_generator = test_datagen.flow(test_images, Y_test, batch_size)\n\n nb_train_samples = (batch_size-1)*100\n nb_validation_samples = (batch_size-1)*20\n\n #self.get_model(parallel=False)\n self._model()\n self.compile()\n\n self.model.fit_generator(\n train_generator,\n samples_per_epoch=nb_train_samples,\n nb_epoch=nb_epoch, show_accuracy=True,\n validation_data=test_generator,\n nb_val_samples=nb_validation_samples)\n\ndef build_images(x):\n images = np.zeros((len(x), 64, 64, 3))\n for idx, img_fname in enumerate(x):\n im = cv2.imread(img_fname)\n im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)\n im = cv2.resize(im, (64, 64), interpolation=cv2.INTER_AREA)\n images[idx] = im\n return images\n\ndef do_all(nb_epoch=30, batch_size=256):\n clf = CNNClassifier()\n x, y = clf.get_list()\n clf.train(x, y, nb_epoch=nb_epoch, batch_size=batch_size)\n clf.save()\n\n" ]
[ [ "tensorflow.device", "tensorflow.concat", "tensorflow.shape", "tensorflow.slice", "sklearn.model_selection.train_test_split", "numpy.concatenate", "tensorflow.name_scope", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
ManjunathaPatkar/Machine-Learning
[ "f1c6ec1a9f802f6e88ed67c0da6c1e9373790537", "f1c6ec1a9f802f6e88ed67c0da6c1e9373790537" ]
[ "Machine Learning A-Z Template Folder/Part 2 - Regression/Section 5 - Multiple Linear Regression/data_preprocessing_template.py", "Machine Learning A-Z Template Folder/Part 1 - Data Preprocessing/Section 2 -------------------- Part 1 - Data Preprocessing --------------------/missing-data.py" ]
[ "# Data Preprocessing Template\n\n# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Importing the dataset\ndataset = pd.read_csv('50_Startups.csv')\nX = dataset.iloc[:, :-1].values\ny = dataset.iloc[:, 4].values\n\n#encoding independent variable state\n#from sklearn.preprocessing import LabelEncoder, OneHotEncoder\n#labelencoder_X = LabelEncoder()\n#X[:, 3] = labelencoder_X.fit_transform(X[:, 3])\n#onehotencoder = OneHotEncoder(categorical_features = [3])\n#X = onehotencoder.fit_transform(X).toarray()\n\n\nfrom sklearn.preprocessing import LabelEncoder,OneHotEncoder\nfrom sklearn.compose import ColumnTransformer\nct = ColumnTransformer([(\"State\", OneHotEncoder(), [3])], remainder = 'passthrough')\nX= ct.fit_transform(X)\n\n#avoiding the dummy variable trap\nX=X[:,1:]\n\n# Splitting the dataset into the Training set and Test set\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)\n\n# Feature Scaling\n\"\"\"from sklearn.preprocessing import StandardScaler\nsc_X = StandardScaler()\nX_train = sc_X.fit_transform(X_train)\nX_test = sc_X.transform(X_test)\nsc_y = StandardScaler()\ny_train = sc_y.fit_transform(y_train)\"\"\"\n\n\n#fitting multiple linear regression to the training set\nfrom sklearn.linear_model import LinearRegression\nregressor=LinearRegression()\nregressor.fit(X_train,y_train)\n\n\n#Predicting the test set results\ny_pred=regressor.predict(X_test)\n\n#Building the optimal model using backward elimination\nimport statsmodels.api as sm\nX=np.append(arr=np.ones((50,1)).astype(int),values=X,axis=1)\n\n#X_opt=X[:,[0,1,2,3,4,5]]\nX_opt = np.array(X[:, [0, 1, 2, 3, 4, 5]], dtype=float)\nregressor_OLS=sm.OLS(endog=y,exog=X_opt).fit()\nregressor_OLS.summary()\n\n\nX_opt = np.array(X[:, [0, 1, 3, 4, 5]], dtype=float)\nregressor_OLS=sm.OLS(endog=y,exog=X_opt).fit()\nregressor_OLS.summary()\n\nX_opt = np.array(X[:, [0, 3, 4, 5]], dtype=float)\nregressor_OLS=sm.OLS(endog=y,exog=X_opt).fit()\nregressor_OLS.summary()\n\n\nX_opt = np.array(X[:, [0, 3, 5]], dtype=float)\nregressor_OLS=sm.OLS(endog=y,exog=X_opt).fit()\nregressor_OLS.summary()\n\n\nX_opt = np.array(X[:, [0, 3]], dtype=float)\nregressor_OLS=sm.OLS(endog=y,exog=X_opt).fit()\nregressor_OLS.summary()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "# Data Preprocessing\n\n# Importing the libraries\n#import numpy as np\n#import matplotlib.pyplot as plt\n#import pandas as pd\n\n# Importing the dataset\n#dataset = pd.read_csv('Data.csv')\n#X = dataset.iloc[:, :-1].values\n#y = dataset.iloc[:, 3].values\n\n# Taking care of missing data\n#from sklearn.preprocessing import Imputer\n#imputer = Imputer(missing_values = 'NaN', strategy = 'mean', axis = 0)\n#imputer = imputer.fit(X[:, 1:3])\n#X[:, 1:3] = imputer.transform(X[:, 1:3])\n\n# Data Preprocessing\n\n# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Importing the dataset\ndataset = pd.read_csv('Data.csv')\nX = dataset.iloc[:, :-1].values\ny = dataset.iloc[:, 3].values\n\n# Taking care of missing data\n# Updated Imputer\nfrom sklearn.impute import SimpleImputer\nmissingvalues = SimpleImputer(missing_values = np.nan, strategy = 'mean', verbose = 0)\nmissingvalues = missingvalues.fit(X[:, 1:3])\nX[:, 1:3]=missingvalues.transform(X[:, 1:3])\n" ]
[ [ "pandas.read_csv", "sklearn.preprocessing.OneHotEncoder", "sklearn.model_selection.train_test_split", "numpy.ones", "sklearn.linear_model.LinearRegression", "numpy.array" ], [ "pandas.read_csv", "sklearn.impute.SimpleImputer" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
marijnfs/onnxruntime
[ "6e1eb4b0efca9644c5f8979fbded9416fdd722dc" ]
[ "tools/ci_build/build.py" ]
[ "#!/usr/bin/env python3\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nimport argparse\nimport glob\nimport multiprocessing\nimport os\nimport re\nimport shutil\nimport subprocess\nimport sys\nimport hashlib\nfrom logger import log\n\n\nclass BaseError(Exception):\n \"\"\"Base class for errors originating from build.py.\"\"\"\n pass\n\n\nclass BuildError(BaseError):\n \"\"\"Error from running build steps.\"\"\"\n\n def __init__(self, *messages):\n super().__init__(\"\\n\".join(messages))\n\n\nclass UsageError(BaseError):\n \"\"\"Usage related error.\"\"\"\n\n def __init__(self, message):\n super().__init__(message)\n\n\ndef _check_python_version():\n # According to the BUILD.md, python 3.5+ is required:\n # Python 2 is definitely not supported and it should be safer to consider\n # it won't run with python 4:\n if sys.version_info[0] != 3:\n raise BuildError(\n \"Bad python major version: expecting python 3, found version \"\n \"'{}'\".format(sys.version))\n if sys.version_info[1] < 5:\n raise BuildError(\n \"Bad python minor version: expecting python 3.5+, found version \"\n \"'{}'\".format(sys.version))\n\n\n_check_python_version()\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser(\n description=\"ONNXRuntime CI build driver.\",\n usage=\"\"\" # noqa\n Default behavior is --update --build --test for native architecture builds.\n Default behavior is --update --build for cross-compiled builds.\n\n The Update phase will update git submodules, and run cmake to generate makefiles.\n The Build phase will build all projects.\n The Test phase will run all unit tests, and optionally the ONNX tests.\n\n Use the individual flags to only run the specified stages.\n \"\"\")\n # Main arguments\n parser.add_argument(\n \"--build_dir\", required=True, help=\"Path to the build directory.\")\n parser.add_argument(\n \"--config\", nargs=\"+\", default=[\"Debug\"],\n choices=[\"Debug\", \"MinSizeRel\", \"Release\", \"RelWithDebInfo\"],\n help=\"Configuration(s) to build.\")\n parser.add_argument(\n \"--update\", action='store_true', help=\"Update makefiles.\")\n parser.add_argument(\"--build\", action='store_true', help=\"Build.\")\n parser.add_argument(\n \"--clean\", action='store_true',\n help=\"Run 'cmake --build --target clean' for the selected config/s.\")\n parser.add_argument(\n \"--parallel\", action='store_true', help=\"\"\"Use parallel build.\n The build setup doesn't get all dependencies right, so --parallel\n only works if you're just rebuilding ONNXRuntime code. If you've\n done an update that fetched external dependencies you have to build\n without --parallel the first time. 
Once that's done , run with\n \"--build --parallel --test\" to just build in\n parallel and run tests.\"\"\")\n parser.add_argument(\"--test\", action='store_true', help=\"Run unit tests.\")\n parser.add_argument(\n \"--skip_tests\", action='store_true', help=\"Skip all tests.\")\n\n # Training options\n parser.add_argument(\n \"--enable_nvtx_profile\", action='store_true', help=\"Enable NVTX profile in ORT.\")\n parser.add_argument(\n \"--enable_training\", action='store_true', help=\"Enable training in ORT.\")\n parser.add_argument(\n \"--enable_training_python_frontend_e2e_tests\", action=\"store_true\",\n help=\"Enable the pytorch frontend training tests.\")\n parser.add_argument(\n \"--enable_training_pipeline_e2e_tests\", action=\"store_true\",\n help=\"Enable the pipeline c++ e2e tests.\")\n parser.add_argument(\n \"--use_horovod\", action='store_true', help=\"Enable Horovod.\")\n parser.add_argument(\n \"--mpi_home\", help=\"Path to MPI installation dir\")\n parser.add_argument(\n \"--nccl_home\", help=\"Path to NCCL installation dir\")\n\n # enable ONNX tests\n parser.add_argument(\n \"--enable_onnx_tests\", action='store_true',\n help=\"\"\"When running the Test phase, run onnx_test_running against\n available test data directories.\"\"\")\n parser.add_argument(\"--path_to_protoc_exe\", help=\"Path to protoc exe.\")\n parser.add_argument(\n \"--fuzz_testing\", action='store_true', help=\"Enable Fuzz testing of the onnxruntime.\")\n parser.add_argument(\n \"--enable_symbolic_shape_infer_tests\", action='store_true',\n help=\"\"\"When running the Test phase, run symbolic shape inference against\n available test data directories.\"\"\")\n\n # generate documentaiton\n parser.add_argument(\n \"--gen_doc\", action='store_true',\n help=\"Generate documentation on contrib ops\")\n\n # CUDA related\n parser.add_argument(\"--use_cuda\", action='store_true', help=\"Enable CUDA.\")\n parser.add_argument(\n \"--cuda_version\", help=\"The version of CUDA toolkit to use. \"\n \"Auto-detect if not specified. e.g. 9.0\")\n parser.add_argument(\n \"--cuda_home\", help=\"Path to CUDA home.\"\n \"Read from CUDA_HOME environment variable if --use_cuda is true and \"\n \"--cuda_home is not specified.\")\n parser.add_argument(\n \"--cudnn_home\", help=\"Path to CUDNN home. \"\n \"Read from CUDNN_HOME environment variable if --use_cuda is true and \"\n \"--cudnn_home is not specified.\")\n\n # Python bindings\n parser.add_argument(\n \"--enable_pybind\", action='store_true', help=\"Enable Python Bindings.\")\n parser.add_argument(\n \"--build_wheel\", action='store_true', help=\"Build Python Wheel.\")\n parser.add_argument(\n \"--wheel_name_suffix\", help=\"Suffix to append to created wheel names. \"\n \"This value is currently only used for nightly builds.\")\n parser.add_argument(\n \"--numpy_version\", help=\"Installs a specific version of numpy \"\n \"before building the python binding.\")\n parser.add_argument(\n \"--skip-keras-test\", action='store_true',\n help=\"Skip tests with Keras if keras is installed\")\n\n # C-Sharp bindings\n parser.add_argument(\n \"--build_csharp\", action='store_true',\n help=\"Build C#.Net DLL and NuGet package. This should be only used in CI pipelines. \"\n \"For building C# bindings and packaging them into nuget package use --build_nuget arg.\")\n\n parser.add_argument(\n \"--build_nuget\", action='store_true',\n help=\"Build C#.Net DLL and NuGet package on the local machine. 
\"\n \"Currently only Windows and Linux platforms are supported.\")\n\n # Java bindings\n parser.add_argument(\n \"--build_java\", action='store_true', help=\"Build Java bindings.\")\n\n # Node.js binding\n parser.add_argument(\n \"--build_nodejs\", action='store_true',\n help=\"Build Node.js binding and NPM package.\")\n\n # Build a shared lib\n parser.add_argument(\n \"--build_shared_lib\", action='store_true',\n help=\"Build a shared library for the ONNXRuntime.\")\n\n # Build options\n parser.add_argument(\n \"--cmake_extra_defines\", nargs=\"+\",\n help=\"Extra definitions to pass to CMake during build system \"\n \"generation. These are just CMake -D options without the leading -D.\")\n parser.add_argument(\n \"--target\",\n help=\"Build a specific target, e.g. winml_dll\")\n parser.add_argument(\n \"--x86\", action='store_true',\n help=\"Create x86 makefiles. Requires --update and no existing cache \"\n \"CMake setup. Delete CMakeCache.txt if needed\")\n parser.add_argument(\n \"--arm\", action='store_true',\n help=\"Create ARM makefiles. Requires --update and no existing cache \"\n \"CMake setup. Delete CMakeCache.txt if needed\")\n parser.add_argument(\n \"--arm64\", action='store_true',\n help=\"Create ARM64 makefiles. Requires --update and no existing cache \"\n \"CMake setup. Delete CMakeCache.txt if needed\")\n parser.add_argument(\n \"--msvc_toolset\", help=\"MSVC toolset to use. e.g. 14.11\")\n parser.add_argument(\"--android\", action='store_true', help='Build for Android')\n parser.add_argument(\n \"--android_abi\", default=\"arm64-v8a\",\n choices=[\"armeabi-v7a\", \"arm64-v8a\", \"x86\", \"x86_64\"],\n help=\"Specify the target Android Application Binary Interface (ABI)\")\n parser.add_argument(\"--android_api\", type=int, default=27, help='Android API Level, e.g. 21')\n parser.add_argument(\"--android_sdk_path\", type=str, help='Path to the Android SDK')\n parser.add_argument(\"--android_ndk_path\", default=\"\", help=\"Path to the Android NDK\")\n parser.add_argument(\"--android_cpp_shared\", action=\"store_true\",\n help=\"Build with shared libc++ instead of the default static libc++.\")\n parser.add_argument(\"--test_binary_size\", action=\"store_true\",\n help=\"If enabled, build will fail when the built binary size is larger than the threshold. \"\n \"This only applies to Android Minimal build for now.\")\n\n parser.add_argument(\"--ios\", action='store_true', help=\"build for ios\")\n parser.add_argument(\n \"--ios_sysroot\", default=\"\",\n help=\"Specify the location name of the macOS platform SDK to be used\")\n parser.add_argument(\n \"--ios_toolchain_dir\", default=\"\",\n help=\"Path to ios toolchain binaries\")\n parser.add_argument(\n \"--ios_toolchain_file\", default=\"\",\n help=\"Path to ios toolchain file, \"\n \"or cmake/onnxruntime_ios.toolchain.cmake will be used\")\n parser.add_argument(\n \"--xcode_code_signing_team_id\", default=\"\",\n help=\"The development team ID used for code signing in Xcode\")\n parser.add_argument(\n \"--use_xcode\", action='store_true',\n help=\"Use Xcode as cmake generator, this is only supported on MacOS.\")\n parser.add_argument(\n \"--osx_arch\", default=\"arm64\", choices=[\"arm64\", \"x86_64\"],\n help=\"Specify the Target specific architectures for macOS and iOS, This is only supported on MacOS\")\n parser.add_argument(\n \"--apple_deploy_target\", type=str,\n help=\"Specify the minimum version of the target platform \"\n \"(e.g. 
macOS or iOS)\"\n \"This is only supported on MacOS\")\n\n # Arguments needed by CI\n parser.add_argument(\n \"--cmake_path\", default=\"cmake\", help=\"Path to the CMake program.\")\n parser.add_argument(\n \"--ctest_path\", default=\"ctest\", help=\"Path to the CTest program.\")\n parser.add_argument(\n \"--skip_submodule_sync\", action='store_true', help=\"Don't do a \"\n \"'git submodule update'. Makes the Update phase faster.\")\n parser.add_argument(\n \"--use_vstest\", action='store_true',\n help=\"Use use_vstest for running unitests.\")\n parser.add_argument(\n \"--use_jemalloc\", action='store_true', help=\"Use jemalloc.\")\n parser.add_argument(\n \"--use_mimalloc\", default=['none'],\n choices=['none', 'stl', 'arena', 'all'], help=\"Use mimalloc.\")\n parser.add_argument(\n \"--use_openblas\", action='store_true', help=\"Build with OpenBLAS.\")\n parser.add_argument(\n \"--use_dnnl\", action='store_true', help=\"Build with DNNL.\")\n parser.add_argument(\n \"--use_mklml\", action='store_true', help=\"Build with MKLML.\")\n parser.add_argument(\n \"--use_featurizers\", action='store_true',\n help=\"Build with ML Featurizer support.\")\n parser.add_argument(\n \"--use_ngraph\", action='store_true', help=\"Build with nGraph.\")\n parser.add_argument(\n \"--use_openvino\", nargs=\"?\", const=\"CPU_FP32\",\n choices=[\"CPU_FP32\", \"GPU_FP32\", \"GPU_FP16\", \"VAD-M_FP16\",\n \"MYRIAD_FP16\", \"VAD-F_FP32\"],\n help=\"Build with OpenVINO for specific hardware.\")\n parser.add_argument(\n \"--use_nnapi\", action='store_true', help=\"Build with NNAPI support.\")\n parser.add_argument(\n \"--use_rknpu\", action='store_true', help=\"Build with RKNPU.\")\n parser.add_argument(\n \"--use_preinstalled_eigen\", action='store_true',\n help=\"Use pre-installed Eigen.\")\n parser.add_argument(\"--eigen_path\", help=\"Path to pre-installed Eigen.\")\n parser.add_argument(\n \"--use_openmp\", action='store_true', help=\"Build with OpenMP\")\n parser.add_argument(\n \"--enable_msinternal\", action=\"store_true\",\n help=\"Enable for Microsoft internal builds only.\")\n parser.add_argument(\"--llvm_path\", help=\"Path to llvm dir\")\n parser.add_argument(\n \"--use_vitisai\", action='store_true', help=\"Build with Vitis-AI\")\n parser.add_argument(\n \"--use_nuphar\", action='store_true', help=\"Build with nuphar\")\n parser.add_argument(\n \"--use_tensorrt\", action='store_true', help=\"Build with TensorRT\")\n parser.add_argument(\n \"--tensorrt_home\", help=\"Path to TensorRT installation dir\")\n parser.add_argument(\n \"--use_migraphx\", action='store_true', help=\"Build with MIGraphX\")\n parser.add_argument(\n \"--migraphx_home\", help=\"Path to MIGraphX installation dir\")\n parser.add_argument(\n \"--use_full_protobuf\", action='store_true',\n help=\"Use the full protobuf library\")\n\n parser.add_argument(\n \"--skip_onnx_tests\", action='store_true', help=\"Explicitly disable \"\n \"all onnx related tests. 
Note: Use --skip_tests to skip all tests.\")\n parser.add_argument(\n \"--skip_winml_tests\", action='store_true',\n help=\"Explicitly disable all WinML related tests\")\n parser.add_argument(\n \"--skip_nodejs_tests\", action='store_true',\n help=\"Explicitly disable all Node.js binding tests\")\n parser.add_argument(\n \"--enable_msvc_static_runtime\", action='store_true',\n help=\"Enable static linking of MSVC runtimes.\")\n parser.add_argument(\n \"--enable_language_interop_ops\", action='store_true',\n help=\"Enable operator implemented in language other than cpp\")\n parser.add_argument(\n \"--cmake_generator\",\n choices=['Visual Studio 15 2017', 'Visual Studio 16 2019', 'Ninja'],\n default='Visual Studio 15 2017' if is_windows() else None,\n help=\"Specify the generator that CMake invokes. \"\n \"This is only supported on Windows\")\n parser.add_argument(\n \"--enable_multi_device_test\", action='store_true',\n help=\"Test with multi-device. Mostly used for multi-device GPU\")\n parser.add_argument(\n \"--use_dml\", action='store_true', help=\"Build with DirectML.\")\n parser.add_argument(\n \"--use_winml\", action='store_true', help=\"Build with WinML.\")\n parser.add_argument(\n \"--winml_root_namespace_override\", type=str,\n help=\"Specify the namespace that WinML builds into.\")\n parser.add_argument(\n \"--use_telemetry\", action='store_true',\n help=\"Only official builds can set this flag to enable telemetry.\")\n parser.add_argument(\n \"--enable_wcos\", action='store_true',\n help=\"Build for Windows Core OS.\")\n parser.add_argument(\n \"--enable_windows_store\", action='store_true',\n help=\"Build for Windows Store\")\n parser.add_argument(\n \"--enable_lto\", action='store_true',\n help=\"Enable Link Time Optimization\")\n parser.add_argument(\n \"--use_acl\", nargs=\"?\", const=\"ACL_1905\",\n choices=[\"ACL_1902\", \"ACL_1905\", \"ACL_1908\", \"ACL_2002\"],\n help=\"Build with ACL for ARM architectures.\")\n parser.add_argument(\n \"--use_armnn\", action='store_true',\n help=\"Enable ArmNN Execution Provider.\")\n parser.add_argument(\n \"--armnn_relu\", action='store_true',\n help=\"Use the Relu operator implementation from the ArmNN EP.\")\n parser.add_argument(\n \"--armnn_bn\", action='store_true',\n help=\"Use the Batch Normalization operator implementation from the ArmNN EP.\")\n parser.add_argument(\n \"--build_micro_benchmarks\", action='store_true',\n help=\"Build ONNXRuntime micro-benchmarks.\")\n\n # options to reduce binary size\n parser.add_argument(\"--minimal_build\", action='store_true',\n help=\"Create a build that only supports ORT format models. \"\n \"See /docs/ONNX_Runtime_Format_Model_Usage.md for more information. \"\n \"RTTI is automatically disabled in a minimal build.\")\n parser.add_argument(\"--include_ops_by_model\", type=str, help=\"include ops from model(s) under designated path.\")\n parser.add_argument(\"--include_ops_by_config\", type=str,\n help=\"include ops from config file. \"\n \"See /docs/Reduced_Operator_Kernel_build.md for more information.\")\n\n parser.add_argument(\"--disable_contrib_ops\", action='store_true',\n help=\"Disable contrib ops (reduces binary size)\")\n parser.add_argument(\"--disable_ml_ops\", action='store_true',\n help=\"Disable traditional ML ops (reduces binary size)\")\n parser.add_argument(\"--disable_rtti\", action='store_true', help=\"Disable RTTI (reduces binary size)\")\n parser.add_argument(\"--disable_exceptions\", action='store_true',\n help=\"Disable exceptions to reduce binary size. 
Requires --minimal_build.\")\n parser.add_argument(\"--disable_ort_format_load\", action='store_true',\n help='Disable support for loading ORT format models in a non-minimal build.')\n\n return parser.parse_args()\n\n\ndef resolve_executable_path(command_or_path):\n \"\"\"Returns the absolute path of an executable.\"\"\"\n executable_path = shutil.which(command_or_path)\n if executable_path is None:\n raise BuildError(\"Failed to resolve executable path for \"\n \"'{}'.\".format(command_or_path))\n return os.path.realpath(executable_path)\n\n\ndef is_windows():\n return sys.platform.startswith(\"win\")\n\n\ndef is_macOS():\n return sys.platform.startswith(\"darwin\")\n\n\ndef is_linux():\n return sys.platform.startswith(\"linux\")\n\n\ndef get_linux_distro():\n try:\n with open('/etc/os-release', 'r') as f:\n dist_info = dict(\n line.strip().split('=', 1) for line in f.readlines())\n return dist_info.get('NAME', '').strip('\"'), dist_info.get(\n 'VERSION', '').strip('\"')\n except (IOError, ValueError):\n return '', ''\n\n\ndef is_ubuntu_1604():\n dist, ver = get_linux_distro()\n return dist == 'Ubuntu' and ver.startswith('16.04')\n\n\ndef get_config_build_dir(build_dir, config):\n # build directory per configuration\n return os.path.join(build_dir, config)\n\n\ndef run_subprocess(args, cwd=None, capture=False, dll_path=None,\n shell=False, env={}):\n log.info(\"Running subprocess in '{0}'\\n{1}\".format(\n cwd or os.getcwd(), args))\n my_env = os.environ.copy()\n if dll_path:\n if is_windows():\n my_env[\"PATH\"] = dll_path + os.pathsep + my_env[\"PATH\"]\n else:\n if \"LD_LIBRARY_PATH\" in my_env:\n my_env[\"LD_LIBRARY_PATH\"] += os.pathsep + dll_path\n else:\n my_env[\"LD_LIBRARY_PATH\"] = dll_path\n\n stdout, stderr = (subprocess.PIPE, subprocess.STDOUT) if capture else (\n None, None)\n my_env.update(env)\n completed_process = subprocess.run(\n args, cwd=cwd, check=True, stdout=stdout, stderr=stderr,\n env=my_env, shell=shell)\n log.debug(\"Subprocess completed. Return code=\" +\n str(completed_process.returncode))\n return completed_process\n\n\ndef update_submodules(source_dir):\n run_subprocess([\"git\", \"submodule\", \"sync\", \"--recursive\"], cwd=source_dir)\n run_subprocess([\"git\", \"submodule\", \"update\", \"--init\", \"--recursive\"],\n cwd=source_dir)\n\n\ndef is_docker():\n path = '/proc/self/cgroup'\n return (\n os.path.exists('/.dockerenv') or\n os.path.isfile(path) and any('docker' in line for line in open(path))\n )\n\n\ndef is_sudo():\n return 'SUDO_UID' in os.environ.keys()\n\n\ndef install_apt_package(package):\n have = package in str(run_subprocess(\n [\"apt\", \"list\", \"--installed\", package], capture=True).stdout)\n if not have:\n if is_sudo():\n run_subprocess(['apt-get', 'install', '-y', package])\n else:\n raise BuildError(package + \" APT package missing. Please re-run \"\n \"this script using sudo to install.\")\n\n\ndef install_ubuntu_deps(args):\n \"\"\"Check if the necessary Ubuntu dependencies are installed.\n Not required on docker. Provide help output if missing.\"\"\"\n\n # check we need the packages first\n if not (args.enable_pybind or args.use_openblas):\n return\n\n # not needed on docker as packages are pre-installed\n if not is_docker():\n try:\n if args.enable_pybind:\n install_apt_package(\"python3\")\n\n if args.use_openblas:\n install_apt_package(\"libopenblas-dev\")\n\n except Exception as e:\n raise BuildError(\"Error setting up required APT packages. 
\"\n \"{}\".format(str(e)))\n\n\ndef install_python_deps(numpy_version=\"\"):\n dep_packages = ['setuptools', 'wheel', 'pytest']\n dep_packages.append('numpy=={}'.format(numpy_version) if numpy_version\n else 'numpy>=1.16.6')\n dep_packages.append('sympy>=1.1')\n dep_packages.append('packaging')\n dep_packages.append('cerberus')\n run_subprocess([sys.executable, '-m', 'pip', 'install', '--trusted-host',\n 'files.pythonhosted.org'] + dep_packages)\n\n\n# We need to install Torch to test certain functionalities of the ORT Python package\ndef install_torch():\n # Command works for both Windows\n run_subprocess([sys.executable, '-m', 'pip', 'install', '--trusted-host',\n 'files.pythonhosted.org', 'torch===1.5.1+cu101', 'torchvision===0.6.1+cu101',\n '-f', 'https://download.pytorch.org/whl/torch_stable.html'])\n\n\ndef check_md5(filename, expected_md5):\n if not os.path.exists(filename):\n return False\n hash_md5 = hashlib.md5()\n BLOCKSIZE = 1024*64\n with open(filename, \"rb\") as f:\n buf = f.read(BLOCKSIZE)\n while len(buf) > 0:\n hash_md5.update(buf)\n buf = f.read(BLOCKSIZE)\n hex = hash_md5.hexdigest()\n if hex != expected_md5:\n log.info('md5 mismatch, expect %s, got %s' % (expected_md5, hex))\n os.remove(filename)\n return False\n return True\n\n\ndef setup_test_data(build_dir, configs):\n # create a shortcut for test models if there is a 'models'\n # folder in build_dir\n if is_windows():\n src_model_dir = os.path.join(build_dir, 'models')\n if os.path.exists('C:\\\\local\\\\models') and not os.path.exists(\n src_model_dir):\n log.debug(\"creating shortcut %s -> %s\" % (\n 'C:\\\\local\\\\models', src_model_dir))\n run_subprocess(['mklink', '/D', '/J', src_model_dir,\n 'C:\\\\local\\\\models'], shell=True)\n for config in configs:\n config_build_dir = get_config_build_dir(build_dir, config)\n os.makedirs(config_build_dir, exist_ok=True)\n dest_model_dir = os.path.join(config_build_dir, 'models')\n if os.path.exists('C:\\\\local\\\\models') and not os.path.exists(\n dest_model_dir):\n log.debug(\"creating shortcut %s -> %s\" % (\n 'C:\\\\local\\\\models', dest_model_dir))\n run_subprocess(['mklink', '/D', '/J', dest_model_dir,\n 'C:\\\\local\\\\models'], shell=True)\n elif os.path.exists(src_model_dir) and not os.path.exists(\n dest_model_dir):\n log.debug(\"creating shortcut %s -> %s\" % (\n src_model_dir, dest_model_dir))\n run_subprocess(['mklink', '/D', '/J', dest_model_dir,\n src_model_dir], shell=True)\n\n\ndef use_dev_mode(args):\n if args.use_acl:\n return 'OFF'\n if args.use_armnn:\n return 'OFF'\n if args.ios and is_macOS():\n return 'OFF'\n return 'ON'\n\n\ndef generate_build_tree(cmake_path, source_dir, build_dir, cuda_home, cudnn_home,\n mpi_home, nccl_home, tensorrt_home, migraphx_home,\n path_to_protoc_exe, configs, cmake_extra_defines, args, cmake_extra_args):\n log.info(\"Generating CMake build tree\")\n cmake_dir = os.path.join(source_dir, \"cmake\")\n # TODO: fix jemalloc build so it does not conflict with onnxruntime\n # shared lib builds. (e.g. 
onnxuntime_pybind)\n # for now, disable jemalloc if pybind is also enabled.\n cmake_args = [\n cmake_path, cmake_dir,\n \"-Donnxruntime_RUN_ONNX_TESTS=\" + (\n \"ON\" if args.enable_onnx_tests else \"OFF\"),\n \"-Donnxruntime_BUILD_WINML_TESTS=\" + (\n \"OFF\" if args.skip_winml_tests else \"ON\"),\n \"-Donnxruntime_GENERATE_TEST_REPORTS=ON\",\n \"-Donnxruntime_DEV_MODE=\" + use_dev_mode(args),\n \"-DPYTHON_EXECUTABLE=\" + sys.executable,\n \"-Donnxruntime_USE_CUDA=\" + (\"ON\" if args.use_cuda else \"OFF\"),\n \"-Donnxruntime_CUDNN_HOME=\" + (cudnn_home if args.use_cuda else \"\"),\n \"-Donnxruntime_USE_FEATURIZERS=\" + (\n \"ON\" if args.use_featurizers else \"OFF\"),\n \"-Donnxruntime_CUDA_HOME=\" + (cuda_home if args.use_cuda else \"\"),\n \"-Donnxruntime_USE_JEMALLOC=\" + (\"ON\" if args.use_jemalloc else \"OFF\"),\n \"-Donnxruntime_USE_MIMALLOC_STL_ALLOCATOR=\" + (\n \"ON\" if args.use_mimalloc == \"stl\" or\n args.use_mimalloc == \"all\" else \"OFF\"),\n \"-Donnxruntime_USE_MIMALLOC_ARENA_ALLOCATOR=\" + (\n \"ON\" if args.use_mimalloc == \"arena\" or\n args.use_mimalloc == \"all\" else \"OFF\"),\n \"-Donnxruntime_ENABLE_PYTHON=\" + (\n \"ON\" if args.enable_pybind else \"OFF\"),\n \"-Donnxruntime_BUILD_CSHARP=\" + (\"ON\" if args.build_csharp else \"OFF\"),\n \"-Donnxruntime_BUILD_JAVA=\" + (\"ON\" if args.build_java else \"OFF\"),\n \"-Donnxruntime_BUILD_NODEJS=\" + (\"ON\" if args.build_nodejs else \"OFF\"),\n \"-Donnxruntime_BUILD_SHARED_LIB=\" + (\n \"ON\" if args.build_shared_lib else \"OFF\"),\n \"-Donnxruntime_USE_EIGEN_FOR_BLAS=\" + (\n \"OFF\" if args.use_openblas else \"ON\"),\n \"-Donnxruntime_USE_OPENBLAS=\" + (\"ON\" if args.use_openblas else \"OFF\"),\n \"-Donnxruntime_USE_DNNL=\" + (\"ON\" if args.use_dnnl else \"OFF\"),\n \"-Donnxruntime_USE_MKLML=\" + (\"ON\" if args.use_mklml else \"OFF\"),\n \"-Donnxruntime_USE_NGRAPH=\" + (\"ON\" if args.use_ngraph else \"OFF\"),\n \"-Donnxruntime_USE_NNAPI_BUILTIN=\" + (\"ON\" if args.use_nnapi else \"OFF\"),\n \"-Donnxruntime_USE_RKNPU=\" + (\"ON\" if args.use_rknpu else \"OFF\"),\n \"-Donnxruntime_USE_OPENMP=\" + (\n \"ON\" if args.use_openmp and not (\n args.use_nnapi or (args.use_mklml and (is_macOS() or is_windows())) or args.use_ngraph or\n args.android or (args.ios and is_macOS())\n or args.use_rknpu)\n else \"OFF\"),\n \"-Donnxruntime_USE_TVM=\" + (\"ON\" if args.use_nuphar else \"OFF\"),\n \"-Donnxruntime_USE_LLVM=\" + (\"ON\" if args.use_nuphar else \"OFF\"),\n \"-Donnxruntime_ENABLE_MICROSOFT_INTERNAL=\" + (\n \"ON\" if args.enable_msinternal else \"OFF\"),\n \"-Donnxruntime_USE_VITISAI=\" + (\"ON\" if args.use_vitisai else \"OFF\"),\n \"-Donnxruntime_USE_NUPHAR=\" + (\"ON\" if args.use_nuphar else \"OFF\"),\n \"-Donnxruntime_USE_TENSORRT=\" + (\"ON\" if args.use_tensorrt else \"OFF\"),\n \"-Donnxruntime_TENSORRT_HOME=\" + (\n tensorrt_home if args.use_tensorrt else \"\"),\n # set vars for migraphx\n \"-Donnxruntime_USE_MIGRAPHX=\" + (\"ON\" if args.use_migraphx else \"OFF\"),\n \"-Donnxruntime_MIGRAPHX_HOME=\" + (migraphx_home if args.use_migraphx else \"\"),\n # By default - we currently support only cross compiling for\n # ARM/ARM64 (no native compilation supported through this\n # script).\n \"-Donnxruntime_CROSS_COMPILING=\" + (\n \"ON\" if args.arm64 or args.arm else \"OFF\"),\n \"-Donnxruntime_DISABLE_CONTRIB_OPS=\" + (\"ON\" if args.disable_contrib_ops else \"OFF\"),\n \"-Donnxruntime_DISABLE_ML_OPS=\" + (\"ON\" if args.disable_ml_ops else \"OFF\"),\n \"-Donnxruntime_DISABLE_RTTI=\" + (\"ON\" if 
args.disable_rtti else \"OFF\"),\n \"-Donnxruntime_DISABLE_EXCEPTIONS=\" + (\"ON\" if args.disable_exceptions else \"OFF\"),\n \"-Donnxruntime_DISABLE_ORT_FORMAT_LOAD=\" + (\"ON\" if args.disable_ort_format_load else \"OFF\"),\n \"-Donnxruntime_MINIMAL_BUILD=\" + (\"ON\" if args.minimal_build else \"OFF\"),\n \"-Donnxruntime_REDUCED_OPS_BUILD=\" + (\n \"ON\" if args.include_ops_by_config or args.include_ops_by_model else \"OFF\"),\n \"-Donnxruntime_MSVC_STATIC_RUNTIME=\" + (\n \"ON\" if args.enable_msvc_static_runtime else \"OFF\"),\n # enable pyop if it is nightly build\n \"-Donnxruntime_ENABLE_LANGUAGE_INTEROP_OPS=\" + (\n \"ON\" if args.enable_language_interop_ops else \"OFF\"),\n \"-Donnxruntime_USE_DML=\" + (\"ON\" if args.use_dml else \"OFF\"),\n \"-Donnxruntime_USE_WINML=\" + (\"ON\" if args.use_winml else \"OFF\"),\n \"-Donnxruntime_USE_TELEMETRY=\" + (\n \"ON\" if args.use_telemetry else \"OFF\"),\n \"-Donnxruntime_ENABLE_LTO=\" + (\"ON\" if args.enable_lto else \"OFF\"),\n \"-Donnxruntime_USE_ACL=\" + (\"ON\" if args.use_acl else \"OFF\"),\n \"-Donnxruntime_USE_ACL_1902=\" + (\n \"ON\" if args.use_acl == \"ACL_1902\" else \"OFF\"),\n \"-Donnxruntime_USE_ACL_1905=\" + (\n \"ON\" if args.use_acl == \"ACL_1905\" else \"OFF\"),\n \"-Donnxruntime_USE_ACL_1908=\" + (\n \"ON\" if args.use_acl == \"ACL_1908\" else \"OFF\"),\n \"-Donnxruntime_USE_ACL_2002=\" + (\n \"ON\" if args.use_acl == \"ACL_2002\" else \"OFF\"),\n \"-Donnxruntime_USE_ARMNN=\" + (\n \"ON\" if args.use_armnn else \"OFF\"),\n \"-Donnxruntime_ARMNN_RELU_USE_CPU=\" + (\n \"OFF\" if args.armnn_relu else \"ON\"),\n \"-Donnxruntime_ARMNN_BN_USE_CPU=\" + (\n \"OFF\" if args.armnn_bn else \"ON\"),\n # Training related flags\n \"-Donnxruntime_ENABLE_NVTX_PROFILE=\" + (\n \"ON\" if args.enable_nvtx_profile else \"OFF\"),\n \"-Donnxruntime_ENABLE_TRAINING=\" + (\n \"ON\" if args.enable_training else \"OFF\"),\n \"-Donnxruntime_USE_HOROVOD=\" + (\n \"ON\" if args.use_horovod else \"OFF\"),\n \"-Donnxruntime_BUILD_BENCHMARKS=\" + (\n \"ON\" if args.build_micro_benchmarks else \"OFF\")\n ]\n\n if mpi_home and os.path.exists(mpi_home):\n cmake_args += [\"-Donnxruntime_MPI_HOME=\" + mpi_home]\n\n if nccl_home and os.path.exists(nccl_home):\n cmake_args += [\"-Donnxruntime_NCCL_HOME=\" + nccl_home]\n\n if args.winml_root_namespace_override:\n cmake_args += [\"-Donnxruntime_WINML_NAMESPACE_OVERRIDE=\" +\n args.winml_root_namespace_override]\n if args.use_openvino:\n cmake_args += [\"-Donnxruntime_USE_OPENVINO=ON\",\n \"-Donnxruntime_USE_OPENVINO_MYRIAD=\" + (\n \"ON\" if args.use_openvino == \"MYRIAD_FP16\" else \"OFF\"),\n \"-Donnxruntime_USE_OPENVINO_GPU_FP32=\" + (\n \"ON\" if args.use_openvino == \"GPU_FP32\" else \"OFF\"),\n \"-Donnxruntime_USE_OPENVINO_GPU_FP16=\" + (\n \"ON\" if args.use_openvino == \"GPU_FP16\" else \"OFF\"),\n \"-Donnxruntime_USE_OPENVINO_CPU_FP32=\" + (\n \"ON\" if args.use_openvino == \"CPU_FP32\" else \"OFF\"),\n \"-Donnxruntime_USE_OPENVINO_VAD_M=\" + (\n \"ON\" if args.use_openvino == \"VAD-M_FP16\" else \"OFF\"),\n \"-Donnxruntime_USE_OPENVINO_VAD_F=\" + (\n \"ON\" if args.use_openvino == \"VAD-F_FP32\" else \"OFF\"),\n \"-Donnxruntime_USE_OPENVINO_BINARY=\" + (\n \"ON\" if args.use_openvino else \"OFF\")]\n # temp turn on only for linux gpu build\n if not is_windows():\n if args.use_cuda:\n cmake_args += [\n \"-Donnxruntime_USE_FULL_PROTOBUF=ON\"]\n\n # nGraph, TensorRT and OpenVINO providers currently only supports\n # full_protobuf option.\n if (args.use_full_protobuf or args.use_ngraph or 
args.use_tensorrt or\n args.use_openvino or args.use_vitisai or args.gen_doc):\n cmake_args += [\n \"-Donnxruntime_USE_FULL_PROTOBUF=ON\",\n \"-DProtobuf_USE_STATIC_LIBS=ON\"\n ]\n\n if args.use_nuphar and args.llvm_path is not None:\n cmake_args += [\"-DLLVM_DIR=%s\" % args.llvm_path]\n\n if args.use_cuda and not is_windows():\n nvml_stub_path = cuda_home + \"/lib64/stubs\"\n cmake_args += [\"-DCUDA_CUDA_LIBRARY=\" + nvml_stub_path]\n\n if args.use_preinstalled_eigen:\n cmake_args += [\"-Donnxruntime_USE_PREINSTALLED_EIGEN=ON\",\n \"-Deigen_SOURCE_PATH=\" + args.eigen_path]\n\n if args.android:\n cmake_args += [\n \"-DCMAKE_TOOLCHAIN_FILE=\" + args.android_ndk_path +\n \"/build/cmake/android.toolchain.cmake\",\n \"-DANDROID_PLATFORM=android-\" + str(args.android_api),\n \"-DANDROID_ABI=\" + str(args.android_abi)\n ]\n\n if args.android_cpp_shared:\n cmake_args += [\"-DANDROID_STL=c++_shared\"]\n\n if args.ios:\n if is_macOS():\n needed_args = [\n args.use_xcode,\n args.ios_sysroot,\n args.apple_deploy_target,\n ]\n arg_names = [\n \"--use_xcode \" +\n \"<need use xcode to cross build iOS on MacOS>\",\n \"--ios_sysroot \" +\n \"<the location or name of the macOS platform SDK>\",\n \"--apple_deploy_target \" +\n \"<the minimum version of the target platform>\",\n ]\n if not all(needed_args):\n raise BuildError(\n \"iOS build on MacOS canceled due to missing arguments: \" +\n ', '.join(\n val for val, cond in zip(arg_names, needed_args)\n if not cond))\n cmake_args += [\n \"-DCMAKE_SYSTEM_NAME=iOS\",\n \"-Donnxruntime_BUILD_SHARED_LIB=ON\",\n \"-DCMAKE_OSX_SYSROOT=\" + args.ios_sysroot,\n \"-DCMAKE_OSX_ARCHITECTURES=\" + args.osx_arch,\n \"-DCMAKE_OSX_DEPLOYMENT_TARGET=\" + args.apple_deploy_target,\n # we do not need protoc binary for ios cross build\n \"-Dprotobuf_BUILD_PROTOC_BINARIES=OFF\",\n \"-DCMAKE_TOOLCHAIN_FILE=\" + (\n args.ios_toolchain_file if args.ios_toolchain_file\n else \"../cmake/onnxruntime_ios.toolchain.cmake\")\n ]\n # Code sign the binaries, if the code signing development team id is provided\n if args.xcode_code_signing_team_id:\n cmake_args += [\"-DCMAKE_XCODE_ATTRIBUTE_DEVELOPMENT_TEAM=\" + args.xcode_code_signing_team_id]\n else:\n # TODO: the cross compiling on Linux is not officially supported by Apple\n # and is already broken with the latest codebase, so it should be removed.\n # We are cross compiling on Linux\n needed_args = [\n args.ios_sysroot,\n args.arm64 or args.arm,\n args.ios_toolchain_dir\n ]\n arg_names = [\n \"--ios_sysroot <path to sysroot>\",\n \"--arm or --arm64\",\n \"--ios_toolchain_dir <path to toolchain>\"\n ]\n if not all(needed_args):\n raise BuildError(\n \"iOS build canceled due to missing arguments: \" +\n ', '.join(\n val for val, cond in zip(arg_names, needed_args)\n if not cond))\n compilers = sorted(\n glob.glob(args.ios_toolchain_dir + \"/bin/*-clang*\"))\n os.environ[\"PATH\"] = os.path.join(\n args.ios_toolchain_dir, \"bin\") + os.pathsep + os.environ.get(\n \"PATH\", \"\")\n os.environ[\"LD_LIBRARY_PATH\"] = os.path.join(\n args.ios_toolchain_dir, \"/lib\") + os.pathsep + os.environ.get(\n \"LD_LIBRARY_PATH\", \"\")\n if len(compilers) != 2:\n raise BuildError(\n \"error identifying compilers in ios_toolchain_dir\")\n cmake_args += [\n \"-DCMAKE_OSX_ARCHITECTURES=\" +\n (\"arm64\" if args.arm64 else \"arm\"),\n \"-DCMAKE_SYSTEM_NAME=iOSCross\",\n \"-Donnxruntime_BUILD_UNIT_TESTS=OFF\",\n \"-DCMAKE_OSX_SYSROOT=\" + args.ios_sysroot,\n \"-DCMAKE_C_COMPILER=\" + compilers[0],\n \"-DCMAKE_CXX_COMPILER=\" + compilers[1]\n ]\n\n if 
path_to_protoc_exe:\n cmake_args += [\n \"-DONNX_CUSTOM_PROTOC_EXECUTABLE=%s\" % path_to_protoc_exe]\n\n if args.fuzz_testing:\n if not (args.build_shared_lib and\n is_windows() and\n args.cmake_generator == 'Visual Studio 16 2019' and\n args.use_full_protobuf):\n raise BuildError(\n \"Fuzz test has only be tested with build shared libs option using MSVC on windows\")\n cmake_args += [\n \"-Donnxruntime_BUILD_UNIT_TESTS=ON\",\n \"-Donnxruntime_FUZZ_TEST=ON\",\n \"-Donnxruntime_USE_FULL_PROTOBUF=ON\"]\n\n if args.gen_doc:\n cmake_args += [\"-Donnxruntime_PYBIND_EXPORT_OPSCHEMA=ON\"]\n else:\n cmake_args += [\"-Donnxruntime_PYBIND_EXPORT_OPSCHEMA=OFF\"]\n\n cmake_args += [\"-D{}\".format(define) for define in cmake_extra_defines]\n\n cmake_args += cmake_extra_args\n\n # ADO pipelines will store the pipeline build number\n # (e.g. 191101-2300.1.master) and source version in environment\n # variables. If present, use these values to define the\n # WinML/ORT DLL versions.\n build_number = os.getenv('Build_BuildNumber')\n source_version = os.getenv('Build_SourceVersion')\n if build_number and source_version:\n build_matches = re.fullmatch(\n r\"(\\d\\d)(\\d\\d)(\\d\\d)(\\d\\d)\\.(\\d+)\", build_number)\n if build_matches:\n YY = build_matches.group(2)\n MM = build_matches.group(3)\n DD = build_matches.group(4)\n\n # Get ORT major and minor number\n with open(os.path.join(source_dir, 'VERSION_NUMBER')) as f:\n first_line = f.readline()\n ort_version_matches = re.match(r\"(\\d+).(\\d+)\", first_line)\n if not ort_version_matches:\n raise BuildError(\"Couldn't read version from VERSION_FILE\")\n ort_major = ort_version_matches.group(1)\n ort_minor = ort_version_matches.group(2)\n # Example (BuildNumber: 191101-2300.1.master,\n # SourceVersion: 0bce7ae6755c792eda558e5d27ded701707dc404)\n # MajorPart = 1\n # MinorPart = 0\n # BuildPart = 1911\n # PrivatePart = 123\n # String = 191101-2300.1.master.0bce7ae\n cmake_args += [\n \"-DVERSION_MAJOR_PART={}\".format(ort_major),\n \"-DVERSION_MINOR_PART={}\".format(ort_minor),\n \"-DVERSION_BUILD_PART={}\".format(YY),\n \"-DVERSION_PRIVATE_PART={}{}\".format(MM, DD),\n \"-DVERSION_STRING={}.{}.{}.{}\".format(\n ort_major, ort_minor, build_number,\n source_version[0:7])\n ]\n\n for config in configs:\n config_build_dir = get_config_build_dir(build_dir, config)\n os.makedirs(config_build_dir, exist_ok=True)\n if args.use_nuphar:\n os.environ[\"PATH\"] = os.path.join(\n config_build_dir, \"external\", \"tvm\",\n config) + os.pathsep + os.path.dirname(sys.executable) + os.pathsep + os.environ[\"PATH\"]\n\n run_subprocess(\n cmake_args + [\n \"-Donnxruntime_ENABLE_MEMLEAK_CHECKER=\" +\n (\"ON\" if config.lower() == 'debug' and not args.use_nuphar and not\n args.use_ngraph and not args.use_openvino and not\n args.enable_msvc_static_runtime\n else \"OFF\"), \"-DCMAKE_BUILD_TYPE={}\".format(config)],\n cwd=config_build_dir)\n\n\ndef clean_targets(cmake_path, build_dir, configs):\n for config in configs:\n log.info(\"Cleaning targets for %s configuration\", config)\n build_dir2 = get_config_build_dir(build_dir, config)\n cmd_args = [cmake_path,\n \"--build\", build_dir2,\n \"--config\", config,\n \"--target\", \"clean\"]\n\n run_subprocess(cmd_args)\n\n\ndef build_targets(args, cmake_path, build_dir, configs, parallel, target=None):\n for config in configs:\n log.info(\"Building targets for %s configuration\", config)\n build_dir2 = get_config_build_dir(build_dir, config)\n cmd_args = [cmake_path,\n \"--build\", build_dir2,\n \"--config\", config]\n if target:\n 
cmd_args.extend(['--target', target])\n\n build_tool_args = []\n if parallel:\n num_cores = str(multiprocessing.cpu_count())\n if is_windows() and args.cmake_generator != 'Ninja':\n build_tool_args += [\n \"/maxcpucount:\" + num_cores,\n # if nodeReuse is true, msbuild processes will stay around for a bit after the build completes\n \"/nodeReuse:False\",\n ]\n elif (is_macOS() and args.use_xcode):\n # CMake will generate correct build tool args for Xcode\n cmd_args += [\"--parallel\", num_cores]\n elif args.cmake_generator != 'Ninja':\n build_tool_args += [\"-j\" + num_cores]\n\n if build_tool_args:\n cmd_args += [\"--\"]\n cmd_args += build_tool_args\n\n env = {}\n if args.android:\n env['ANDROID_SDK_ROOT'] = args.android_sdk_path\n\n run_subprocess(cmd_args, env=env)\n\n\ndef add_dir_if_exists(directory, dir_list):\n if os.path.isdir(directory):\n dir_list.append(directory)\n\n\ndef setup_cuda_vars(args):\n cuda_home = \"\"\n cudnn_home = \"\"\n\n if args.use_cuda:\n cuda_home = args.cuda_home if args.cuda_home else os.getenv(\n \"CUDA_HOME\")\n cudnn_home = args.cudnn_home if args.cudnn_home else os.getenv(\n \"CUDNN_HOME\")\n\n cuda_home_valid = (cuda_home is not None and os.path.exists(cuda_home))\n cudnn_home_valid = (cudnn_home is not None and os.path.exists(\n cudnn_home))\n\n if not cuda_home_valid or not cudnn_home_valid:\n raise BuildError(\n \"cuda_home and cudnn_home paths must be specified and valid.\",\n \"cuda_home='{}' valid={}. cudnn_home='{}' valid={}\"\n .format(\n cuda_home, cuda_home_valid, cudnn_home, cudnn_home_valid))\n\n return cuda_home, cudnn_home\n\n\ndef setup_tensorrt_vars(args):\n tensorrt_home = \"\"\n if args.use_tensorrt:\n tensorrt_home = (args.tensorrt_home if args.tensorrt_home\n else os.getenv(\"TENSORRT_HOME\"))\n tensorrt_home_valid = (tensorrt_home is not None and\n os.path.exists(tensorrt_home))\n if not tensorrt_home_valid:\n raise BuildError(\n \"tensorrt_home paths must be specified and valid.\",\n \"tensorrt_home='{}' valid={}.\"\n .format(tensorrt_home, tensorrt_home_valid))\n\n # Set maximum workspace size in byte for\n # TensorRT (1GB = 1073741824 bytes).\n os.environ[\"ORT_TENSORRT_MAX_WORKSPACE_SIZE\"] = \"1073741824\"\n\n # Set maximum number of iterations to detect unsupported nodes\n # and partition the models for TensorRT.\n os.environ[\"ORT_TENSORRT_MAX_PARTITION_ITERATIONS\"] = \"1000\"\n\n # Set minimum subgraph node size in graph partitioning\n # for TensorRT.\n os.environ[\"ORT_TENSORRT_MIN_SUBGRAPH_SIZE\"] = \"1\"\n\n # Set FP16 flag\n os.environ[\"ORT_TENSORRT_FP16_ENABLE\"] = \"0\"\n\n return tensorrt_home\n\n\ndef setup_migraphx_vars(args):\n\n migraphx_home = None\n\n if (args.use_migraphx):\n print(\"migraphx_home = {}\".format(args.migraphx_home))\n migraphx_home = args.migraphx_home or os.getenv(\"MIGRAPHX_HOME\") or None\n\n migraphx_home_not_valid = (migraphx_home and not os.path.exists(migraphx_home))\n\n if (migraphx_home_not_valid):\n raise BuildError(\"migraphx_home paths must be specified and valid.\",\n \"migraphx_home='{}' valid={}.\"\n .format(migraphx_home, migraphx_home_not_valid))\n return migraphx_home or ''\n\n\ndef setup_dml_build(args, cmake_path, build_dir, configs):\n if args.use_dml:\n for config in configs:\n # Run the RESTORE_PACKAGES target to perform the initial\n # NuGet setup.\n cmd_args = [cmake_path,\n \"--build\", get_config_build_dir(build_dir, config),\n \"--config\", config,\n \"--target\", \"RESTORE_PACKAGES\"]\n run_subprocess(cmd_args)\n\n\ndef adb_push(src, dest, **kwargs):\n return 
run_subprocess(['adb', 'push', src, dest], **kwargs)\n\n\ndef adb_shell(*args, **kwargs):\n return run_subprocess(['adb', 'shell', *args], **kwargs)\n\n\ndef run_android_tests(args, source_dir, config, cwd):\n if args.android_abi == 'x86_64':\n run_subprocess(os.path.join(\n source_dir, 'tools', 'ci_build', 'github', 'android',\n 'start_android_emulator.sh'))\n adb_push('testdata', '/data/local/tmp/', cwd=cwd)\n adb_push(\n os.path.join(source_dir, 'cmake', 'external', 'onnx', 'onnx', 'backend', 'test'),\n '/data/local/tmp/', cwd=cwd)\n adb_push('onnxruntime_test_all', '/data/local/tmp/', cwd=cwd)\n adb_push('onnx_test_runner', '/data/local/tmp/', cwd=cwd)\n adb_shell('cd /data/local/tmp && /data/local/tmp/onnxruntime_test_all')\n if args.use_nnapi:\n adb_shell('cd /data/local/tmp && /data/local/tmp/onnx_test_runner -e nnapi /data/local/tmp/test')\n else:\n adb_shell('cd /data/local/tmp && /data/local/tmp/onnx_test_runner /data/local/tmp/test')\n # run shared_lib_test if necessary\n if args.build_shared_lib:\n adb_push('libonnxruntime.so', '/data/local/tmp/', cwd=cwd)\n adb_push('onnxruntime_shared_lib_test', '/data/local/tmp/', cwd=cwd)\n adb_shell(\n 'cd /data/local/tmp && ' +\n 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp && ' +\n '/data/local/tmp/onnxruntime_shared_lib_test')\n elif args.android_abi == 'arm64-v8a':\n # For Android arm64 abi we are only verify the size of the binary generated by minimal build config\n # Will fail the build if the shared_lib size is larger than the threshold\n if args.minimal_build and config == 'MinSizeRel' and args.build_shared_lib and args.test_binary_size:\n # set current size limit to 1100KB\n bin_size_threshold = 1100000\n bin_actual_size = os.path.getsize(os.path.join(cwd, 'libonnxruntime.so'))\n log.info('Android arm64 minsizerel libonnxruntime.so size [' + str(bin_actual_size) + 'B]')\n # Write the binary size to a file for uploading later\n with open(os.path.join(cwd, 'binary_size_data.txt'), 'w') as file:\n file.writelines([\n 'os,arch,build_config,size\\n',\n 'android,arm64-v8a,minimal-baseline,' + str(bin_actual_size) + '\\n'\n ])\n if bin_actual_size > bin_size_threshold:\n raise BuildError('Android arm64 minsizerel libonnxruntime.so size [' + str(bin_actual_size) +\n 'B] is bigger than threshold [' + str(bin_size_threshold) + 'B]')\n\n\ndef run_ios_tests(args, source_dir, config, cwd):\n cpr = run_subprocess([\"xcodebuild\", \"test\", \"-project\", \"./onnxruntime.xcodeproj\",\n \"-configuration\", config,\n \"-scheme\", \"onnxruntime_test_all_xc\", \"-destination\",\n \"platform=iOS Simulator,OS=latest,name=iPhone SE (2nd generation)\"], cwd=cwd)\n if cpr.returncode == 0:\n cpr = run_subprocess([\"xcodebuild\", \"test\", \"-project\", \"./onnxruntime.xcodeproj\",\n \"-configuration\", config,\n \"-scheme\", \"onnxruntime_shared_lib_test_xc\", \"-destination\",\n \"platform=iOS Simulator,OS=latest,name=iPhone SE (2nd generation)\"], cwd=cwd)\n cpr.check_returncode()\n\n\ndef run_orttraining_test_orttrainer_frontend_separately(cwd):\n class TestNameCollecterPlugin:\n def __init__(self):\n self.collected = set()\n\n def pytest_collection_modifyitems(self, items):\n for item in items:\n print('item.name: ', item.name)\n test_name = item.name\n start = test_name.find('[')\n if start > 0:\n test_name = test_name[:start]\n self.collected.add(test_name)\n\n import pytest\n\n plugin = TestNameCollecterPlugin()\n test_script_filename = os.path.join(cwd, \"orttraining_test_orttrainer_frontend.py\")\n pytest.main(['--collect-only', 
test_script_filename], plugins=[plugin])\n\n for test_name in plugin.collected:\n run_subprocess([\n sys.executable, '-m', 'pytest',\n 'orttraining_test_orttrainer_frontend.py', '-v', '-k', test_name], cwd=cwd)\n\n\ndef run_training_python_frontend_tests(cwd):\n run_subprocess([sys.executable, 'onnxruntime_test_ort_trainer.py'], cwd=cwd)\n run_subprocess([sys.executable, 'onnxruntime_test_training_unit_tests.py'], cwd=cwd)\n run_subprocess([\n sys.executable, 'orttraining_test_transformers.py',\n 'BertModelTest.test_for_pretraining_full_precision_list_input'], cwd=cwd)\n run_subprocess([\n sys.executable, 'orttraining_test_transformers.py',\n 'BertModelTest.test_for_pretraining_full_precision_dict_input'], cwd=cwd)\n run_subprocess([\n sys.executable, 'orttraining_test_transformers.py',\n 'BertModelTest.test_for_pretraining_full_precision_list_and_dict_input'], cwd=cwd)\n\n # TODO: use run_orttraining_test_orttrainer_frontend_separately to work around a sporadic segfault.\n # shall revert to run_subprocess call once the segfault issue is resolved.\n run_orttraining_test_orttrainer_frontend_separately(cwd)\n # run_subprocess([sys.executable, '-m', 'pytest', '-sv', 'orttraining_test_orttrainer_frontend.py'], cwd=cwd)\n\n run_subprocess([sys.executable, '-m', 'pytest', '-sv', 'orttraining_test_orttrainer_bert_toy_onnx.py'], cwd=cwd)\n\n\ndef run_training_python_frontend_e2e_tests(cwd):\n # frontend tests are to be added here:\n log.info(\"Running python frontend e2e tests.\")\n\n import torch\n ngpus = torch.cuda.device_count()\n if ngpus > 1:\n bert_pretrain_script = 'orttraining_run_bert_pretrain.py'\n log.debug('RUN: mpirun -n {} ''-x' 'NCCL_DEBUG=INFO'' {} {} {}'.format(\n ngpus, sys.executable, bert_pretrain_script, 'ORTBertPretrainTest.test_pretrain_throughput'))\n run_subprocess([\n 'mpirun', '-n', str(ngpus), '-x', 'NCCL_DEBUG=INFO', sys.executable,\n bert_pretrain_script, 'ORTBertPretrainTest.test_pretrain_throughput'], cwd=cwd)\n\n log.debug('RUN: mpirun -n {} ''-x' 'NCCL_DEBUG=INFO'' {} {} {}'.format(\n ngpus, sys.executable, bert_pretrain_script, 'ORTBertPretrainTest.test_pretrain_convergence'))\n run_subprocess([\n 'mpirun', '-n', str(ngpus), '-x', 'NCCL_DEBUG=INFO', sys.executable,\n bert_pretrain_script, 'ORTBertPretrainTest.test_pretrain_convergence'], cwd=cwd)\n\n # a long run\n log.debug('RUN: mpirun -n {} ''-x' 'NCCL_DEBUG=INFO'' {} {}'.format(\n ngpus, sys.executable, bert_pretrain_script))\n run_subprocess([\n 'mpirun', '-n', str(ngpus), '-x', 'NCCL_DEBUG=INFO', sys.executable,\n bert_pretrain_script], cwd=cwd)\n\n log.debug('RUN: mpirun -n {} {} orttraining_run_glue.py'.format(ngpus, sys.executable))\n run_subprocess([\n 'mpirun', '-n', str(ngpus), '-x', 'NCCL_DEBUG=INFO', sys.executable, 'orttraining_run_glue.py'], cwd=cwd)\n\n # with orttraining_run_glue.py.\n # 1. we like to force to use single GPU (with CUDA_VISIBLE_DEVICES)\n # for fine-tune tests.\n # 2. need to run test separately (not to mix between fp16\n # and full precision runs. 
this need to be investigated).\n run_subprocess(\n [sys.executable, 'orttraining_run_glue.py', 'ORTGlueTest.test_bert_with_mrpc', '-v'],\n cwd=cwd, env={'CUDA_VISIBLE_DEVICES': '0'})\n\n run_subprocess(\n [sys.executable, 'orttraining_run_glue.py', 'ORTGlueTest.test_bert_fp16_with_mrpc', '-v'],\n cwd=cwd, env={'CUDA_VISIBLE_DEVICES': '0'})\n\n run_subprocess(\n [sys.executable, 'orttraining_run_glue.py', 'ORTGlueTest.test_roberta_with_mrpc', '-v'],\n cwd=cwd, env={'CUDA_VISIBLE_DEVICES': '0'})\n\n run_subprocess(\n [sys.executable, 'orttraining_run_glue.py', 'ORTGlueTest.test_roberta_fp16_with_mrpc', '-v'],\n cwd=cwd, env={'CUDA_VISIBLE_DEVICES': '0'})\n\n run_subprocess(\n [sys.executable, 'orttraining_run_multiple_choice.py', 'ORTMultipleChoiceTest.test_bert_fp16_with_swag', '-v'],\n cwd=cwd, env={'CUDA_VISIBLE_DEVICES': '0'})\n\n run_subprocess([sys.executable, 'onnxruntime_test_ort_trainer_with_mixed_precision.py'], cwd=cwd)\n\n run_subprocess([\n sys.executable, 'orttraining_test_transformers.py',\n 'BertModelTest.test_for_pretraining_mixed_precision'], cwd=cwd)\n\n # this test is not stable. need to skip to unblock release\n # run_subprocess([\n # sys.executable, 'orttraining_test_transformers.py',\n # 'BertModelTest.test_for_pretraining_mixed_precision_with_gradient_accumulation'], cwd=cwd)\n\n\ndef run_training_pipeline_e2e_tests(cwd):\n # pipeline tests are to be added here:\n log.info(\"Running pipeline e2e tests.\")\n\n import torch\n ngpus = torch.cuda.device_count()\n\n command = ['./onnxruntime_training_bert',\n '--ort_log_severity', '1',\n '--optimizer=Lamb',\n '--learning_rate=3e-3',\n '--max_seq_length=128',\n '--max_predictions_per_seq=20',\n '--warmup_ratio=0.2843',\n '--warmup_mode=Poly',\n '--model_name', '/bert_ort/bert_models/nv/bert-large/' +\n 'bert-large-uncased_L_24_H_1024_A_16_V_30528_S_512_Dp_0.1_optimized_layer_norm_opset12',\n '--train_data_dir', '/bert_data/128/books_wiki_en_corpus/train',\n '--test_data_dir', '/bert_data/128/books_wiki_en_corpus/test',\n '--display_loss_steps', '1',\n '--use_nccl',\n '--use_mixed_precision',\n '--allreduce_in_fp16',\n '--gradient_accumulation_steps', '48',\n '--num_train_steps', '96',\n '--train_batch_size', '50']\n\n # TODO: currently the CI machine only has 4 GPUs for parallel tests.\n # Fill in more pipeline partition options when the machine has different GPUs counts.\n if ngpus != 4:\n return\n\n # Test 4-way pipeline parallel\n pp_command = ['mpirun', '-n', str(ngpus)] + command + ['--pipeline_parallel_size', '4', '--cut_group_info',\n '1149:407-1219/1341/1463/1585/1707/1829,' +\n '1881:407-1951/2073/2195/2317/2439/2561,' +\n '2613:407-2683/2805/2927/3049/3171/3293']\n command_str = ', '.join(pp_command)\n log.debug('RUN: ' + command_str)\n run_subprocess(pp_command, cwd=cwd)\n\n # Test 2-way data parallel + 2-way pipeline parallel\n pp_dp_command = ['mpirun', '-n', str(ngpus)]\n pp_dp_command = pp_dp_command + command\n pp_dp_command = pp_dp_command + ['--data_parallel_size', '2', '--pipeline_parallel_size',\n '2', '--cut_group_info',\n '1881:407-1951/2073/2195/2317/2439/2561/2683/2805/2927/3049/3171/3293']\n command_str = ', '.join(pp_dp_command)\n log.debug('RUN: ' + command_str)\n run_subprocess(pp_dp_command, cwd=cwd)\n\n\ndef run_onnxruntime_tests(args, source_dir, ctest_path, build_dir, configs):\n for config in configs:\n log.info(\"Running tests for %s configuration\", config)\n cwd = get_config_build_dir(build_dir, config)\n\n if args.enable_training and args.use_cuda and 
args.enable_training_python_frontend_e2e_tests:\n # run frontend tests for orttraining-linux-gpu-frontend_test-ci-pipeline.\n # this is not a PR merge test so skip other non-frontend tests.\n run_training_python_frontend_e2e_tests(cwd=cwd)\n run_training_python_frontend_tests(cwd=cwd)\n continue\n\n if args.enable_training and args.use_cuda and args.enable_training_pipeline_e2e_tests:\n # run distributed pipeline test on 4-GPU CI machine.\n run_training_pipeline_e2e_tests(cwd=cwd)\n continue\n\n if args.android:\n run_android_tests(args, source_dir, config, cwd)\n continue\n elif args.ios:\n run_ios_tests(args, source_dir, config, cwd)\n continue\n dll_path_list = []\n if args.use_nuphar:\n dll_path_list.append(os.path.join(\n build_dir, config, \"external\", \"tvm\", config))\n if args.use_tensorrt:\n dll_path_list.append(os.path.join(args.tensorrt_home, 'lib'))\n if args.use_mklml:\n dll_path_list.append(os.path.join(build_dir, config, \"mklml\", \"src\", \"project_mklml\", \"lib\"))\n if not is_windows():\n # A workaround for making libonnxruntime_providers_shared.so loadable.\n dll_path_list.append(os.path.join(build_dir, config))\n\n dll_path = None\n if len(dll_path_list) > 0:\n dll_path = os.pathsep.join(dll_path_list)\n\n if ctest_path is None:\n # Get the \"Google Test Adapter\" for vstest.\n if not os.path.exists(os.path.join(cwd,\n 'googletestadapter.0.17.1')):\n run_subprocess(\n ['nuget.exe', 'restore',\n os.path.join(source_dir, 'packages.config'),\n '-ConfigFile', os.path.join(source_dir, 'NuGet.config'),\n '-PackagesDirectory', cwd])\n cwd2 = os.path.join(cwd, config)\n executables = ['onnxruntime_test_all.exe']\n if args.build_shared_lib:\n executables.append('onnxruntime_shared_lib_test.exe')\n executables.append('onnxruntime_global_thread_pools_test.exe')\n run_subprocess(\n ['vstest.console.exe', '--parallel',\n '--TestAdapterPath:..\\\\googletestadapter.0.17.1\\\\build\\\\_common', # noqa\n '/Logger:trx', '/Enablecodecoverage', '/Platform:x64',\n \"/Settings:%s\" % os.path.join(\n source_dir, 'cmake\\\\codeconv.runsettings')] + executables,\n cwd=cwd2, dll_path=dll_path)\n else:\n ctest_cmd = [ctest_path, \"--build-config\", config, \"--verbose\", \"--timeout\", \"3600\"]\n run_subprocess(ctest_cmd, cwd=cwd, dll_path=dll_path)\n\n if args.enable_pybind:\n # Disable python tests for TensorRT because many tests are\n # not supported yet.\n if args.use_tensorrt:\n return\n\n # Disable python tests in a reduced build as we don't know which ops have been included and which\n # models can run\n if args.include_ops_by_model or args.include_ops_by_config or args.minimal_build:\n return\n\n if is_windows():\n cwd = os.path.join(cwd, config)\n\n run_subprocess([sys.executable, 'onnxruntime_test_python.py'], cwd=cwd, dll_path=dll_path)\n\n if args.enable_symbolic_shape_infer_tests:\n run_subprocess([sys.executable, 'onnxruntime_test_python_symbolic_shape_infer.py'],\n cwd=cwd, dll_path=dll_path)\n\n # For CUDA enabled builds test IOBinding feature\n if args.use_cuda:\n # We need to have Torch installed to test the IOBinding feature\n # which currently uses Torch's allocator to allocate GPU memory for testing\n log.info(\"Testing IOBinding feature\")\n run_subprocess([sys.executable, 'onnxruntime_test_python_iobinding.py'], cwd=cwd, dll_path=dll_path)\n\n if not args.disable_ml_ops:\n run_subprocess([sys.executable, 'onnxruntime_test_python_mlops.py'], cwd=cwd, dll_path=dll_path)\n\n if args.enable_training and args.use_cuda:\n # run basic frontend tests\n 
run_training_python_frontend_tests(cwd=cwd)\n\n try:\n import onnx # noqa\n onnx_test = True\n except ImportError as error:\n log.exception(error)\n log.warning(\"onnx is not installed. The ONNX tests will be skipped.\")\n onnx_test = False\n\n if onnx_test:\n run_subprocess([sys.executable, 'onnxruntime_test_python_backend.py'], cwd=cwd, dll_path=dll_path)\n\n if not args.disable_ml_ops:\n run_subprocess([sys.executable, 'onnxruntime_test_python_backend_mlops.py'],\n cwd=cwd, dll_path=dll_path)\n\n run_subprocess([sys.executable,\n os.path.join(source_dir, 'onnxruntime', 'test', 'onnx', 'gen_test_models.py'),\n '--output_dir', 'test_models'], cwd=cwd)\n\n if not args.skip_onnx_tests:\n run_subprocess([os.path.join(cwd, 'onnx_test_runner'), 'test_models'], cwd=cwd)\n if config != 'Debug':\n run_subprocess([sys.executable, 'onnx_backend_test_series.py'], cwd=cwd, dll_path=dll_path)\n\n if not args.skip_keras_test:\n try:\n import onnxmltools # noqa\n import keras # noqa\n onnxml_test = True\n except ImportError:\n log.warning(\n \"onnxmltools and keras are not installed. \"\n \"The keras tests will be skipped.\")\n onnxml_test = False\n if onnxml_test:\n run_subprocess(\n [sys.executable, 'onnxruntime_test_python_keras.py'],\n cwd=cwd, dll_path=dll_path)\n\n\ndef nuphar_run_python_tests(build_dir, configs):\n \"\"\"nuphar temporary function for running python tests separately\n as it requires ONNX 1.5.0\n \"\"\"\n for config in configs:\n if config == 'Debug':\n continue\n cwd = get_config_build_dir(build_dir, config)\n if is_windows():\n cwd = os.path.join(cwd, config)\n dll_path = os.path.join(build_dir, config, \"external\", \"tvm\", config)\n # install onnx for shape inference in testing Nuphar scripts\n # this needs to happen after onnx_test_data preparation which\n # uses onnx 1.3.0\n run_subprocess(\n [sys.executable, '-m', 'pip', 'install', '--user', 'onnx==1.5.0'])\n run_subprocess(\n [sys.executable, 'onnxruntime_test_python_nuphar.py'],\n cwd=cwd, dll_path=dll_path)\n\n\ndef run_nodejs_tests(nodejs_binding_dir):\n args = ['npm', 'test', '--', '--timeout=2000']\n if is_windows():\n args = ['cmd', '/c'] + args\n run_subprocess(args, cwd=nodejs_binding_dir)\n\n\ndef build_python_wheel(\n source_dir, build_dir, configs, use_cuda, use_ngraph, use_dnnl,\n use_tensorrt, use_openvino, use_nuphar, use_vitisai, use_acl, use_armnn, use_dml,\n wheel_name_suffix, enable_training, nightly_build=False, featurizers_build=False, use_ninja=False):\n for config in configs:\n cwd = get_config_build_dir(build_dir, config)\n if is_windows() and not use_ninja:\n cwd = os.path.join(cwd, config)\n\n args = [sys.executable, os.path.join(source_dir, 'setup.py'),\n 'bdist_wheel']\n\n # We explicitly override the platform tag in the name of the generated build wheel\n # so that we can install the wheel on Mac OS X versions 10.12+.\n # Without this explicit override, we will something like this while building on MacOS 10.14 -\n # [WARNING] MACOSX_DEPLOYMENT_TARGET is set to a lower value (10.12)\n # than the version on which the Python interpreter was compiled (10.14) and will be ignored.\n # Since we need to support 10.12+, we explicitly override the platform tag.\n # See PR #3626 for more details\n if is_macOS():\n args += ['-p', 'macosx_10_12_x86_64']\n\n # Any combination of the following arguments can be applied\n if nightly_build:\n args.append('--nightly_build')\n if featurizers_build:\n args.append(\"--use_featurizers\")\n if wheel_name_suffix:\n 
args.append('--wheel_name_suffix={}'.format(wheel_name_suffix))\n if enable_training:\n args.append(\"--enable_training\")\n\n # The following arguments are mutually exclusive\n if use_tensorrt:\n args.append('--use_tensorrt')\n elif use_cuda:\n args.append('--use_cuda')\n elif use_ngraph:\n args.append('--use_ngraph')\n elif use_openvino:\n args.append('--use_openvino')\n elif use_dnnl:\n args.append('--use_dnnl')\n elif use_nuphar:\n args.append('--use_nuphar')\n elif use_vitisai:\n args.append('--use_vitisai')\n elif use_acl:\n args.append('--use_acl')\n elif use_armnn:\n args.append('--use_armnn')\n elif use_dml:\n args.append('--use_dml')\n\n run_subprocess(args, cwd=cwd)\n\n\ndef derive_linux_build_property():\n if is_windows():\n return \"/p:IsLinuxBuild=\\\"false\\\"\"\n else:\n return \"/p:IsLinuxBuild=\\\"true\\\"\"\n\n\ndef build_nuget_package(source_dir, build_dir, configs, use_cuda, use_openvino, use_tensorrt, use_dnnl, use_mklml):\n if not (is_windows() or is_linux()):\n raise BuildError(\n 'Currently csharp builds and nuget package creation is only supportted '\n 'on Windows and Linux platforms.')\n\n csharp_build_dir = os.path.join(source_dir, 'csharp')\n is_linux_build = derive_linux_build_property()\n\n # derive package name and execution provider based on the build args\n execution_provider = \"/p:ExecutionProvider=\\\"None\\\"\"\n package_name = \"/p:OrtPackageId=\\\"Microsoft.ML.OnnxRuntime\\\"\"\n if use_openvino:\n execution_provider = \"/p:ExecutionProvider=\\\"openvino\\\"\"\n package_name = \"/p:OrtPackageId=\\\"Microsoft.ML.OnnxRuntime.OpenVino\\\"\"\n elif use_tensorrt:\n execution_provider = \"/p:ExecutionProvider=\\\"tensorrt\\\"\"\n package_name = \"/p:OrtPackageId=\\\"Microsoft.ML.OnnxRuntime.TensorRT\\\"\"\n elif use_dnnl:\n execution_provider = \"/p:ExecutionProvider=\\\"dnnl\\\"\"\n package_name = \"/p:OrtPackageId=\\\"Microsoft.ML.OnnxRuntime.DNNL\\\"\"\n elif use_cuda:\n package_name = \"/p:OrtPackageId=\\\"Microsoft.ML.OnnxRuntime.Gpu\\\"\"\n elif use_mklml:\n package_name = \"/p:OrtPackageId=\\\"Microsoft.ML.OnnxRuntime.MKLML\\\"\"\n else:\n pass\n\n # set build directory based on build_dir arg\n native_dir = os.path.normpath(os.path.join(source_dir, build_dir))\n ort_build_dir = \"/p:OnnxRuntimeBuildDirectory=\\\"\" + native_dir + \"\\\"\"\n\n # dotnet restore\n cmd_args = [\"dotnet\", \"restore\", \"OnnxRuntime.CSharp.sln\", \"--configfile\", \"Nuget.CSharp.config\"]\n run_subprocess(cmd_args, cwd=csharp_build_dir)\n\n # build csharp bindings and create nuget package for each config\n for config in configs:\n if is_linux():\n native_build_dir = os.path.join(native_dir, config)\n cmd_args = [\"make\", \"install\", \"DESTDIR=.//nuget-staging\"]\n run_subprocess(cmd_args, cwd=native_build_dir)\n\n configuration = \"/p:Configuration=\\\"\" + config + \"\\\"\"\n\n cmd_args = [\"dotnet\", \"msbuild\", \"OnnxRuntime.CSharp.sln\", configuration, package_name, is_linux_build,\n ort_build_dir]\n run_subprocess(cmd_args, cwd=csharp_build_dir)\n\n cmd_args = [\n \"dotnet\", \"msbuild\", \"OnnxRuntime.CSharp.proj\", \"/t:CreatePackage\",\n package_name, configuration, execution_provider, is_linux_build, ort_build_dir]\n run_subprocess(cmd_args, cwd=csharp_build_dir)\n\n\ndef run_csharp_tests(source_dir, build_dir, use_cuda, use_openvino, use_tensorrt, use_dnnl):\n # Currently only running tests on windows.\n if not is_windows():\n return\n csharp_source_dir = os.path.join(source_dir, 'csharp')\n is_linux_build = derive_linux_build_property()\n\n # define macros 
based on build args\n macros = \"\"\n if use_openvino:\n macros += \"USE_OPENVINO;\"\n if use_tensorrt:\n macros += \"USE_TENSORRT;\"\n if use_dnnl:\n macros += \"USE_DNNL;\"\n if use_cuda:\n macros += \"USE_CUDA;\"\n\n define_constants = \"\"\n if macros != \"\":\n define_constants = \"/p:DefineConstants=\\\"\" + macros + \"\\\"\"\n\n # set build directory based on build_dir arg\n native_build_dir = os.path.normpath(os.path.join(source_dir, build_dir))\n ort_build_dir = \"/p:OnnxRuntimeBuildDirectory=\\\"\" + native_build_dir + \"\\\"\"\n\n # Skip pretrained models test. Only run unit tests as part of the build\n # add \"--verbosity\", \"detailed\" to this command if required\n cmd_args = [\"dotnet\", \"test\", \"test\\\\Microsoft.ML.OnnxRuntime.Tests\\\\Microsoft.ML.OnnxRuntime.Tests.csproj\",\n \"--filter\", \"FullyQualifiedName!=Microsoft.ML.OnnxRuntime.Tests.InferenceTest.TestPreTrainedModels\",\n is_linux_build, define_constants, ort_build_dir]\n run_subprocess(cmd_args, cwd=csharp_source_dir)\n\n\ndef build_protoc_for_host(cmake_path, source_dir, build_dir, args):\n if (args.arm or args.arm64 or args.enable_windows_store) and (not is_windows() and not args.ios):\n raise BuildError(\n 'Currently only support building protoc for Windows host while '\n 'cross-compiling for ARM/ARM64/Store and linux cross-compiling iOS')\n\n log.info(\n \"Building protoc for host to be used in cross-compiled build process\")\n protoc_build_dir = os.path.join(os.getcwd(), build_dir, 'host_protoc')\n os.makedirs(protoc_build_dir, exist_ok=True)\n # Generate step\n cmd_args = [\n cmake_path,\n os.path.join(source_dir, 'cmake', 'external', 'protobuf', 'cmake'),\n '-Dprotobuf_BUILD_TESTS=OFF',\n '-Dprotobuf_WITH_ZLIB_DEFAULT=OFF',\n '-Dprotobuf_BUILD_SHARED_LIBS=OFF'\n ]\n\n is_ninja = args.cmake_generator == 'Ninja'\n if args.cmake_generator is not None and not (is_macOS() and args.use_xcode):\n cmd_args += ['-G', args.cmake_generator]\n if is_windows():\n if not is_ninja:\n cmd_args += ['-T', 'host=x64']\n elif is_macOS():\n if args.use_xcode:\n cmd_args += ['-G', 'Xcode']\n # CMake < 3.18 has a bug setting system arch to arm64 (if not specified) for Xcode 12,\n # protoc for host should be built using host architecture\n # Explicitly specify the CMAKE_OSX_ARCHITECTURES for x86_64 Mac.\n import platform\n if platform.machine() == 'x86_64':\n cmd_args += ['-DCMAKE_OSX_ARCHITECTURES=x86_64']\n\n run_subprocess(cmd_args, cwd=protoc_build_dir)\n # Build step\n cmd_args = [cmake_path,\n \"--build\", protoc_build_dir,\n \"--config\", \"Release\",\n \"--target\", \"protoc\"]\n run_subprocess(cmd_args)\n\n # Absolute protoc path is needed for cmake\n config_dir = ''\n suffix = ''\n\n if (is_windows() and not is_ninja) or (is_macOS() and args.use_xcode):\n config_dir = 'Release'\n\n if is_windows():\n suffix = '.exe'\n\n expected_protoc_path = os.path.join(protoc_build_dir, config_dir, 'protoc' + suffix)\n\n if not os.path.exists(expected_protoc_path):\n raise BuildError(\"Couldn't find {}. 
Host build of protoc failed.\".format(expected_protoc_path))\n\n return expected_protoc_path\n\n\ndef generate_documentation(source_dir, build_dir, configs):\n operator_doc_path = os.path.join(source_dir, 'docs', 'ContribOperators.md')\n opkernel_doc_path = os.path.join(source_dir, 'docs', 'OperatorKernels.md')\n for config in configs:\n # Copy the gen_contrib_doc.py.\n shutil.copy(\n os.path.join(source_dir, 'tools', 'python', 'gen_contrib_doc.py'),\n os.path.join(build_dir, config))\n shutil.copy(\n os.path.join(source_dir, 'tools', 'python', 'gen_opkernel_doc.py'),\n os.path.join(build_dir, config))\n run_subprocess(\n [sys.executable,\n 'gen_contrib_doc.py',\n '--output_path', operator_doc_path],\n cwd=os.path.join(build_dir, config))\n run_subprocess(\n [sys.executable,\n 'gen_opkernel_doc.py',\n '--output_path', opkernel_doc_path],\n cwd=os.path.join(build_dir, config))\n docdiff = ''\n try:\n docdiff = subprocess.check_output(['git', 'diff', opkernel_doc_path])\n except subprocess.CalledProcessError:\n print('git diff returned non-zero error code')\n if len(docdiff) > 0:\n # Show warning instead of throwing exception, because it is\n # dependent on build configuration for including\n # execution propviders\n log.warning(\n 'The updated opkernel document file ' + str(opkernel_doc_path) +\n ' is different from the checked in version. Consider '\n 'regenerating the file with CPU, DNNL and CUDA providers enabled.')\n log.debug('diff:\\n' + str(docdiff))\n\n docdiff = ''\n try:\n docdiff = subprocess.check_output(['git', 'diff', operator_doc_path])\n except subprocess.CalledProcessError:\n print('git diff returned non-zero error code')\n if len(docdiff) > 0:\n raise BuildError(\n 'The updated operator document file ' +\n str(operator_doc_path) + ' must be checked in.\\n diff:\\n' +\n str(docdiff))\n\n\ndef main():\n args = parse_arguments()\n cmake_extra_defines = (args.cmake_extra_defines\n if args.cmake_extra_defines else [])\n cross_compiling = args.arm or args.arm64 or args.android\n\n # If there was no explicit argument saying what to do, default\n # to update, build and test (for native builds).\n if not (args.update or args.clean or args.build or args.test):\n log.debug(\n \"Defaulting to running update, build \"\n \"[and test for native builds].\")\n args.update = True\n args.build = True\n if cross_compiling:\n args.test = args.android_abi == 'x86_64' or args.android_abi == 'arm64-v8a'\n else:\n args.test = True\n\n if args.skip_tests:\n args.test = False\n\n if args.include_ops_by_model or args.include_ops_by_config:\n from exclude_unused_ops import exclude_unused_ops\n models_path = args.include_ops_by_model if args.include_ops_by_model else ''\n config_path = args.include_ops_by_config if args.include_ops_by_config else ''\n exclude_unused_ops(models_path, config_path, use_cuda=args.use_cuda)\n\n if args.use_tensorrt:\n args.use_cuda = True\n\n if args.build_wheel or args.gen_doc:\n args.enable_pybind = True\n\n if args.build_csharp or args.build_nuget or args.build_java or args.build_nodejs:\n args.build_shared_lib = True\n\n if args.build_nuget and cross_compiling:\n raise BuildError('Currently nuget package creation is not supported while cross-compiling')\n\n if args.enable_pybind and args.disable_exceptions:\n raise BuildError('Python bindings require exceptions to be enabled.')\n\n if args.minimal_build and args.disable_ort_format_load:\n raise BuildError('Minimal build requires loading ORT format models.')\n\n # Disabling unit tests for VAD-F as FPGA only supports\n # 
models with NCHW layout\n if args.use_openvino == \"VAD-F_FP32\":\n args.test = False\n\n configs = set(args.config)\n\n # setup paths and directories\n cmake_path = resolve_executable_path(args.cmake_path)\n ctest_path = None if args.use_vstest else resolve_executable_path(\n args.ctest_path)\n build_dir = args.build_dir\n script_dir = os.path.realpath(os.path.dirname(__file__))\n source_dir = os.path.normpath(os.path.join(script_dir, \"..\", \"..\"))\n\n # if using cuda, setup cuda paths and env vars\n cuda_home, cudnn_home = setup_cuda_vars(args)\n\n mpi_home = args.mpi_home\n nccl_home = args.nccl_home\n\n # if using tensorrt, setup tensorrt paths\n tensorrt_home = setup_tensorrt_vars(args)\n\n # if using migraphx, setup migraphx paths\n migraphx_home = setup_migraphx_vars(args)\n\n os.makedirs(build_dir, exist_ok=True)\n\n log.info(\"Build started\")\n if args.update:\n cmake_extra_args = []\n path_to_protoc_exe = args.path_to_protoc_exe\n if not args.skip_submodule_sync:\n update_submodules(source_dir)\n if is_windows():\n if args.cmake_generator == 'Ninja':\n if args.x86 or args.arm or args.arm64:\n raise BuildError(\n \"To cross-compile with Ninja, load the toolset \"\n \"environment for the target processor (e.g. Cross \"\n \"Tools Command Prompt for VS)\")\n cmake_extra_args = ['-G', args.cmake_generator]\n elif args.x86:\n cmake_extra_args = [\n '-A', 'Win32', '-T', 'host=x64', '-G', args.cmake_generator\n ]\n elif args.arm or args.arm64:\n # Cross-compiling for ARM(64) architecture\n # First build protoc for host to use during cross-compilation\n if path_to_protoc_exe is None:\n path_to_protoc_exe = build_protoc_for_host(\n cmake_path, source_dir, build_dir, args)\n if args.arm:\n cmake_extra_args = ['-A', 'ARM']\n else:\n cmake_extra_args = ['-A', 'ARM64']\n cmake_extra_args += ['-G', args.cmake_generator]\n # Cannot test on host build machine for cross-compiled\n # builds (Override any user-defined behaviour for test if any)\n if args.test:\n log.info(\n \"Cannot test on host build machine for cross-compiled \"\n \"ARM(64) builds. Will skip test running after build.\")\n args.test = False\n else:\n if (args.msvc_toolset == '14.16' and\n args.cmake_generator == 'Visual Studio 16 2019'):\n # CUDA 10.0 requires _MSC_VER >= 1700 and\n # _MSC_VER < 1920, aka Visual Studio version\n # in [2012, 2019). 
In VS2019, we have to use\n # Side-by-side minor version MSVC toolsets from\n # Visual Studio 2017 14.16 is MSVC version\n # 141 is MSVC Toolset Version\n # Cuda VS extension should be installed to\n # C:\\Program Files (x86)\\Microsoft Visual\n # Studio\\2019\\Enterprise\\MSBuild\\Microsoft\\VC\\v160\\BuildCustomizations # noqa\n toolset = 'v141,host=x64,version=' + args.msvc_toolset\n elif args.msvc_toolset:\n toolset = 'host=x64,version=' + args.msvc_toolset\n else:\n toolset = 'host=x64'\n if args.cuda_version:\n toolset += ',cuda=' + args.cuda_version\n cmake_extra_args = [\n '-A', 'x64', '-T', toolset, '-G', args.cmake_generator\n ]\n if args.enable_windows_store:\n cmake_extra_args.append(\n '-DCMAKE_TOOLCHAIN_FILE=' + os.path.join(\n source_dir, 'cmake', 'store_toolchain.cmake'))\n if args.enable_wcos:\n cmake_extra_args.append('-DCMAKE_USER_MAKE_RULES_OVERRIDE=wcos_rules_override.cmake')\n elif args.cmake_generator is not None and not (is_macOS() and args.use_xcode):\n cmake_extra_args += ['-G', args.cmake_generator]\n elif is_macOS() and args.use_xcode:\n cmake_extra_args += ['-G', 'Xcode']\n\n if (args.android or args.ios or args.enable_windows_store) and args.path_to_protoc_exe is None:\n # Cross-compiling for Android and iOS\n path_to_protoc_exe = build_protoc_for_host(\n cmake_path, source_dir, build_dir, args)\n\n if is_ubuntu_1604():\n if (args.arm or args.arm64):\n raise BuildError(\n \"Only Windows ARM(64) cross-compiled builds supported \"\n \"currently through this script\")\n install_ubuntu_deps(args)\n if not is_docker() and not args.use_acl and not args.use_armnn:\n install_python_deps()\n if args.enable_pybind and is_windows():\n install_python_deps(args.numpy_version)\n if args.enable_onnx_tests:\n setup_test_data(build_dir, configs)\n generate_build_tree(\n cmake_path, source_dir, build_dir, cuda_home, cudnn_home, mpi_home, nccl_home,\n tensorrt_home, migraphx_home, path_to_protoc_exe, configs, cmake_extra_defines,\n args, cmake_extra_args)\n\n if args.clean:\n clean_targets(cmake_path, build_dir, configs)\n\n # if using DML, perform initial nuget package restore\n setup_dml_build(args, cmake_path, build_dir, configs)\n\n if args.build:\n build_targets(args, cmake_path, build_dir, configs, args.parallel, args.target)\n\n if args.test:\n run_onnxruntime_tests(args, source_dir, ctest_path, build_dir, configs)\n\n # run nuphar python tests last, as it installs ONNX 1.5.0\n if args.enable_pybind and not args.skip_onnx_tests and args.use_nuphar:\n nuphar_run_python_tests(build_dir, configs)\n\n # run node.js binding tests\n if args.build_nodejs and not args.skip_nodejs_tests:\n nodejs_binding_dir = os.path.normpath(os.path.join(source_dir, \"nodejs\"))\n run_nodejs_tests(nodejs_binding_dir)\n\n if args.build:\n if args.build_wheel:\n nightly_build = bool(os.getenv('NIGHTLY_BUILD') == '1')\n build_python_wheel(\n source_dir,\n build_dir,\n configs,\n args.use_cuda,\n args.use_ngraph,\n args.use_dnnl,\n args.use_tensorrt,\n args.use_openvino,\n args.use_nuphar,\n args.use_vitisai,\n args.use_acl,\n args.use_armnn,\n args.use_dml,\n args.wheel_name_suffix,\n args.enable_training,\n nightly_build=nightly_build,\n featurizers_build=args.use_featurizers,\n use_ninja=(args.cmake_generator == 'Ninja')\n )\n if args.build_nuget:\n build_nuget_package(\n source_dir,\n build_dir,\n configs,\n args.use_cuda,\n args.use_openvino,\n args.use_tensorrt,\n args.use_dnnl,\n args.use_mklml\n )\n\n if args.test and args.build_nuget:\n run_csharp_tests(\n source_dir,\n build_dir,\n 
args.use_cuda,\n args.use_openvino,\n args.use_tensorrt,\n args.use_dnnl)\n\n if args.gen_doc and (args.build or args.test):\n generate_documentation(source_dir, build_dir, configs)\n\n log.info(\"Build complete\")\n\n\nif __name__ == \"__main__\":\n try:\n sys.exit(main())\n except BaseError as e:\n log.error(str(e))\n sys.exit(1)\n" ]
[ [ "torch.cuda.device_count" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
insilicomedicine/TRIP
[ "5e7b9da298aa47a71c71e1144ff1d8e538dbccaa", "5e7b9da298aa47a71c71e1144ff1d8e538dbccaa" ]
[ "core/learnable_priors/normal_prior.py", "core/generative_models/gans/wgan.py" ]
[ "import torch\nfrom torch import nn\nfrom torch.distributions import MultivariateNormal\n\nclass Normal(nn.Module):\n def __init__(self, num_vars=100):\n super(Normal, self).__init__()\n\n self.num_vars = num_vars\n\n self.means = nn.Parameter(torch.zeros(num_vars))\n self.std = nn.Parameter(torch.eye(num_vars))\n\n def log_prob(self, x):\n distr = MultivariateNormal(self.means, self.std)\n return distr.log_prob(x)\n\n def sample(self, num_samples):\n distr = MultivariateNormal(self.means, self.std)\n return distr.sample_n(num_samples)\n", "import torch\nimport torch.nn as nn\nfrom torch import autograd\nimport torch.optim as optim\n\nfrom ...utils import TrainStats\n\nclass WGAN(nn.Module):\n def __init__(self, gen, discr, prior, n_critic=5, gamma=1, gp=True,\n device='cpu'):\n super(WGAN, self).__init__()\n\n self.gen = gen\n self.discr = discr\n self.prior = prior\n\n self.gamma = gamma\n\n self.n_critic = n_critic\n\n self.gp = gp\n\n self.device = device\n\n def get_losses(self, x, compute_reinforce=False):\n # get generator samples\n sampled_latents = self.prior.sample(x.shape[0])\n sampled_latents = sampled_latents.detach()\n sampled_images = self.gen(sampled_latents)\n\n # get discriminator outputs\n real_discr = self.discr(x)\n fake_discr = self.discr(sampled_images)\n\n # compute gradient penalties\n if self.gp:\n alphas = torch.rand(x.shape[0], 1, 1, 1).repeat(1, x.shape[1],\n x.shape[2],\n x.shape[3])\n alphas = alphas.to(self.device)\n int_points = alphas * sampled_images + (1 - alphas) * x\n int_points_discr = self.discr(int_points)\n\n gradients = autograd.grad(outputs=int_points_discr, inputs=int_points,\n grad_outputs=torch.ones(\n int_points_discr.size()).to(self.device),\n create_graph=True, retain_graph=True,\n only_inputs=True)[0]\n\n grad_norm = ((gradients.norm(2, dim=1) - 1) ** 2).mean()\n\n # compute reinforce loss\n if compute_reinforce:\n rews = (fake_discr - fake_discr.mean()).detach()\n rews = rews / rews.std()\n lp_loss = -(rews * self.prior.log_prob(sampled_latents)).mean()\n else:\n lp_loss = torch.zeros(1).mean()\n\n # compute losses\n gen_loss = -fake_discr.mean()\n discr_loss = -(\n real_discr.mean() - fake_discr.mean())\n\n if self.gp:\n discr_loss = discr_loss + self.gamma * grad_norm\n\n return gen_loss, \\\n discr_loss, \\\n lp_loss, \\\n {\n 'gen_loss': gen_loss.detach().cpu().numpy(),\n 'discr_loss': discr_loss.detach().cpu().numpy(),\n 'lp_loss': lp_loss.detach().cpu().numpy(),\n 'grad_norm': grad_norm.detach().cpu().numpy()\n }\n\n def make_training(self, train_loader, global_stats=None, num_iterations=20000, verbose_step=50,\n train_lp=True, lr=1e-4, lp_lr=1e-4):\n gen_optimizer = optim.Adam(self.gen.parameters(), lr=lr, betas=(0.5, .9))\n discr_optimizer = optim.Adam(self.discr.parameters(), lr=lr,\n betas=(0.5, .9))\n lp_optimizer = optim.Adam(self.prior.parameters(), lr=lp_lr)\n\n local_stats = TrainStats()\n\n cur_iteration = 0\n\n epoch_i = 0\n while cur_iteration < num_iterations:\n i = 0\n\n print(\"Epoch\", epoch_i, \":\")\n for x_batch, _ in train_loader:\n x_batch = x_batch.to(self.device)\n\n print(\"!\", end='')\n i += 1\n\n gen_loss, discr_loss, lp_loss, cur_stats = self.get_losses(\n x_batch, (i % self.n_critic == 0) and train_lp)\n local_stats.update(cur_stats)\n if global_stats is not None:\n global_stats.update(cur_stats)\n\n if i % self.n_critic == 0:\n gen_optimizer.zero_grad()\n gen_loss.backward()\n gen_optimizer.step()\n\n if train_lp:\n lp_optimizer.zero_grad()\n lp_loss.backward()\n lp_optimizer.step()\n\n 
self.prior.stabilize()\n else:\n discr_optimizer.zero_grad()\n discr_loss.backward()\n discr_optimizer.step()\n\n cur_iteration += 1\n if cur_iteration >= num_iterations:\n break\n\n if i % verbose_step == 0:\n local_stats.print()\n local_stats.reset()\n i = 0\n\n epoch_i += 1\n if i > 0:\n local_stats.print()\n local_stats.reset()\n\n return global_stats\n\n def sample(self, num_samples):\n z = self.prior.sample(num_samples)\n samples = self.gen(z)\n return samples.detach().cpu().numpy()" ]
[ [ "torch.distributions.MultivariateNormal", "torch.eye", "torch.zeros" ], [ "torch.rand", "torch.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Ejjaffe/dit
[ "c9d206f03d1de5a0a298b1d0ea9d79ea5e789ee1", "c9d206f03d1de5a0a298b1d0ea9d79ea5e789ee1", "c9d206f03d1de5a0a298b1d0ea9d79ea5e789ee1", "c9d206f03d1de5a0a298b1d0ea9d79ea5e789ee1" ]
[ "dit/math/ops.py", "dit/divergences/maximum_correlation.py", "tests/test_bgm.py", "dit/other/renyi_entropy.py" ]
[ "\"\"\"\nClasses to contextualize math operations in log vs linear space.\n\n\"\"\"\nfrom types import MethodType\n\nimport numpy as np\n\nfrom ..exceptions import InvalidBase\n\n\n__all__ = (\n 'get_ops',\n 'LinearOperations',\n 'LogOperations',\n)\n\n\n# For 2.x, these are ascii strings. For 3.x these are unicode strings.\nacceptable_base_strings = {'linear', 'e'}\n\n\ndef get_ops(base):\n \"\"\"\n Returns an *Operations instance, depending on the base.\n\n Parameters\n ----------\n base : float, 'linear', 'e'\n The base for the Operations instance.\n\n \"\"\"\n # Let's not initialize unless we have to.\n if base in cache:\n ops = cache[base]\n else:\n # This assumes that 'linear' is in cache.\n ops = LogOperations(base)\n cache[base] = ops\n return ops\n\n\ndef exp_func(b):\n \"\"\"\n Returns a base-`b` exponential function.\n\n Parameters\n ----------\n b : positive float or 'e'\n The base of the desired exponential function.\n\n Returns\n -------\n exp : function\n The base-`b` exponential function. The returned function will operate\n elementwise on NumPy arrays, but note, it is not a ufunc.\n\n Examples\n --------\n >>> exp2 = exp_func(2)\n >>> exp2(1)\n 2.0\n >>> exp3 = exp_func(3)\n >>> exp3(1)\n 3.0\n\n Raises\n ------\n InvalidBase\n If the base is less than zero or equal to one.\n\n \"\"\"\n from dit.utils import is_string_like\n\n if is_string_like(b) and b not in acceptable_base_strings:\n raise InvalidBase(msg=b)\n\n if b == 'linear':\n exp = lambda x: x # pragma: no branch\n elif b == 2:\n exp = np.exp2\n elif b == 10:\n exp = lambda x: 10**x\n elif b == 'e' or np.isclose(b, np.e):\n exp = np.exp\n else:\n if b <= 0 or b == 1:\n raise InvalidBase(b)\n\n def exp(x, base=b):\n \"\"\"\n Return `base`**`x`\n\n Parameters\n ----------\n x : float\n The number to exponentiate\n base : float\n The base of the exponential\n\n Returns\n -------\n p : float\n `base`**`x`\n \"\"\"\n return base**np.asarray(x)\n\n return exp\n\n\ndef log_func(b):\n \"\"\"\n Returns a base-`b` logarithm function.\n\n Parameters\n ----------\n b : positive float or 'e'\n The base of the desired logarithm function.\n\n Returns\n -------\n log : function\n The base-`b` logarithm function. The returned function will operate\n elementwise on NumPy arrays, but note, it is not a ufunc.\n\n Examples\n --------\n >>> log2 = log_func(2)\n >>> log2(2)\n 1.0\n >>> log3 = log_func(3)\n >>> log3(3)\n 1.0\n\n Raises\n ------\n InvalidBase\n If the base is less than zero or equal to one.\n\n \"\"\"\n from dit.utils import is_string_like\n\n if is_string_like(b) and b not in acceptable_base_strings:\n raise InvalidBase(msg=b)\n\n if b == 'linear':\n log = lambda x: x # pragma: no branch\n elif b == 2:\n log = np.log2\n elif b == 10:\n log = np.log10\n elif b == 'e' or np.isclose(b, np.e):\n log = np.log\n else:\n if b <= 0 or b == 1:\n raise InvalidBase(b)\n\n Z = np.log(b)\n\n def log(x, func=np.log):\n \"\"\"\n Return the log of `x`\n\n Parameters\n ----------\n x : float\n The value to take the log of\n func : function\n A logarithm function\n\n Returns\n -------\n log : float\n The logarithm of `x` in base `b` (from outer scope)\n \"\"\"\n return func(x) / Z\n\n return log\n\n\nclass Operations(object):\n \"\"\"\n Base class which implements certain math operations.\n\n For example, regular addition with log probabilities is handled specially.\n\n While we could implement many more operations, we do not. Their usage\n is uncommon and their implementation would be slower as well. 
For example,\n subtraction with log probabailities must go as:\n\n .. math::\n log_2(x-y) = log_2(x) + log_2(1 - 2^[ log_2(y) - log_2(x) ])\n\n Note that if :math:`y > x`, then :math:`log(y) > log(x)` and the inner term\n of the second logarithm will be less than 0, yielding NaN.\n\n \"\"\"\n\n ### Do we allow base == 'e' or should we convert to its numerical value?\n ### Ans: We store whatever was specified but provide get_base() with an\n ### option to return a numerical base.\n\n one = None\n zero = None\n base = None\n exp = None\n log = None\n\n def get_base(self, numerical=False):\n \"\"\"\n Returns the base in which operations take place.\n\n For linear-based operations, the result is 'linear'.\n\n Parameters\n ----------\n numerical : bool\n If `True`, then if the base is 'e', it is returned as a float.\n\n \"\"\"\n if numerical and self.base == 'e':\n base = np.exp(1)\n else:\n base = self.base\n return base\n\n def is_null(self, p):\n \"\"\"\n Returns `True` if `p` is a null probability.\n\n Parameters\n ----------\n p : float\n The probability to be tested.\n\n \"\"\"\n return np.isclose(self.zero, p)\n\n def is_null_exact(self, p):\n \"\"\"\n Returns `True` if `p` is exactly a null probability.\n\n Parameters\n ----------\n p : float\n The probability to be tested.\n\n \"\"\"\n return self.zero == p\n\n def add(self, x, y):\n \"\"\" Abstract base class \"\"\"\n raise NotImplementedError\n\n def add_inplace(self, x, y):\n \"\"\" Abstract base class \"\"\"\n raise NotImplementedError\n\n def add_reduce(self, x):\n \"\"\" Abstract base class \"\"\"\n raise NotImplementedError\n\n def mult(self, x, y):\n \"\"\" Abstract base class \"\"\"\n raise NotImplementedError\n\n def mult_inplace(self, x, y):\n \"\"\" Abstract base class \"\"\"\n raise NotImplementedError\n\n def mult_reduce(self, x):\n \"\"\" Abstract base class \"\"\"\n raise NotImplementedError\n\n def invert(self, x):\n \"\"\" Abstract base class \"\"\"\n raise NotImplementedError\n\n def normalize(self, x):\n \"\"\" Abstract base class \"\"\"\n raise NotImplementedError\n\n\nclass LinearOperations(Operations):\n \"\"\"\n The class of operations on linear values.\n \"\"\"\n\n one = 1\n zero = 0\n base = 'linear'\n\n # If the functions below are standard Python functions (as opposed to\n # NumPy ufuncs), then they will be treated as unbound methods for the class.\n # During instantiation, they are bound to the instance (since before\n # instantiation they are class methods) and thus, we are left with\n # bound methods (undesirably). If we had modified these attributes in the\n # __init__ function, then they would not be bound (or even unbound methods)\n # but functions instead (desirably). This is precisely what LogOperations\n # does, which is why it does not have this issue. An alternative approach\n # is to explicitly declare these functions to be static methods, as we\n # do below.\n #\n exp = staticmethod(exp_func(base))\n log = staticmethod(log_func(base))\n\n def add(self, x, y):\n \"\"\"\n Add the arrays element-wise. Neither x nor y will be modified.\n\n Assumption: :math:`y >= 0`.\n\n Operation: :math:`z[i] = x[i] + y[i]`\n\n Parameters\n ----------\n x, y : NumPy arrays, shape (n,)\n The arrays to add.\n\n Returns\n -------\n z : NumPy array, shape (n,)\n The resultant array.\n\n \"\"\"\n z = x + y\n return z\n\n def add_inplace(self, x, y):\n \"\"\"\n Adds `y` to `x`, in-place. 
`x` will be modified, but `y` will not.\n\n Assumption: :math:`y >= 0`.\n\n Operation: :math:`x[i] += y[i]`\n\n Parameters\n ----------\n x, y : NumPy arrays, shape (n,)\n The arrays to add.\n\n Returns\n -------\n x : NumPy array, shape (n,)\n The resultant array.\n\n \"\"\"\n x += y\n return x\n\n def add_reduce(self, x, axis=None):\n \"\"\"\n Performs an `addition' reduction on `x`.\n\n Assumption: :math:`y >= 0`.\n\n Operation: :math:`z = \\\\sum_i x[i]`\n\n Returns\n -------\n z : float\n The summation of the elements in `x`.\n\n \"\"\"\n z = x.sum(axis=axis)\n return z\n\n def mult(self, x, y):\n \"\"\"\n Multiplies the arrays element-wise. Neither x nor y will be modified.\n\n Operation: :math:`z[i] = x[i] * y[i]`\n\n Parameters\n ----------\n x, y : NumPy arrays, shape (n,)\n The arrays to multiply.\n\n Returns\n -------\n z : NumPy array, shape (n,)\n The resultant array.\n\n \"\"\"\n z = x * y\n return z\n\n def mult_inplace(self, x, y):\n \"\"\"\n Multiplies `y` to `x`, in-place. `x` will be modified, but `y` will not.\n\n Operation: :math:`x[i] *= y[i]`\n\n Parameters\n ----------\n x, y : NumPy arrays, shape (n,)\n The arrays to multiply.\n\n Returns\n -------\n x : NumPy array, shape (n,)\n The resultant array.\n\n \"\"\"\n x *= y\n return x\n\n def mult_reduce(self, x, axis=None):\n \"\"\"\n Performs an `multiplication' reduction on `x`.\n\n Operation: :math:`z = \\\\prod_i x[i]`\n\n Returns\n -------\n z : float\n The product of the elements in `x`.\n\n \"\"\"\n z = np.prod(x, axis=axis)\n return z\n\n def invert(self, x):\n \"\"\"\n Returns the element-wise multiplicative inverse of x.\n\n Operation: :math:`z[i] = 1/x[i]`\n\n Parameters\n ----------\n x : NumPy array, shape (n,)\n The array to invert.\n\n Returns\n -------\n z : NumPy array, shape (n,)\n The inverted array.\n\n \"\"\"\n z = 1 / x\n return z\n\n def normalize(self, x, axis=None):\n \"\"\"\n Returns a normalized version of x.\n\n Operation: :math:`z[i] = x[i] / sum(x)`\n\n If x is 2D and axis is None, then normalization is over all elements.\n Use axis=-1 to normalize each row of x.\n\n Parameters\n ----------\n x : NumPy array, shape (n,)\n The array to normalize.\n\n Returns\n -------\n z : NumPy array, shape (n,)\n The normalized array.\n\n \"\"\"\n z = x / x.sum(axis=None)\n return z\n\n\ndef set_add(ops):\n \"\"\"\n Set the add method on the LogOperations instance.\n\n \"\"\"\n # To preserve numerical accuracy, we must make use of a logaddexp\n # function. These functions only exist in Numpy for base-e and base-2.\n # For all other bases, we must convert and then convert back.\n\n # In each case, we use default arguments to make the function that we\n # are calling 'local'.\n base = ops.base\n if base == 2:\n def add(self, x, y, func=np.logaddexp2):\n return func(x, y)\n elif base == 'e' or np.isclose(base, np.e):\n def add(self, x, y, func=np.logaddexp):\n return func(x, y)\n else:\n # No need to optimize this...\n def add(self, x, y):\n # Convert log_b probabilities to log_2 probabilities.\n x2 = x * np.log2(base)\n y2 = y * np.log2(base)\n z = np.logaddexp2(x2, y2)\n # Convert log_2 probabilities to log_b probabilities.\n z *= self.log(2)\n return z\n\n add.__doc__ = \"\"\"\n Add the arrays element-wise. 
Neither x nor y will be modified.\n\n Assumption: y <= 0.\n\n Parameters\n ----------\n x, y : NumPy arrays, shape (n,)\n The arrays to add.\n\n Returns\n -------\n z : NumPy array, shape (n,)\n The resultant array.\n\n \"\"\"\n ops.add = MethodType(add, ops)\n\n\ndef set_add_inplace(ops):\n \"\"\"\n Set the add_inplace method on the LogOperations instance.\n\n \"\"\"\n base = ops.base\n if base == 2:\n def add_inplace(self, x, y, func=np.logaddexp2):\n return func(x, y, x)\n elif base == 'e' or np.isclose(base, np.e):\n def add_inplace(self, x, y, func=np.logaddexp):\n return func(x, y, x)\n else:\n def add_inplace(self, x, y):\n x *= np.log2(base)\n y2 = y * np.log2(base)\n np.logaddexp2(x, y2, x)\n x *= self.log(2)\n return x\n\n add_inplace.__doc__ = \"\"\"\n Adds `y` to `x`, in-place. `x` will be modified, but `y` will not.\n\n Assumption: :math:`y <= 0`.\n\n Parameters\n ----------\n x, y : NumPy arrays, shape (n,)\n The arrays to add.\n\n Returns\n -------\n x : NumPy array, shape (n,)\n The resultant array.\n\n \"\"\"\n ops.add_inplace = MethodType(add_inplace, ops)\n\n\ndef set_add_reduce(ops):\n \"\"\"\n Set the add_reduce method on the LogOperations instance.\n\n \"\"\"\n # https://github.com/numpy/numpy/issues/4599\n base = ops.base\n if base == 2:\n def add_reduce(self, x, axis=None, func=np.logaddexp2):\n if len(x) == 0:\n # Since logaddexp.identity is None, we handle it separately.\n z = self.zero\n else:\n # Note, we are converting to a NumPy array, if necessary.\n z = func.reduce(x, axis=axis, dtype=float)\n return z\n\n elif base == 'e' or np.isclose(base, np.e):\n def add_reduce(self, x, axis=None, func=np.logaddexp):\n if len(x) == 0:\n # Since logaddexp.identity is None, we handle it separately.\n z = self.zero\n else:\n # Note, we are converting to a NumPy array, if necessary.\n z = func.reduce(x, axis=axis, dtype=float)\n return z\n\n else:\n def add_reduce(self, x, axis=None):\n if len(x) == 0:\n # Since logaddexp.identity is None, we handle it separately.\n z = self.zero\n else:\n # Note, we are converting to a NumPy array, if necessary.\n # Change the base-2, add, and then convert back.\n x2 = x * np.log2(base)\n z = np.logaddexp2.reduce(x2, axis=axis, dtype=float)\n z /= np.log2(base)\n return z\n\n add_reduce.__doc__ = \"\"\"\n Performs an `addition' reduction on `x`.\n\n Assumption: :math:`y <= 0`.\n\n Returns\n -------\n z : float\n The summation of the elements in `x`.\n\n \"\"\"\n ops.add_reduce = MethodType(add_reduce, ops)\n\n\nclass LogOperations(Operations):\n\n one = None\n zero = None\n base = None\n exp = None\n log = None\n\n def __init__(self, base):\n \"\"\"\n Initialize the log operation manager.\n\n Parameters\n ----------\n base : float\n The base of the logarithm.\n\n \"\"\"\n self.set_base(base)\n\n def set_base(self, base):\n \"\"\"\n Change the base of the logarithm.\n\n Parameters\n ----------\n base : float\n The base of the logarithm.\n\n \"\"\"\n self.base = base\n self.exp = exp_func(base)\n self.log = log_func(base)\n # Note: When base < 1, zero == +inf. When base > 1, zero == -inf.\n self.one = self.log(1)\n self.zero = self.log(0)\n\n # Update the add methods.\n set_add(self)\n set_add_inplace(self)\n set_add_reduce(self)\n\n def mult(self, x, y):\n \"\"\"\n Multiplies the arrays element-wise. 
Neither `x` nor `y` will be modified.\n\n Parameters\n ----------\n x, y : NumPy arrays, shape (n,)\n The arrays to multiply.\n\n Returns\n -------\n z : NumPy array, shape (n,)\n The resultant array.\n\n \"\"\"\n z = x + y\n return z\n\n def mult_inplace(self, x, y):\n \"\"\"\n Multiplies `y` to `x`, in-place. `x` will be modified, but `y` will not.\n\n Parameters\n ----------\n x, y : NumPy arrays, shape (n,)\n The arrays to multiply.\n\n Returns\n -------\n x : NumPy array, shape (n,)\n The resultant array.\n\n \"\"\"\n x += y\n return x\n\n def mult_reduce(self, x, axis=None):\n \"\"\"\n Performs an `multiplication' reduction on `x`.\n\n Returns\n -------\n z : float\n The product of the elements in `x`.\n\n \"\"\"\n # The identity for addition in NumPy is zero.\n # This corresponds to an identity of 1 for log operations, and this is\n # exactly the desired identity for multiplying probabilities.\n z = x.sum(axis=axis)\n return z\n\n def invert(self, x):\n \"\"\"\n Returns the element-wise multiplicative inverse of `x`: :math:`1/x`.\n\n Parameters\n ----------\n x : NumPy array, shape (n,)\n The array to invert.\n\n Returns\n -------\n z : NumPy array, shape (n,)\n The inverted array.\n\n \"\"\"\n z = -x\n return z\n\n def normalize(self, x, axis=None):\n \"\"\"\n Returns a normalized version of `x`.\n\n Non-log equivalent operation: :math:`z[i] = x[i] / sum(x)`\n\n If `x` is 2D and axis is None, then normalization is over all elements.\n Use axis=-1 to normalize each row of `x`.\n\n Parameters\n ----------\n x : NumPy array, shape (n,)\n The array to normalize.\n\n Returns\n -------\n z : NumPy array, shape (n,)\n The normalized array.\n\n \"\"\"\n # The API way would be: mult(x, invert( add_reduce(x) ))\n # We'll avoid some of those function calls.\n z = x - self.add_reduce(x, axis=axis)\n return z\n\n\ncache = {\n 'linear': LinearOperations(),\n 2: LogOperations(2),\n 'e': LogOperations('e')\n}\n", "\"\"\"\nCompute the maximum correlation:\n.. math::\n \\rho(X:Y) = max_{f, g} E(f(X)g(Y))\n\"\"\"\n\nimport numpy as np\n\nfrom ..exceptions import ditException\nfrom ..helpers import normalize_rvs\n\n\n__all__ = (\n 'conditional_maximum_correlation_pmf',\n 'maximum_correlation',\n 'maximum_correlation_pmf',\n)\n\n\nsvdvals = lambda m: np.linalg.svd(m, compute_uv=False)\n\n\ndef conditional_maximum_correlation_pmf(pmf):\n \"\"\"\n Compute the conditional maximum correlation from a 3-dimensional\n pmf. The maximum correlation is computed between the first two dimensions\n given the third.\n\n Parameters\n ----------\n pmf : np.ndarray\n The probability distribution.\n\n Returns\n -------\n rho_max : float\n The conditional maximum correlation.\n \"\"\"\n pXYgZ = pmf / pmf.sum(axis=(0, 1), keepdims=True)\n pXgZ = pXYgZ.sum(axis=1, keepdims=True)\n pYgZ = pXYgZ.sum(axis=0, keepdims=True)\n Q = np.where(pmf, pXYgZ / (np.sqrt(pXgZ) * np.sqrt(pYgZ)), 0)\n Q[np.isnan(Q)] = 0\n\n rho_max = max(svdvals(np.squeeze(m))[1] for m in np.dsplit(Q, Q.shape[2]))\n\n return rho_max\n\n\ndef maximum_correlation_pmf(pXY):\n \"\"\"\n Compute the maximum correlation from a 2-dimensional\n pmf. 
The maximum correlation is computed between the two dimensions.\n\n Parameters\n ----------\n pmf : np.ndarray\n The probability distribution.\n\n Returns\n -------\n rho_max : float\n The maximum correlation.\n \"\"\"\n pX = pXY.sum(axis=1, keepdims=True)\n pY = pXY.sum(axis=0, keepdims=True)\n Q = pXY / (np.sqrt(pX) * np.sqrt(pY))\n Q[np.isnan(Q)] = 0\n\n rho_max = svdvals(Q)[1]\n\n return rho_max\n\n\ndef maximum_correlation(dist, rvs=None, crvs=None, rv_mode=None):\n \"\"\"\n Compute the (conditional) maximum or Renyi correlation between two variables:\n\n .. math::\n rho_max = max_{f, g} rho(f(X,Z), g(Y,Z) | Z)\n\n Parameters\n ----------\n dist : Distribution\n The distribution for which the maximum correlation is to computed.\n rvs : list, None; len(rvs) == 2\n A list of lists. Each inner list specifies the indexes of the random\n variables for which the maximum correlation is to be computed. If None,\n then all random variables are used, which is equivalent to passing\n `rvs=dist.rvs`.\n crvs : list, None\n A single list of indexes specifying the random variables to\n condition on. If None, then no variables are conditioned on.\n rv_mode : str, None\n Specifies how to interpret `rvs` and `crvs`. Valid options are:\n {'indices', 'names'}. If equal to 'indices', then the elements of\n `crvs` and `rvs` are interpreted as random variable indices. If\n equal to 'names', the the elements are interpreted as random\n variable names. If `None`, then the value of `dist._rv_mode` is\n consulted, which defaults to 'indices'.\n\n Returns\n -------\n rho_max : float; -1 <= rho_max <= 1\n The conditional maximum correlation between `rvs` given `crvs`.\n \"\"\"\n rvs, crvs, rv_mode = normalize_rvs(dist, rvs, crvs, rv_mode)\n\n if len(rvs) != 2:\n msg = 'Maximum correlation can only be computed for 2 variables, not {}.'.format(len(rvs))\n raise ditException(msg)\n\n if crvs:\n dist = dist.copy().coalesce(rvs + [crvs])\n else:\n dist = dist.copy().coalesce(rvs)\n\n dist.make_dense()\n pmf = dist.pmf.reshape(list(map(len, dist.alphabet)))\n\n if crvs:\n rho_max = conditional_maximum_correlation_pmf(pmf)\n else:\n rho_max = maximum_correlation_pmf(pmf)\n\n return rho_max\n", "\"\"\"\nTests for dit.bgm.\n\"\"\"\n\nimport pytest\n\nimport numpy as np\nimport dit\nimport networkx as nx\n\n\ndef test_distribution_from_bayesnet_nonames():\n # Smoke test without rv names.\n x = nx.DiGraph()\n d = dit.example_dists.Xor()\n cdist, dists = d.condition_on([0, 1])\n x.add_edge(0, 2)\n x.add_edge(1, 2)\n x.nodes[2]['dist'] = (cdist.outcomes, dists)\n x.nodes[0]['dist'] = cdist.marginal([0])\n x.nodes[1]['dist'] = cdist.marginal([1])\n d2 = dit.distribution_from_bayesnet(x)\n d3 = dit.distribution_from_bayesnet(x, [0, 1, 2])\n assert d.is_approx_equal(d2)\n assert d.is_approx_equal(d3)\n\n # Use a dictionary too\n x.nodes[2]['dist'] = dict(zip(cdist.outcomes, dists))\n d4 = dit.distribution_from_bayesnet(x)\n assert d.is_approx_equal(d4)\n\n del x.nodes[1]['dist']\n with pytest.raises(ValueError):\n dit.distribution_from_bayesnet(x)\n\n\ndef test_distribution_from_bayesnet_names():\n # Smoke test with rv names.\n x = nx.DiGraph()\n d = dit.example_dists.Xor()\n d.set_rv_names(['A', 'B', 'C'])\n cdist, dists = d.condition_on(['A', 'B'])\n x.add_edge('A', 'C')\n x.add_edge('B', 'C')\n x.nodes['C']['dist'] = (cdist.outcomes, dists)\n x.nodes['A']['dist'] = cdist.marginal(['A'])\n x.nodes['B']['dist'] = cdist.marginal(['B'])\n d2 = dit.distribution_from_bayesnet(x)\n assert d.is_approx_equal(d2)\n\n # Specify names\n 
d3 = dit.distribution_from_bayesnet(x, ['A', 'B', 'C'])\n assert d.is_approx_equal(d2)\n\n # Test with a non-Cartesian product distribution\n dd = x.nodes['B']['dist']\n dd._sample_space = dit.SampleSpace(list(dd.sample_space()))\n d3 = dit.distribution_from_bayesnet(x)\n assert d.is_approx_equal(d3)\n\n\ndef test_distribution_from_bayesnet_func():\n # Smoke test with distributions as functions.\n x = nx.DiGraph()\n x.add_edge('A', 'C')\n x.add_edge('B', 'C')\n\n d = dit.example_dists.Xor()\n sample_space = d._sample_space\n\n def uniform(node_val, parents):\n return 0.5\n\n def xor(node_val, parents):\n if parents['A'] != parents['B']:\n output = '1'\n else:\n output = '0'\n\n # If output agrees with passed in output, p = 1\n p = int(output == node_val)\n\n return p\n\n x.nodes['C']['dist'] = xor\n x.nodes['A']['dist'] = uniform\n x.nodes['B']['dist'] = uniform\n # Samplespace is required when functions are callable.\n with pytest.raises(ValueError):\n dit.distribution_from_bayesnet(x)\n\n d2 = dit.distribution_from_bayesnet(x, sample_space=sample_space)\n assert d.is_approx_equal(d2)\n\n ss = ['000', '001', '010', '011', '100', '101', '110', '111']\n d3 = dit.distribution_from_bayesnet(x, sample_space=ss)\n # Can't test using is_approx_equal, since one has SampleSpace and the\n # other has CartesianProduct as sample spaces. So is_approx_equal would\n # always be False.\n assert np.allclose(d.pmf, d3.pmf)\n assert list(d.sample_space()) == list(d3.sample_space())\n\n\ndef test_distribution_from_bayesnet_error():\n # Test distribution_from_bayesnet with functions and distributions.\n # This is not allowed and should fail.\n\n x = nx.DiGraph()\n x.add_edge('A', 'C')\n x.add_edge('B', 'C')\n\n d = dit.example_dists.Xor()\n sample_space = d._sample_space\n\n def uniform(node_val, parents):\n return 0.5\n\n unif = dit.Distribution('01', [.5, .5])\n unif.set_rv_names('A')\n\n x.nodes['C']['dist'] = uniform\n x.nodes['A']['dist'] = unif\n x.nodes['B']['dist'] = uniform\n\n with pytest.raises(Exception):\n dit.distribution_from_bayesnet(x, sample_space=sample_space)\n\n\ndef test_bad_names1():\n x = nx.DiGraph()\n d = dit.example_dists.Xor()\n cdist, dists = d.condition_on([0, 1])\n x.add_edge(0, 2)\n x.add_edge(1, 2)\n x.nodes[2]['dist'] = (cdist.outcomes, dists)\n x.nodes[0]['dist'] = cdist.marginal([0])\n x.nodes[1]['dist'] = cdist.marginal([1])\n with pytest.raises(ValueError):\n dit.distribution_from_bayesnet(x, [0, 1])\n with pytest.raises(ValueError):\n dit.distribution_from_bayesnet(x, [0, 1, 1])\n with pytest.raises(ValueError):\n dit.distribution_from_bayesnet(x, [0, 1, 2, 3])\n with pytest.raises(ValueError):\n dit.distribution_from_bayesnet(x, [0, 1, 2, 2])\n\n\ndef test_bad_names2():\n \"\"\"\n Now the distributions have bad names.\n\n \"\"\"\n x = nx.DiGraph()\n d = dit.example_dists.Xor()\n cdist, dists = d.condition_on([0, 1])\n x.add_edge(0, 2)\n x.add_edge(1, 2)\n\n # Node 2 should have more than one dist. 
If we pass just a distribution in,\n # as if it had no parents, then an exception should raise.\n\n # x.nodes[2]['dist'] = (cdist.outcomes, dists)\n x.nodes[2]['dist'] = cdist.marginal([0])\n x.nodes[0]['dist'] = cdist.marginal([0])\n x.nodes[1]['dist'] = cdist.marginal([1])\n with pytest.raises(Exception):\n dit.distribution_from_bayesnet(x)\n\n # If they don't have the same length, it's invalid too.\n x.nodes[2]['dist'] = (cdist.outcomes, dists[:0])\n with pytest.raises(Exception):\n dit.distribution_from_bayesnet(x)\n", "\"\"\"\nRenyi Entropy.\n\"\"\"\n\nimport numpy as np\n\nfrom ..helpers import normalize_rvs\nfrom ..utils import flatten\nfrom ..multivariate import entropy\n\n\n__all__ = (\n 'renyi_entropy',\n)\n\n\ndef renyi_entropy(dist, order, rvs=None, rv_mode=None):\n \"\"\"\n Compute the Renyi entropy of order `order`.\n\n Parameters\n ----------\n dist : Distribution\n The distribution to take the Renyi entropy of.\n order : float >= 0\n The order of the Renyi entropy.\n rvs : list, None\n The indexes of the random variable used to calculate the Renyi entropy\n of. If None, then the Renyi entropy is calculated over all random\n variables.\n rv_mode : str, None\n Specifies how to interpret `rvs` and `crvs`. Valid options are:\n {'indices', 'names'}. If equal to 'indices', then the elements of\n `crvs` and `rvs` are interpreted as random variable indices. If equal\n to 'names', the the elements are interpreted as random variable names.\n If `None`, then the value of `dist._rv_mode` is consulted, which\n defaults to 'indices'.\n\n Returns\n -------\n H_a : float\n The Renyi entropy.\n\n Raises\n ------\n ditException\n Raised if `rvs` or `crvs` contain non-existant random variables.\n ValueError\n Raised if `order` is not a non-negative float.\n\n \"\"\"\n if order < 0:\n msg = \"`order` must be a non-negative real number\"\n raise ValueError(msg)\n\n if dist.is_joint and rvs is not None:\n rvs = list(flatten(normalize_rvs(dist, rvs, None, rv_mode)[0]))\n dist = dist.marginal(rvs, rv_mode)\n\n pmf = dist.pmf\n\n if order == 0:\n H_a = np.log2(pmf.size)\n elif order == 1:\n H_a = entropy(dist)\n elif order == np.inf:\n H_a = -np.log2(pmf.max())\n else:\n H_a = 1 / (1 - order) * np.log2((pmf**order).sum())\n\n return H_a\n" ]
[ [ "numpy.log", "numpy.log2", "numpy.asarray", "numpy.logaddexp2.reduce", "numpy.prod", "numpy.logaddexp2", "numpy.exp", "numpy.isclose" ], [ "numpy.linalg.svd", "numpy.sqrt", "numpy.isnan", "numpy.squeeze", "numpy.dsplit" ], [ "numpy.allclose" ], [ "numpy.log2" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
NingAnMe/voxelmorph
[ "3a1a4c2f456af2dba5552efc1b08c68af38e54dc" ]
[ "scripts/sphere/register.py" ]
[ "#!/usr/bin/env python\n\n\"\"\"\nExample script to register two volumes with VoxelMorph models.\n\nPlease make sure to use trained models appropriately. Let's say we have a model trained to register \na scan (moving) to an atlas (fixed). To register a scan to the atlas and save the warp field, run:\n\n register.py --moving moving.nii.gz --fixed fixed.nii.gz --model model.pt \n --moved moved.nii.gz --warp warp.nii.gz\n\nThe source and target input images are expected to be affinely registered.\n\nIf you use this code, please cite the following, and read function docs for further info/citations\n VoxelMorph: A Learning Framework for Deformable Medical Image Registration \n G. Balakrishnan, A. Zhao, M. R. Sabuncu, J. Guttag, A.V. Dalca. \n IEEE TMI: Transactions on Medical Imaging. 38(8). pp 1788-1800. 2019. \n\n or\n\n Unsupervised Learning for Probabilistic Diffeomorphic Registration for Images and Surfaces\n A.V. Dalca, G. Balakrishnan, J. Guttag, M.R. Sabuncu. \n MedIA: Medical Image Analysis. (57). pp 226-236, 2019 \n\nCopyright 2020 Adrian V. Dalca\n\nLicensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in \ncompliance with the License. You may obtain a copy of the License at\n\nhttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software distributed under the License is\ndistributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or \nimplied. See the License for the specific language governing permissions and limitations under \nthe License.\n\"\"\"\n\nimport os\nimport argparse\nimport matplotlib.pyplot as plt\n# third party\nimport numpy as np\nimport nibabel as nib\nimport torch\nfrom scipy.interpolate import RegularGridInterpolator\nfrom astropy.coordinates import cartesian_to_spherical, spherical_to_cartesian\n\n# import voxelmorph with sphere backend\nos.environ['VXM_BACKEND'] = 'sphere'\nimport voxelmorph as vxm # nopep8\nimport math\n\n# parse commandline args\nparser = argparse.ArgumentParser()\nparser.add_argument('--moving', required=True, help='moving image (source) filename')\nparser.add_argument('--fixed', required=True, help='fixed image (target) filename')\nparser.add_argument('--moved', help='warped image output filename')\nparser.add_argument('--model', required=True, help='pytorch model for nonlinear registration')\n# parser.add_argument('--normalize_type', default='std', help='select the data normalization processing type')\nparser.add_argument('--warp', help='output warp deformation filename')\nparser.add_argument('--sphere_sub', help='sphere_sub image filename')\nparser.add_argument('--sphere_atlas', help='sphere_atlas image filename')\nparser.add_argument('--sphere_reg', help='sphere.reg image output filename')\nparser.add_argument('--sulc_sub', help='silc_sub image filename')\nparser.add_argument('--sulc_atlas', help='silc_atlas image filename')\nparser.add_argument('--sphere_freesurfer', help='sphere_freesurfer image filename')\nparser.add_argument('--plot_image', help='show time image output filename')\nparser.add_argument('--plot_image_dif_1', help='show dif image output filename')\nparser.add_argument('--plot_image_dif_2', help='show dif image output filename')\nparser.add_argument('-g', '--gpu', help='GPU number(s) - if not supplied, CPU is used')\nparser.add_argument('--multichannel', action='store_true',\n help='specify that data has multiple channels')\n\nargs = parser.parse_args()\n\n\ndef meannormalize(sub_data):\n 
mean = np.mean(sub_data)\n std = np.std(sub_data)\n norm = (sub_data - mean) / std\n return norm, mean, std\n\n\ndef backmeannormalize(input, mean, std):\n output = input * std + mean\n return output\n\n\ndef minmaxnormalize(sub_data):\n zeros = sub_data == 0\n max = np.max(sub_data)\n min = np.min(sub_data)\n norm = (sub_data - min) / (max - min)\n norm[zeros] = 0\n return norm\n\n\ndef backminmaxnormalize(input, max, min):\n output = input * (max - min) + min\n return output\n\n\ndef domainnorm(sub_data):\n domain = 33\n norm = sub_data / domain\n return norm\n\n\ndef backdomainnorm(sub_data):\n domain = 33\n output = sub_data * domain\n return output\n\n\n# def normalize_forword(data, type=\"std\"):\n# if type == \"std\":\n# return meannormalize(data)\n# elif type == \"min_max\":\n# return minmaxnormalize(data)\n# else:\n# raise KeyError(\"type is error\")\n#\n# def normalize_backword(data, a, b, type=\"std\"):\n# if type == \"std\":\n# return backmeannormalize(data, a, b)\n# elif type == \"min_max\":\n# return backminmaxnormalize(data, a, b)\n# else:\n# raise KeyError(\"type is error\")\n\ndef interpolate(warp_file, lh_sphere):\n x = np.linspace(-128, 128, 256) # phi ###\n y = np.linspace(0, 512, 512) # theta ###\n\n # print(warp_file.files)\n warp = warp_file.squeeze()\n warp = warp.permute(0, 2, 1)\n warp = warp.detach().numpy()\n # warp = warp_file['vol']\n # warp = np.moveaxis(warp, 1, -1)\n\n interpolate_function_x = RegularGridInterpolator((x, y), -warp[0]) # x-axis\n interpolate_function_y = RegularGridInterpolator((x, y), -warp[1]) # y-axis\n\n coords, faces = nib.freesurfer.read_geometry(lh_sphere)\n r, phi, theta = cartesian_to_spherical(coords[:, 0], coords[:, 1], coords[:, 2])\n p = phi.degree\n t = theta.degree\n\n theta_bins = 512\n phi_bins = 256\n theta_width = math.degrees(2 * np.pi) / theta_bins\n t /= theta_width\n phi_width = math.degrees(np.pi) / phi_bins\n p /= phi_width\n t = t.reshape(-1, 1)\n p = p.reshape(-1, 1)\n pts = np.concatenate((p, t), axis=1)\n\n new_pts_x = interpolate_function_x(pts)\n new_pts_y = interpolate_function_y(pts)\n x_prime = pts.T[0] + new_pts_x\n y_prime = pts.T[1] + new_pts_y\n\n x_prime *= phi_width\n y_prime *= theta_width\n y_prime = np.clip(y_prime, 0, 360)\n x_prime = np.clip(x_prime, -90, 90)\n\n t_prime = [math.radians(i) for i in y_prime]\n p_prime = [math.radians(i) for i in x_prime]\n t_prime = np.array(t_prime)\n p_prime = np.array(p_prime)\n\n return r, p_prime, t_prime\n\n# save 4 image\ndef save4image(lh_sphere_sub, lh_sphere_atlas, lh_sulc_sub, lh_sulc_atlas, lh_sphere_freesurfer, phi_prime, theta_prime,\n imagesavefilename):\n lh_morph_sulc_sub = nib.freesurfer.read_morph_data(lh_sulc_sub)\n lh_morph_sulc_atlas = nib.freesurfer.read_morph_data(lh_sulc_atlas)\n\n coords_sub, faces_sub = nib.freesurfer.read_geometry(lh_sphere_sub)\n r_sub, phi_sub, theta_sub = cartesian_to_spherical(coords_sub[:, 0], coords_sub[:, 1], coords_sub[:, 2])\n coords_atlas, faces_atlas = nib.freesurfer.read_geometry(lh_sphere_atlas)\n r_atlas, phi_atlas, theta_atlas = cartesian_to_spherical(coords_atlas[:, 0], coords_atlas[:, 1], coords_atlas[:, 2])\n coords_freesurfer, faces_freesurfer = nib.freesurfer.read_geometry(lh_sphere_freesurfer)\n r_reg, phi_reg, theta_reg = cartesian_to_spherical(coords_freesurfer[:, 0], coords_freesurfer[:, 1],\n coords_freesurfer[:, 2])\n\n fig = plt.figure(figsize=(14, 7))\n ax = fig.add_subplot(141)\n ax.scatter(phi_sub.degree, theta_sub.degree, s=0.1,\n c=lh_morph_sulc_sub) # phi.degree: [-90, 90], theta.degree: 
[0, 360]\n plt.title('Moving')\n\n ax = fig.add_subplot(142)\n ax.scatter(phi_atlas.degree, theta_atlas.degree, s=0.1, c=lh_morph_sulc_atlas)\n plt.title('Fixed')\n\n ax = fig.add_subplot(143)\n phi_prime = [math.degrees(p) for p in phi_prime]\n thtea_prime = [math.degrees(t) for t in theta_prime]\n ax.scatter(phi_prime, thtea_prime, s=0.1, c=lh_morph_sulc_sub) # (256, 512)\n plt.title('Moved')\n\n ax = fig.add_subplot(144)\n ax.scatter(phi_reg.degree, theta_reg.degree, s=0.1, c=lh_morph_sulc_sub) # (256, 512)\n plt.title('Moved FreeSurfer')\n\n plt.savefig(imagesavefilename)\n\n\ndef xyz2degree(lh_sphere, lh_sulc):\n # coords: return (x, y, z) coordinates\n # faces: defining mesh triangles\n coords, faces = nib.freesurfer.read_geometry(lh_sphere)\n\n # (r: radius, phi: latitude, theta: longitude) in radians\n r, phi, theta = cartesian_to_spherical(coords[:, 0], coords[:, 1], coords[:, 2])\n\n lat = phi.degree + 90\n lon = theta.degree\n # resize to (512, 256)\n y_bins = 512\n x_bins = 256\n y_width = math.degrees(2 * np.pi) / y_bins\n ys = lon // y_width\n x_width = math.degrees(np.pi) / x_bins\n xs = lat // x_width\n\n ys = np.clip(ys, 0, 511)\n xs = np.clip(xs, 0, 255)\n\n # load curv and sulc info\n lh_morph_sulc = nib.freesurfer.read_morph_data(lh_sulc)\n xs = xs.astype(np.int32)\n ys = ys.astype(np.int32)\n\n # values store [theta, phi, sulc value, curv value]\n values = np.zeros((512, 256))\n values[ys, xs] = lh_morph_sulc\n # values[1, ys, xs] = lh_morph_curv\n\n return values\n\ndef xyz2degree2(phi, theta, lh_sulc):\n\n lat = phi + 90\n lon = theta\n # resize to (512, 256)\n y_bins = 512\n x_bins = 256\n y_width = math.degrees(2 * np.pi) / y_bins\n ys = lon // y_width\n x_width = math.degrees(np.pi) / x_bins\n xs = lat // x_width\n\n ys = np.clip(ys, 0, 511)\n xs = np.clip(xs, 0, 255)\n\n # load curv and sulc info\n lh_morph_sulc = nib.freesurfer.read_morph_data(lh_sulc)\n xs = xs.astype(np.int32)\n ys = ys.astype(np.int32)\n\n # values store [theta, phi, sulc value, curv value]\n values = np.zeros((512, 256))\n values[ys, xs] = lh_morph_sulc\n # values[1, ys, xs] = lh_morph_curv\n\n return values\n\n# device handling\nif args.gpu and (args.gpu != '-1'):\n device = 'cuda'\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\nelse:\n device = 'cpu'\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1'\n\n# load moving and fixed images\nadd_feat_axis = not args.multichannel\nmoving = vxm.py.utils.load_volfile(args.moving, add_batch_axis=True, add_feat_axis=add_feat_axis)\nfixed, fixed_affine = vxm.py.utils.load_volfile(\n args.fixed, add_batch_axis=True, add_feat_axis=add_feat_axis, ret_affine=True)\n\n# load and set up model\nmodel = vxm.networks.VxmDense.load(args.model, device)\nmodel.to(device)\nmodel.eval()\n\n# set up normalize type\n# normalize_type = args.normalize_type\n# normalize_type = \"min_max\"\n\n# set up tensors and permute\n# moving, a_moving, b_moving = normalize_forword(moving, type=normalize_type)\n# fixed, a_fixed, b_fixed = normalize_forword(fixed, type=normalize_type)\n\n# moving = domainnorm(moving)\nmoving = minmaxnormalize(moving)\nfixed = minmaxnormalize(fixed)\n\ninput_moving = torch.from_numpy(moving).to(device).float().permute(0, 3, 1, 2)\ninput_fixed = torch.from_numpy(fixed).to(device).float().permute(0, 3, 1, 2)\n\n# predict\nmoved, warp = model(input_moving, input_fixed, registration=True)\n# moved = normalize_backword(moved, a_moving, b_moving, type=normalize_type)\n# moved = backdomainnorm(moved)\n\n\nif args.sphere_sub:\n c, faces = 
nib.freesurfer.read_geometry(args.sphere_sub)\n coords = np.empty(shape=c.shape)\n r, phi_prime, theta_prime = interpolate(warp, args.sphere_sub)\n coords[:, 0], coords[:, 1], coords[:, 2] = spherical_to_cartesian(r, phi_prime, theta_prime)\n nib.freesurfer.io.write_geometry(args.sphere_reg, coords, faces)\n\nif args.plot_image:\n lh_sphere_sub = args.sphere_sub\n lh_sphere_atlas = args.sphere_atlas\n lh_sulc_sub = args.sulc_sub\n lh_sulc_atlas = args.sulc_atlas\n lh_sphere_freesurfer = args.sphere_freesurfer\n imagesavefilename = args.plot_image\n save4image(lh_sphere_sub, lh_sphere_atlas, lh_sulc_sub, lh_sulc_atlas, lh_sphere_freesurfer, phi_prime, theta_prime,\n imagesavefilename)\nif args.plot_image_dif_1 or args.plot_image_dif_2:\n imagesavefilenamedif_1 = args.plot_image_dif_1\n imagesavefilenamedif_2 = args.plot_image_dif_2\n dif_moving = xyz2degree(lh_sphere_sub, lh_sulc_sub)\n dif_moved = xyz2degree2(phi_prime, theta_prime, lh_sulc_sub)\n dif_freesurfer = xyz2degree(lh_sphere_freesurfer, lh_sulc_sub)\n dif_moved_moving = dif_moved - dif_moving\n print(np.nanmax(dif_moved_moving), np.nanmin(dif_moved_moving), np.nanmean(dif_moved_moving))\n dif_freesurfer_moved = dif_freesurfer - dif_moved\n\n plt.figure(figsize=(14, 7))\n plt.imshow(dif_moved_moving)\n plt.title('moved_moving')\n plt.colorbar()\n plt.savefig(imagesavefilenamedif_1)\n\n plt.figure(figsize=(14, 7))\n plt.imshow(dif_freesurfer_moved)\n plt.title('freesurfer_moved')\n plt.colorbar()\n plt.savefig(imagesavefilenamedif_2)\n\n\n# save moved image\nif args.moved:\n moved = moved.detach().cpu().numpy().squeeze()\n vxm.py.utils.save_volfile(moved, args.moved, fixed_affine)\n\n# save warp\nif args.warp:\n warp = warp.detach().cpu().numpy().squeeze()\n vxm.py.utils.save_volfile(warp, args.warp, fixed_affine)\n" ]
[ [ "numpy.nanmax", "matplotlib.pyplot.imshow", "numpy.linspace", "numpy.nanmin", "numpy.concatenate", "numpy.max", "numpy.mean", "numpy.nanmean", "numpy.clip", "scipy.interpolate.RegularGridInterpolator", "torch.from_numpy", "numpy.std", "numpy.zeros", "matplotlib.pyplot.figure", "matplotlib.pyplot.title", "numpy.min", "matplotlib.pyplot.savefig", "numpy.array", "matplotlib.pyplot.colorbar", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.14", "1.6", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
meretp/pymor
[ "01876cd39e04bec6d4299f36b59663cd08beafd3", "0965a5c3d0725466103efae5190493fceb2bf441", "0965a5c3d0725466103efae5190493fceb2bf441", "01876cd39e04bec6d4299f36b59663cd08beafd3" ]
[ "src/pymor/reductors/residual.py", "src/pymor/bindings/ngsolve.py", "src/pymor/algorithms/lrradi.py", "src/pymortests/randrangefinder.py" ]
[ "# This file is part of the pyMOR project (https://www.pymor.org).\n# Copyright 2013-2021 pyMOR developers and contributors. All rights reserved.\n# License: BSD 2-Clause License (https://opensource.org/licenses/BSD-2-Clause)\n\nimport numpy as np\n\nfrom pymor.algorithms.image import estimate_image_hierarchical\nfrom pymor.algorithms.projection import project, project_to_subbasis\nfrom pymor.core.base import BasicObject\nfrom pymor.core.exceptions import ImageCollectionError\nfrom pymor.operators.constructions import ZeroOperator\nfrom pymor.operators.interface import Operator\n\n\nclass ResidualReductor(BasicObject):\n \"\"\"Generic reduced basis residual reductor.\n\n Given an operator and a right-hand side, the residual is given by::\n\n residual.apply(U, mu) == operator.apply(U, mu) - rhs.as_range_array(mu)\n\n When operator maps to functionals instead of vectors, we are interested in the Riesz\n representative of the residual::\n\n residual.apply(U, mu)\n == product.apply_inverse(operator.apply(U, mu) - rhs.as_range_array(mu))\n\n Given a basis `RB` of a subspace of the source space of `operator`, this reductor\n uses :func:`~pymor.algorithms.image.estimate_image_hierarchical` to determine\n a low-dimensional subspace containing the image of the subspace under\n `residual` (resp. `riesz_residual`), computes an orthonormal basis\n `residual_range` for this range space and then returns the Petrov-Galerkin projection ::\n\n projected_residual\n == project(residual, range_basis=residual_range, source_basis=RB)\n\n of the residual operator. Given a reduced basis coefficient vector `u`, w.r.t.\n `RB`, the (dual) norm of the residual can then be computed as ::\n\n projected_residual.apply(u, mu).norm()\n\n Moreover, a `reconstruct` method is provided such that ::\n\n residual_reductor.reconstruct(projected_residual.apply(u, mu))\n == residual.apply(RB.lincomb(u), mu)\n\n Parameters\n ----------\n RB\n |VectorArray| containing a basis of the reduced space onto which to project.\n operator\n See definition of `residual`.\n rhs\n See definition of `residual`. If `None`, zero right-hand side is assumed.\n product\n Inner product |Operator| w.r.t. which to orthonormalize and w.r.t. which to\n compute the Riesz representatives in case `operator` maps to functionals.\n riesz_representatives\n If `True` compute the Riesz representative of the residual.\n \"\"\"\n\n def __init__(self, RB, operator, rhs=None, product=None, riesz_representatives=False):\n assert RB in operator.source\n assert rhs is None \\\n or (rhs.source.is_scalar and rhs.range == operator.range and rhs.linear)\n assert product is None or product.source == product.range == operator.range\n\n self.__auto_init(locals())\n self.residual_range = operator.range.empty()\n self.residual_range_dims = []\n\n def reduce(self):\n if self.residual_range is not False:\n with self.logger.block('Estimating residual range ...'):\n try:\n self.residual_range, self.residual_range_dims = \\\n estimate_image_hierarchical([self.operator], [self.rhs],\n self.RB,\n (self.residual_range, self.residual_range_dims),\n orthonormalize=True, product=self.product,\n riesz_representatives=self.riesz_representatives)\n except ImageCollectionError as e:\n self.logger.warning(f'Cannot compute range of {e.op}. 
Evaluation will be slow.')\n self.residual_range = False\n\n if self.residual_range is False:\n operator = project(self.operator, None, self.RB)\n return NonProjectedResidualOperator(operator, self.rhs, self.riesz_representatives, self.product)\n\n with self.logger.block('Projecting residual operator ...'):\n if self.riesz_representatives:\n operator = project(self.operator, self.residual_range, self.RB, product=None) # the product cancels out\n rhs = project(self.rhs, self.residual_range, None, product=None)\n else:\n operator = project(self.operator, self.residual_range, self.RB, product=self.product)\n rhs = project(self.rhs, self.residual_range, None, product=self.product)\n\n return ResidualOperator(operator, rhs)\n\n def reconstruct(self, u):\n \"\"\"Reconstruct high-dimensional residual vector from reduced vector `u`.\"\"\"\n if self.residual_range is False:\n if self.product:\n return u * (u.norm() / u.norm(self.product))[0]\n else:\n return u\n else:\n return self.residual_range[:u.dim].lincomb(u.to_numpy())\n\n\nclass ResidualOperator(Operator):\n \"\"\"Instantiated by :class:`ResidualReductor`.\"\"\"\n\n def __init__(self, operator, rhs, name=None):\n self.__auto_init(locals())\n self.source = operator.source\n self.range = operator.range\n self.linear = operator.linear\n self.rhs_vector = rhs.as_range_array() if rhs and not rhs.parametric else None\n\n def apply(self, U, mu=None):\n V = self.operator.apply(U, mu=mu)\n if self.rhs:\n F = self.rhs_vector or self.rhs.as_range_array(mu)\n if len(V) > 1:\n V -= F[[0]*len(V)]\n else:\n V -= F\n return V\n\n def projected_to_subbasis(self, dim_range=None, dim_source=None, name=None):\n return ResidualOperator(project_to_subbasis(self.operator, dim_range, dim_source),\n project_to_subbasis(self.rhs, dim_range, None),\n name=name)\n\n\nclass NonProjectedResidualOperator(ResidualOperator):\n \"\"\"Instantiated by :class:`ResidualReductor`.\n\n Not to be used directly.\n \"\"\"\n\n def __init__(self, operator, rhs, riesz_representatives, product):\n super().__init__(operator, rhs)\n self.__auto_init(locals())\n\n def apply(self, U, mu=None):\n R = super().apply(U, mu=mu)\n if self.product:\n if self.riesz_representatives:\n R_riesz = self.product.apply_inverse(R)\n # divide by norm, except when norm is zero:\n inversel2 = 1./R_riesz.norm()\n inversel2 = np.nan_to_num(inversel2)\n R_riesz.scal(np.sqrt(R_riesz.pairwise_inner(R)) * inversel2)\n return R_riesz\n else:\n # divide by norm, except when norm is zero:\n inversel2 = 1./R.norm()\n inversel2 = np.nan_to_num(inversel2)\n R.scal(np.sqrt(self.product.pairwise_apply2(R, R)) * inversel2)\n return R\n else:\n return R\n\n def projected_to_subbasis(self, dim_range=None, dim_source=None, name=None):\n return self.with_(operator=project_to_subbasis(self.operator, None, dim_source))\n\n\nclass ImplicitEulerResidualReductor(BasicObject):\n \"\"\"Reduced basis residual reductor with mass operator for implicit Euler timestepping.\n\n Given an operator, mass and a functional, the concatenation of residual operator\n with the Riesz isomorphism is given by::\n\n riesz_residual.apply(U, U_old, mu)\n == product.apply_inverse(operator.apply(U, mu) + 1/dt*mass.apply(U, mu)\n - 1/dt*mass.apply(U_old, mu) - rhs.as_vector(mu))\n\n This reductor determines a low-dimensional subspace of the image of a reduced basis space under\n `riesz_residual` using :func:`~pymor.algorithms.image.estimate_image_hierarchical`, computes an\n orthonormal basis `residual_range` of this range space and then returns the 
Petrov-Galerkin\n projection ::\n\n projected_riesz_residual\n == riesz_residual.projected(range_basis=residual_range, source_basis=RB)\n\n of the `riesz_residual` operator. Given reduced basis coefficient vectors `u` and `u_old`,\n the dual norm of the residual can then be computed as ::\n\n projected_riesz_residual.apply(u, u_old, mu).norm()\n\n Moreover, a `reconstruct` method is provided such that ::\n\n residual_reductor.reconstruct(projected_riesz_residual.apply(u, u_old, mu))\n == riesz_residual.apply(RB.lincomb(u), RB.lincomb(u_old), mu)\n\n Parameters\n ----------\n operator\n See definition of `riesz_residual`.\n mass\n The mass operator. See definition of `riesz_residual`.\n dt\n The time step size. See definition of `riesz_residual`.\n rhs\n See definition of `riesz_residual`. If `None`, zero right-hand side is assumed.\n RB\n |VectorArray| containing a basis of the reduced space onto which to project.\n product\n Inner product |Operator| w.r.t. which to compute the Riesz representatives.\n \"\"\"\n\n def __init__(self, RB, operator, mass, dt, rhs=None, product=None):\n assert RB in operator.source\n assert rhs.source.is_scalar and rhs.range == operator.range and rhs.linear\n assert product is None or product.source == product.range == operator.range\n\n self.__auto_init(locals())\n self.residual_range = operator.range.empty()\n self.residual_range_dims = []\n\n def reduce(self):\n if self.residual_range is not False:\n with self.logger.block('Estimating residual range ...'):\n try:\n self.residual_range, self.residual_range_dims = \\\n estimate_image_hierarchical([self.operator, self.mass], [self.rhs],\n self.RB,\n (self.residual_range, self.residual_range_dims),\n orthonormalize=True, product=self.product,\n riesz_representatives=True)\n except ImageCollectionError as e:\n self.logger.warning(f'Cannot compute range of {e.op}. 
Evaluation will be slow.')\n self.residual_range = False\n\n if self.residual_range is False:\n operator = project(self.operator, None, self.RB)\n mass = project(self.mass, None, self.RB)\n return NonProjectedImplicitEulerResidualOperator(operator, mass, self.rhs, self.dt, self.product)\n\n with self.logger.block('Projecting residual operator ...'):\n # the product always cancels out\n operator = project(self.operator, self.residual_range, self.RB, product=None)\n mass = project(self.mass, self.residual_range, self.RB, product=None)\n rhs = project(self.rhs, self.residual_range, None, product=None)\n\n return ImplicitEulerResidualOperator(operator, mass, rhs, self.dt)\n\n def reconstruct(self, u):\n \"\"\"Reconstruct high-dimensional residual vector from reduced vector `u`.\"\"\"\n if self.residual_range is False:\n if self.product:\n return u * (u.norm() / u.norm(self.product))[0]\n else:\n return u\n else:\n return self.residual_range[:u.dim].lincomb(u.to_numpy())\n\n\nclass ImplicitEulerResidualOperator(Operator):\n \"\"\"Instantiated by :class:`ImplicitEulerResidualReductor`.\"\"\"\n\n def __init__(self, operator, mass, rhs, dt, name=None):\n self.__auto_init(locals())\n self.source = operator.source\n self.range = operator.range\n self.linear = operator.linear\n self.rhs_vector = rhs.as_range_array() if not rhs.parametric else None\n\n def apply(self, U, U_old, mu=None):\n V = self.operator.apply(U, mu=mu)\n V.axpy(1./self.dt, self.mass.apply(U, mu=mu))\n V.axpy(-1./self.dt, self.mass.apply(U_old, mu=mu))\n if not isinstance(self.rhs, ZeroOperator):\n F = self.rhs_vector or self.rhs.as_range_array(mu)\n if len(V) > 1:\n V -= F[[0]*len(V)]\n else:\n V -= F\n return V\n\n def projected_to_subbasis(self, dim_range=None, dim_source=None, name=None):\n return ImplicitEulerResidualOperator(project_to_subbasis(self.operator, dim_range, dim_source),\n project_to_subbasis(self.mass, dim_range, dim_source),\n project_to_subbasis(self.rhs, dim_range, None),\n self.dt,\n name=name)\n\n\nclass NonProjectedImplicitEulerResidualOperator(ImplicitEulerResidualOperator):\n \"\"\"Instantiated by :class:`ImplicitEulerResidualReductor`.\n\n Not to be used directly.\n \"\"\"\n\n def __init__(self, operator, mass, rhs, dt, product):\n super().__init__(operator, mass, rhs, dt)\n self.product = product\n\n def apply(self, U, U_old, mu=None):\n R = super().apply(U, U_old, mu=mu)\n if self.product:\n R_riesz = self.product.apply_inverse(R)\n # divide by norm, except when norm is zero:\n inversel2 = 1./R_riesz.norm()\n inversel2 = np.nan_to_num(inversel2)\n R_riesz.scal(np.sqrt(R_riesz.pairwise_inner(R)) * inversel2)\n return R_riesz\n else:\n return R\n\n def projected_to_subbasis(self, dim_range=None, dim_source=None, name=None):\n return self.with_(operator=project_to_subbasis(self.operator, None, dim_source),\n mass=project_to_subbasis(self.mass, None, dim_source))\n", "# This file is part of the pyMOR project (https://www.pymor.org).\n# Copyright 2013-2021 pyMOR developers and contributors. 
All rights reserved.\n# License: BSD 2-Clause License (https://opensource.org/licenses/BSD-2-Clause)\nfrom pathlib import Path\n\nfrom pymor.core.config import config\nfrom pymor.core.defaults import defaults\nfrom pymor.tools.io import change_to_directory\n\nif config.HAVE_NGSOLVE:\n import ngsolve as ngs\n import numpy as np\n\n from pymor.core.base import ImmutableObject\n from pymor.operators.list import LinearComplexifiedListVectorArrayOperatorBase\n from pymor.vectorarrays.interface import VectorArray\n from pymor.vectorarrays.numpy import NumpyVectorSpace\n from pymor.vectorarrays.list import CopyOnWriteVector, ComplexifiedVector, ComplexifiedListVectorSpace\n\n class NGSolveVectorCommon:\n def amax(self):\n A = np.abs(self.to_numpy())\n max_ind = np.argmax(A)\n max_val = A[max_ind]\n return max_ind, max_val\n\n def dofs(self, dof_indices):\n return self.to_numpy()[dof_indices]\n\n class NGSolveVector(NGSolveVectorCommon, CopyOnWriteVector):\n \"\"\"Wraps a NGSolve BaseVector to make it usable with ListVectorArray.\"\"\"\n\n def __init__(self, impl):\n self.impl = impl\n\n @classmethod\n def from_instance(cls, instance):\n return cls(instance.impl)\n\n def _copy_data(self):\n new_impl = ngs.GridFunction(self.impl.space)\n new_impl.vec.data = self.impl.vec\n self.impl = new_impl\n\n def to_numpy(self, ensure_copy=False):\n if ensure_copy:\n return self.impl.vec.FV().NumPy().copy()\n self._copy_data_if_needed()\n return self.impl.vec.FV().NumPy()\n\n def _scal(self, alpha):\n self.impl.vec.data = float(alpha) * self.impl.vec\n\n def _axpy(self, alpha, x):\n self.impl.vec.data = self.impl.vec + float(alpha) * x.impl.vec\n\n def inner(self, other):\n return self.impl.vec.InnerProduct(other.impl.vec)\n\n def norm(self):\n return self.impl.vec.Norm()\n\n def norm2(self):\n return self.impl.vec.Norm() ** 2\n\n class ComplexifiedNGSolveVector(NGSolveVectorCommon, ComplexifiedVector):\n pass\n\n class NGSolveVectorSpace(ComplexifiedListVectorSpace):\n\n complexified_vector_type = ComplexifiedNGSolveVector\n\n def __init__(self, V, id='STATE'):\n self.__auto_init(locals())\n\n def __eq__(self, other):\n return type(other) is NGSolveVectorSpace and self.V == other.V and self.id == other.id\n\n def __hash__(self):\n return hash(self.V) + hash(self.id)\n\n @property\n def value_dim(self):\n u = self.V.TrialFunction()\n if isinstance(u, list):\n return u[0].dim\n else:\n return u.dim\n\n @property\n def dim(self):\n return self.V.ndofglobal * self.value_dim\n\n @classmethod\n def space_from_vector_obj(cls, vec, id):\n return cls(vec.space, id)\n\n def real_zero_vector(self):\n impl = ngs.GridFunction(self.V)\n return NGSolveVector(impl)\n\n def real_make_vector(self, obj):\n return NGSolveVector(obj)\n\n def real_vector_from_numpy(self, data, ensure_copy=False):\n v = self.real_zero_vector()\n v.to_numpy()[:] = data\n return v\n\n class NGSolveMatrixOperator(LinearComplexifiedListVectorArrayOperatorBase):\n \"\"\"Wraps a NGSolve matrix as an |Operator|.\"\"\"\n\n def __init__(self, matrix, range, source, solver_options=None, name=None):\n self.__auto_init(locals())\n\n @defaults('default_solver')\n def _prepare_apply(self, U, mu, kind, least_squares=False, default_solver=''):\n if kind == 'apply_inverse':\n if least_squares:\n raise NotImplementedError\n solver = self.solver_options.get('inverse', default_solver) if self.solver_options else default_solver\n inv = self.matrix.Inverse(self.source.V.FreeDofs(), inverse=solver)\n return inv\n\n def _real_apply_one_vector(self, u, mu=None, 
prepare_data=None):\n r = self.range.real_zero_vector()\n self.matrix.Mult(u.impl.vec, r.impl.vec)\n return r\n\n def _real_apply_adjoint_one_vector(self, v, mu=None, prepare_data=None):\n u = self.source.real_zero_vector()\n try:\n mat = self.matrix.Transpose()\n except AttributeError:\n mat = self.matrix.T\n mat.Mult(v.impl.vec, u.impl.vec)\n return u\n\n def _real_apply_inverse_one_vector(self, v, mu=None, initial_guess=None,\n least_squares=False, prepare_data=None):\n inv = prepare_data\n r = self.source.real_zero_vector()\n r.impl.vec.data = inv * v.impl.vec\n return r\n\n def _assemble_lincomb(self, operators, coefficients, identity_shift=0., solver_options=None, name=None):\n if not all(isinstance(op, NGSolveMatrixOperator) for op in operators):\n return None\n if identity_shift != 0:\n return None\n\n matrix = operators[0].matrix.CreateMatrix()\n matrix.AsVector().data = float(coefficients[0]) * matrix.AsVector()\n for op, c in zip(operators[1:], coefficients[1:]):\n matrix.AsVector().data += float(c) * op.matrix.AsVector()\n return NGSolveMatrixOperator(matrix, self.range, self.source, solver_options=solver_options, name=name)\n\n def as_vector(self, copy=True):\n vec = self.matrix.AsVector().FV().NumPy()\n return NumpyVectorSpace.make_array(vec.copy() if copy else vec)\n\n class NGSolveVisualizer(ImmutableObject):\n \"\"\"Visualize an NGSolve grid function.\"\"\"\n\n def __init__(self, mesh, fespace):\n self.__auto_init(locals())\n self.space = NGSolveVectorSpace(fespace)\n\n def visualize(self, U, legend=None, separate_colorbars=True, filename=None, block=True):\n \"\"\"Visualize the provided data.\"\"\"\n if isinstance(U, VectorArray):\n U = (U,)\n assert all(u in self.space for u in U)\n if any(len(u) != 1 for u in U):\n raise NotImplementedError\n if any(u._list[0].imag_part is not None for u in U):\n raise NotImplementedError\n if legend is None:\n legend = [f'VectorArray{i}' for i in range(len(U))]\n if isinstance(legend, str):\n legend = [legend]\n assert len(legend) == len(U)\n legend = [l.replace(' ', '_') for l in legend] # NGSolve GUI will fail otherwise\n\n if filename:\n # ngsolve unconditionnaly appends \".vtk\"\n filename = Path(filename).resolve()\n if filename.suffix == '.vtk':\n filename = filename.parent / filename.stem\n else:\n self.logger.warning(f'NGSolve set VTKOutput filename to {filename}.vtk')\n coeffs = [u._list[0].real_part.impl for u in U]\n # ngsolve cannot handle full paths for filenames\n with change_to_directory(filename.parent):\n vtk = ngs.VTKOutput(ma=self.mesh, coefs=coeffs, names=legend, filename=str(filename), subdivision=0)\n vtk.Do()\n else:\n if not separate_colorbars:\n raise NotImplementedError\n\n for u, name in zip(U, legend):\n ngs.Draw(u._list[0].real_part.impl, self.mesh, name=name)\n", "# This file is part of the pyMOR project (https://www.pymor.org).\n# Copyright 2013-2021 pyMOR developers and contributors. 
All rights reserved.\n# License: BSD 2-Clause License (https://opensource.org/licenses/BSD-2-Clause)\n\nimport scipy.linalg as spla\nimport numpy as np\n\nfrom pymor.algorithms.genericsolvers import _parse_options\nfrom pymor.algorithms.riccati import _solve_ricc_check_args\nfrom pymor.vectorarrays.constructions import cat_arrays\nfrom pymor.core.defaults import defaults\nfrom pymor.core.logger import getLogger\nfrom pymor.operators.constructions import IdentityOperator\nfrom pymor.algorithms.gram_schmidt import gram_schmidt\nfrom pymor.tools.random import get_random_state\n\n\n@defaults('lrradi_tol', 'lrradi_maxiter', 'lrradi_shifts', 'hamiltonian_shifts_init_maxiter',\n 'hamiltonian_shifts_init_seed', 'hamiltonian_shifts_subspace_columns')\ndef ricc_lrcf_solver_options(lrradi_tol=1e-10,\n lrradi_maxiter=500,\n lrradi_shifts='hamiltonian_shifts',\n hamiltonian_shifts_init_maxiter=20,\n hamiltonian_shifts_init_seed=None,\n hamiltonian_shifts_subspace_columns=6):\n \"\"\"Returns available Riccati equation solvers with default solver options.\n\n Parameters\n ----------\n lrradi_tol\n See :func:`solve_ricc_lrcf`.\n lrradi_maxiter\n See :func:`solve_ricc_lrcf`.\n lrradi_shifts\n See :func:`solve_ricc_lrcf`.\n hamiltonian_shifts_init_maxiter\n See :func:`hamiltonian_shifts_init`.\n hamiltonian_shifts_init_seed\n See :func:`hamiltonian_shifts_init`.\n hamiltonian_shifts_subspace_columns\n See :func:`hamiltonian_shifts`.\n\n Returns\n -------\n A dict of available solvers with default solver options.\n \"\"\"\n return {'lrradi': {'type': 'lrradi',\n 'tol': lrradi_tol,\n 'maxiter': lrradi_maxiter,\n 'shifts': lrradi_shifts,\n 'shift_options':\n {'hamiltonian_shifts': {'type': 'hamiltonian_shifts',\n 'init_maxiter': hamiltonian_shifts_init_maxiter,\n 'init_seed': hamiltonian_shifts_init_seed,\n 'subspace_columns': hamiltonian_shifts_subspace_columns}}}}\n\n\ndef solve_ricc_lrcf(A, E, B, C, R=None, trans=False, options=None):\n \"\"\"Compute an approximate low-rank solution of a Riccati equation.\n\n See :func:`pymor.algorithms.riccati.solve_ricc_lrcf` for a\n general description.\n\n This function is an implementation of Algorithm 2 in :cite:`BBKS18`.\n\n Parameters\n ----------\n A\n The |Operator| A.\n E\n The |Operator| E or `None`.\n B\n The operator B as a |VectorArray| from `A.source`.\n C\n The operator C as a |VectorArray| from `A.source`.\n R\n The operator R as a 2D |NumPy array| or `None`.\n trans\n Whether the first |Operator| in the Riccati equation is\n transposed.\n options\n The solver options to use. 
(see\n :func:`ricc_lrcf_solver_options`)\n\n Returns\n -------\n Z\n Low-rank Cholesky factor of the Riccati equation solution,\n |VectorArray| from `A.source`.\n \"\"\"\n _solve_ricc_check_args(A, E, B, C, R, trans)\n options = _parse_options(options, ricc_lrcf_solver_options(), 'lrradi', None, False)\n logger = getLogger('pymor.algorithms.lrradi.solve_ricc_lrcf')\n\n shift_options = options['shift_options'][options['shifts']]\n if shift_options['type'] == 'hamiltonian_shifts':\n init_shifts = hamiltonian_shifts_init\n iteration_shifts = hamiltonian_shifts\n else:\n raise ValueError('Unknown lrradi shift strategy.')\n\n if E is None:\n E = IdentityOperator(A.source)\n\n if R is not None:\n Rc = spla.cholesky(R) # R = Rc^T * Rc\n Rci = spla.solve_triangular(Rc, np.eye(Rc.shape[0])) # R^{-1} = Rci * Rci^T\n if not trans:\n C = C.lincomb(Rci.T) # C <- Rci^T * C = (C^T * Rci)^T\n else:\n B = B.lincomb(Rci.T) # B <- B * Rci\n\n if not trans:\n B, C = C, B\n\n Z = A.source.empty(reserve=len(C) * options['maxiter'])\n Y = np.empty((0, 0))\n\n K = A.source.zeros(len(B))\n RF = C.copy()\n\n j = 0\n j_shift = 0\n shifts = init_shifts(A, E, B, C, shift_options)\n\n res = np.linalg.norm(RF.gramian(), ord=2)\n init_res = res\n Ctol = res * options['tol']\n\n while res > Ctol and j < options['maxiter']:\n if not trans:\n AsE = A + shifts[j_shift] * E\n else:\n AsE = A + np.conj(shifts[j_shift]) * E\n if j == 0:\n if not trans:\n V = AsE.apply_inverse(RF) * np.sqrt(-2 * shifts[j_shift].real)\n else:\n V = AsE.apply_inverse_adjoint(RF) * np.sqrt(-2 * shifts[j_shift].real)\n else:\n if not trans:\n LN = AsE.apply_inverse(cat_arrays([RF, K]))\n else:\n LN = AsE.apply_inverse_adjoint(cat_arrays([RF, K]))\n L = LN[:len(RF)]\n N = LN[-len(K):]\n ImBN = np.eye(len(K)) - B.inner(N)\n ImBNKL = spla.solve(ImBN, B.inner(L))\n V = (L + N.lincomb(ImBNKL.T)) * np.sqrt(-2 * shifts[j_shift].real)\n\n if np.imag(shifts[j_shift]) == 0:\n Z.append(V)\n VB = V.inner(B)\n Yt = np.eye(len(C)) - (VB @ VB.T) / (2 * shifts[j_shift].real)\n Y = spla.block_diag(Y, Yt)\n if not trans:\n EVYt = E.apply(V).lincomb(np.linalg.inv(Yt))\n else:\n EVYt = E.apply_adjoint(V).lincomb(np.linalg.inv(Yt))\n RF.axpy(np.sqrt(-2*shifts[j_shift].real), EVYt)\n K += EVYt.lincomb(VB.T)\n j += 1\n else:\n Z.append(V.real)\n Z.append(V.imag)\n Vr = V.real.inner(B)\n Vi = V.imag.inner(B)\n sa = np.abs(shifts[j_shift])\n F1 = np.vstack((\n -shifts[j_shift].real/sa * Vr - shifts[j_shift].imag/sa * Vi,\n shifts[j_shift].imag/sa * Vr - shifts[j_shift].real/sa * Vi\n ))\n F2 = np.vstack((\n Vr,\n Vi\n ))\n F3 = np.vstack((\n shifts[j_shift].imag/sa * np.eye(len(C)),\n shifts[j_shift].real/sa * np.eye(len(C))\n ))\n Yt = spla.block_diag(np.eye(len(C)), 0.5 * np.eye(len(C))) \\\n - (F1 @ F1.T) / (4 * shifts[j_shift].real) \\\n - (F2 @ F2.T) / (4 * shifts[j_shift].real) \\\n - (F3 @ F3.T) / 2\n Y = spla.block_diag(Y, Yt)\n EVYt = E.apply(cat_arrays([V.real, V.imag])).lincomb(np.linalg.inv(Yt))\n RF.axpy(np.sqrt(-2 * shifts[j_shift].real), EVYt[:len(C)])\n K += EVYt.lincomb(F2.T)\n j += 2\n j_shift += 1\n res = np.linalg.norm(RF.gramian(), ord=2)\n logger.info(f'Relative residual at step {j}: {res/init_res:.5e}')\n if j_shift >= shifts.size:\n shifts = iteration_shifts(A, E, B, RF, K, Z, shift_options)\n j_shift = 0\n # transform solution to lrcf\n cf = spla.cholesky(Y)\n Z_cf = Z.lincomb(spla.solve_triangular(cf, np.eye(len(Z))).T)\n return Z_cf\n\n\ndef hamiltonian_shifts_init(A, E, B, C, shift_options):\n \"\"\"Compute initial shift parameters for low-rank 
RADI iteration.\n\n Compute Galerkin projection of Hamiltonian matrix on space spanned by :math:`C` and return the\n eigenvalue of the projected Hamiltonian with the most impact on convergence as the next shift\n parameter.\n\n See :cite:`BBKS18`, pp. 318-321.\n\n Parameters\n ----------\n A\n The |Operator| A from the corresponding Riccati equation.\n E\n The |Operator| E from the corresponding Riccati equation.\n B\n The |VectorArray| B from the corresponding Riccati equation.\n C\n The |VectorArray| C from the corresponding Riccati equation.\n shift_options\n The shift options to use (see :func:`ricc_lrcf_solver_options`).\n\n Returns\n -------\n shifts\n A |NumPy array| containing a set of stable shift parameters.\n \"\"\"\n random_state = get_random_state(seed=shift_options['init_seed'])\n for _ in range(shift_options['init_maxiter']):\n Q = gram_schmidt(C, atol=0, rtol=0)\n Ap = A.apply2(Q, Q)\n QB = Q.inner(B)\n Gp = QB.dot(QB.T)\n QR = Q.inner(C)\n Rp = QR.dot(QR.T)\n Hp = np.block([\n [Ap, Gp],\n [Rp, -Ap.T]\n ])\n Ep = E.apply2(Q, Q)\n EEp = spla.block_diag(Ep, Ep.T)\n eigvals, eigvecs = spla.eig(Hp, EEp)\n eigpairs = zip(eigvals, eigvecs)\n # filter stable eigenvalues\n eigpairs = list(filter(lambda e: e[0].real < 0, eigpairs))\n if len(eigpairs) == 0:\n # use random subspace instead of span{C} (with same dimensions)\n C = C.random(len(C), distribution='normal', random_state=random_state)\n continue\n # find shift with most impact on convergence\n maxval = -1\n maxind = 0\n for i in range(len(eigpairs)):\n eig = eigpairs[i][1]\n y_eig = eig[-len(Q):]\n x_eig = eig[:len(Q)]\n Ey = Ep.T.dot(y_eig)\n xEy = np.abs(np.dot(x_eig, Ey))\n currval = np.linalg.norm(y_eig)**2 / xEy\n if currval > maxval:\n maxval = currval\n maxind = i\n shift = eigpairs[maxind][0]\n # remove imaginary part if it is relatively small\n if np.abs(shift.imag) / np.abs(shift) < 1e-8:\n shift = shift.real\n return np.array([shift])\n raise RuntimeError('Could not generate initial shifts for low-rank RADI iteration.')\n\n\ndef hamiltonian_shifts(A, E, B, R, K, Z, shift_options):\n \"\"\"Compute further shift parameters for low-rank RADI iteration.\n\n Compute Galerkin projection of Hamiltonian matrix on space spanned by last few columns of\n :math:`Z` and return the eigenvalue of the projected Hamiltonian with the most impact on\n convergence as the next shift parameter.\n\n See :cite:`BBKS18`, pp. 
318-321.\n\n Parameters\n ----------\n A\n The |Operator| A from the corresponding Riccati equation.\n E\n The |Operator| E from the corresponding Riccati equation.\n B\n The |VectorArray| B from the corresponding Riccati equation.\n R\n A |VectorArray| representing the currently computed residual factor.\n K\n A |VectorArray| representing the currently computed iterate.\n Z\n A |VectorArray| representing the currently computed solution factor.\n shift_options\n The shift options to use (see :func:`ricc_lrcf_solver_options`).\n\n Returns\n -------\n shifts\n A |NumPy array| containing a set of stable shift parameters.\n \"\"\"\n l = shift_options['subspace_columns']\n # always use multiple of len(R) columns\n l = l // len(R) * len(R)\n if len(Z) < l:\n l = len(Z)\n\n Q = gram_schmidt(Z[-l:], atol=0, rtol=0)\n Ap = A.apply2(Q, Q)\n KBp = Q.inner(K) @ Q.inner(B).T\n AAp = Ap - KBp\n QB = Q.inner(B)\n Gp = QB.dot(QB.T)\n QR = Q.inner(R)\n Rp = QR.dot(QR.T)\n Hp = np.block([\n [AAp, Gp],\n [Rp, -AAp.T]\n ])\n Ep = E.apply2(Q, Q)\n EEp = spla.block_diag(Ep, Ep.T)\n eigvals, eigvecs = spla.eig(Hp, EEp)\n eigpairs = zip(eigvals, eigvecs)\n # filter stable eigenvalues\n eigpairs = list(filter(lambda e: e[0].real < 0, eigpairs))\n # find shift with most impact on convergence\n maxval = -1\n maxind = 0\n for i in range(len(eigpairs)):\n eig = eigpairs[i][1]\n y_eig = eig[-len(Q):]\n x_eig = eig[:len(Q)]\n Ey = Ep.T.dot(y_eig)\n xEy = np.abs(np.dot(x_eig, Ey))\n currval = np.linalg.norm(y_eig)**2 / xEy\n if currval > maxval:\n maxval = currval\n maxind = i\n shift = eigpairs[maxind][0]\n # remove imaginary part if it is relatively small\n if np.abs(shift.imag) / np.abs(shift) < 1e-8:\n shift = shift.real\n return np.array([shift])\n", "# This file is part of the pyMOR project (https://www.pymor.org).\n# Copyright 2013-2021 pyMOR developers and contributors. All rights reserved.\n# License: BSD 2-Clause License (https://opensource.org/licenses/BSD-2-Clause)\n\nimport numpy as np\nfrom numpy.random import uniform\n\nfrom pymor.algorithms.randrangefinder import rrf, adaptive_rrf\nfrom pymor.operators.numpy import NumpyMatrixOperator\nfrom pymor.operators.constructions import VectorArrayOperator\n\n\nnp.random.seed(0)\nA = uniform(low=-1.0, high=1.0, size=(100, 100))\nA = A.dot(A.T)\nrange_product = NumpyMatrixOperator(A)\n\nnp.random.seed(1)\nA = uniform(low=-1.0, high=1.0, size=(10, 10))\nA = A.dot(A.T)\nsource_product = NumpyMatrixOperator(A)\n\nB = range_product.range.random(10, seed=10)\nop = VectorArrayOperator(B)\n\nC = range_product.range.random(10, seed=11)+1j*range_product.range.random(10, seed=12)\nop_complex = VectorArrayOperator(C)\n\n\ndef test_rrf():\n Q = rrf(op, source_product, range_product)\n assert Q in op.range\n assert len(Q) == 8\n\n Q = rrf(op_complex, iscomplex=True)\n assert np.iscomplexobj(Q.to_numpy())\n assert Q in op.range\n assert len(Q) == 8\n\n\ndef test_adaptive_rrf():\n B = adaptive_rrf(op, source_product, range_product)\n assert B in op.range\n\n B = adaptive_rrf(op_complex, iscomplex=True)\n assert np.iscomplexobj(B.to_numpy())\n assert B in op.range\n" ]
[ [ "numpy.nan_to_num" ], [ "numpy.argmax" ], [ "numpy.dot", "numpy.imag", "numpy.abs", "scipy.linalg.block_diag", "numpy.sqrt", "numpy.linalg.inv", "numpy.conj", "numpy.eye", "numpy.vstack", "numpy.linalg.norm", "numpy.empty", "numpy.block", "scipy.linalg.cholesky", "numpy.array", "scipy.linalg.eig" ], [ "numpy.random.uniform", "numpy.random.seed" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.12", "0.14", "0.15" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Mishrasubha/napari
[ "c4d1038fc3ed30dc228949cbdedf12826ec2efc2", "c4d1038fc3ed30dc228949cbdedf12826ec2efc2", "c4d1038fc3ed30dc228949cbdedf12826ec2efc2" ]
[ "napari/_qt/layer_controls/qt_vectors_controls.py", "napari/components/dims.py", "napari/layers/utils/interactivity_utils.py" ]
[ "import numpy as np\nfrom qtpy.QtCore import Qt\nfrom qtpy.QtWidgets import QComboBox, QDoubleSpinBox, QLabel\n\nfrom ...layers.utils._color_manager_constants import ColorMode\nfrom ...utils.translations import trans\nfrom ..utils import qt_signals_blocked\nfrom ..widgets.qt_color_swatch import QColorSwatchEdit\nfrom .qt_layer_controls_base import QtLayerControls\n\n\nclass QtVectorsControls(QtLayerControls):\n \"\"\"Qt view and controls for the napari Vectors layer.\n\n Parameters\n ----------\n layer : napari.layers.Vectors\n An instance of a napari Vectors layer.\n\n Attributes\n ----------\n edge_color_label : qtpy.QtWidgets.QLabel\n Label for edgeColorSwatch\n edgeColorSwatch : qtpy.QtWidgets.QFrame\n Color swatch showing display color of vectors.\n edgeComboBox : qtpy.QtWidgets.QComboBox\n Dropdown widget to select display color for vectors.\n color_mode_comboBox : qtpy.QtWidgets.QComboBox\n Dropdown widget to select edge_color_mode for the vectors.\n color_prop_box : qtpy.QtWidgets.QComboBox\n Dropdown widget to select _edge_color_property for the vectors.\n edge_prop_label : qtpy.QtWidgets.QLabel\n Label for color_prop_box\n grid_layout : qtpy.QtWidgets.QGridLayout\n Layout of Qt widget controls for the layer.\n layer : napari.layers.Vectors\n An instance of a napari Vectors layer.\n lengthSpinBox : qtpy.QtWidgets.QDoubleSpinBox\n Spin box widget controlling line length of vectors.\n Multiplicative factor on projections for length of all vectors.\n widthSpinBox : qtpy.QtWidgets.QDoubleSpinBox\n Spin box widget controlling edge line width of vectors.\n \"\"\"\n\n def __init__(self, layer):\n super().__init__(layer)\n\n self.layer.events.edge_width.connect(self._on_edge_width_change)\n self.layer.events.length.connect(self._on_length_change)\n self.layer.events.edge_color_mode.connect(\n self._on_edge_color_mode_change\n )\n self.layer.events.edge_color.connect(self._on_edge_color_change)\n\n # dropdown to select the property for mapping edge_color\n color_properties = self._get_property_values()\n color_prop_box = QComboBox(self)\n color_prop_box.activated[str].connect(self.change_edge_color_property)\n color_prop_box.addItems(color_properties)\n self.color_prop_box = color_prop_box\n self.edge_prop_label = QLabel(trans._('edge property:'))\n\n # vector direct color mode adjustment and widget\n self.edgeColorEdit = QColorSwatchEdit(\n initial_color=self.layer.edge_color,\n tooltip=trans._(\n 'click to set current edge color',\n ),\n )\n self.edgeColorEdit.color_changed.connect(self.change_edge_color_direct)\n self.edge_color_label = QLabel(trans._('edge color:'))\n self._on_edge_color_change()\n\n # dropdown to select the edge color mode\n colorModeComboBox = QComboBox(self)\n color_modes = [e.value for e in ColorMode]\n colorModeComboBox.addItems(color_modes)\n colorModeComboBox.activated[str].connect(self.change_edge_color_mode)\n self.color_mode_comboBox = colorModeComboBox\n self._on_edge_color_mode_change()\n\n # line width in pixels\n self.widthSpinBox = QDoubleSpinBox()\n self.widthSpinBox.setKeyboardTracking(False)\n self.widthSpinBox.setSingleStep(0.1)\n self.widthSpinBox.setMinimum(0.1)\n self.widthSpinBox.setMaximum(np.inf)\n self.widthSpinBox.setValue(self.layer.edge_width)\n self.widthSpinBox.valueChanged.connect(self.change_width)\n\n # line length\n self.lengthSpinBox = QDoubleSpinBox()\n self.lengthSpinBox.setKeyboardTracking(False)\n self.lengthSpinBox.setSingleStep(0.1)\n self.lengthSpinBox.setValue(self.layer.length)\n self.lengthSpinBox.setMinimum(0.1)\n 
self.lengthSpinBox.setMaximum(np.inf)\n self.lengthSpinBox.valueChanged.connect(self.change_length)\n\n # grid_layout created in QtLayerControls\n # addWidget(widget, row, column, [row_span, column_span])\n self.grid_layout.addWidget(QLabel(trans._('opacity:')), 0, 0)\n self.grid_layout.addWidget(self.opacitySlider, 0, 1, 1, 2)\n self.grid_layout.addWidget(QLabel(trans._('width:')), 1, 0)\n self.grid_layout.addWidget(self.widthSpinBox, 1, 1, 1, 2)\n self.grid_layout.addWidget(QLabel(trans._('length:')), 2, 0)\n self.grid_layout.addWidget(self.lengthSpinBox, 2, 1, 1, 2)\n self.grid_layout.addWidget(QLabel(trans._('blending:')), 3, 0)\n self.grid_layout.addWidget(self.blendComboBox, 3, 1, 1, 2)\n self.grid_layout.addWidget(QLabel(trans._('edge color mode:')), 4, 0)\n self.grid_layout.addWidget(self.color_mode_comboBox, 4, 1, 1, 2)\n self.grid_layout.addWidget(self.edge_color_label, 5, 0)\n self.grid_layout.addWidget(self.edgeColorEdit, 5, 1, 1, 2)\n self.grid_layout.addWidget(self.edge_prop_label, 6, 0)\n self.grid_layout.addWidget(self.color_prop_box, 6, 1, 1, 2)\n self.grid_layout.setRowStretch(7, 1)\n self.grid_layout.setColumnStretch(1, 1)\n self.grid_layout.setSpacing(4)\n\n def change_edge_color_property(self, property: str):\n \"\"\"Change edge_color_property of vectors on the layer model.\n This property is the property the edge color is mapped to.\n\n Parameters\n ----------\n property : str\n property to map the edge color to\n \"\"\"\n mode = self.layer.edge_color_mode\n try:\n self.layer.edge_color = property\n self.layer.edge_color_mode = mode\n except TypeError:\n # if the selected property is the wrong type for the current color mode\n # the color mode will be changed to the appropriate type, so we must update\n self._on_edge_color_mode_change()\n raise\n\n def change_edge_color_mode(self, mode: str):\n \"\"\"Change edge color mode of vectors on the layer model.\n\n Parameters\n ----------\n mode : str\n Edge color for vectors. 
Must be: 'direct', 'cycle', or 'colormap'\n \"\"\"\n old_mode = self.layer.edge_color_mode\n with self.layer.events.edge_color_mode.blocker():\n try:\n self.layer.edge_color_mode = mode\n self._update_edge_color_gui(mode)\n\n except ValueError:\n # if the color mode was invalid, revert to the old mode\n self.layer.edge_color_mode = old_mode\n raise\n\n def change_edge_color_direct(self, color: np.ndarray):\n \"\"\"Change edge color of vectors on the layer model.\n\n Parameters\n ----------\n color : np.ndarray\n Edge color for vectors, in an RGBA array\n \"\"\"\n self.layer.edge_color = color\n\n def change_width(self, value):\n \"\"\"Change edge line width of vectors on the layer model.\n\n Parameters\n ----------\n value : float\n Line width of vectors.\n \"\"\"\n self.layer.edge_width = value\n self.widthSpinBox.clearFocus()\n self.setFocus()\n\n def change_length(self, value):\n \"\"\"Change length of vectors on the layer model.\n\n Multiplicative factor on projections for length of all vectors.\n\n Parameters\n ----------\n value : float\n Length of vectors.\n \"\"\"\n self.layer.length = value\n self.lengthSpinBox.clearFocus()\n self.setFocus()\n\n def _update_edge_color_gui(self, mode: str):\n \"\"\"Update the GUI element associated with edge_color.\n This is typically used when edge_color_mode changes\n\n Parameters\n ----------\n mode : str\n The new edge_color mode the GUI needs to be updated for.\n Should be: 'direct', 'cycle', 'colormap'\n \"\"\"\n if mode in ('cycle', 'colormap'):\n self.edgeColorEdit.setHidden(True)\n self.edge_color_label.setHidden(True)\n self.color_prop_box.setHidden(False)\n self.edge_prop_label.setHidden(False)\n\n elif mode == 'direct':\n self.edgeColorEdit.setHidden(False)\n self.edge_color_label.setHidden(False)\n self.color_prop_box.setHidden(True)\n self.edge_prop_label.setHidden(True)\n\n def _get_property_values(self):\n \"\"\"Get the current property values from the Vectors layer\n\n Returns\n -------\n property_values : np.ndarray\n array of all of the union of the property names (keys)\n in Vectors.properties and Vectors._property_choices\n\n \"\"\"\n property_choices = [*self.layer._property_choices]\n properties = [*self.layer.properties]\n property_values = np.union1d(property_choices, properties)\n\n return property_values\n\n def _on_length_change(self):\n \"\"\"Change length of vectors.\"\"\"\n with self.layer.events.length.blocker():\n self.lengthSpinBox.setValue(self.layer.length)\n\n def _on_edge_width_change(self):\n \"\"\"Receive layer model width change event and update width spinbox.\"\"\"\n with self.layer.events.edge_width.blocker():\n self.widthSpinBox.setValue(self.layer.edge_width)\n\n def _on_edge_color_mode_change(self):\n \"\"\"Receive layer model edge color mode change event & update dropdown.\"\"\"\n with qt_signals_blocked(self.color_mode_comboBox):\n mode = self.layer._edge.color_mode\n index = self.color_mode_comboBox.findText(\n mode, Qt.MatchFixedString\n )\n self.color_mode_comboBox.setCurrentIndex(index)\n\n self._update_edge_color_gui(mode)\n\n def _on_edge_color_change(self):\n \"\"\"Receive layer model edge color change event & update dropdown.\"\"\"\n if (\n self.layer._edge.color_mode == ColorMode.DIRECT\n and len(self.layer.data) > 0\n ):\n with qt_signals_blocked(self.edgeColorEdit):\n self.edgeColorEdit.setColor(self.layer.edge_color[0])\n elif self.layer._edge.color_mode in (\n ColorMode.CYCLE,\n ColorMode.COLORMAP,\n ):\n with qt_signals_blocked(self.color_prop_box):\n prop = 
self.layer._edge.color_properties.name\n index = self.color_prop_box.findText(prop, Qt.MatchFixedString)\n self.color_prop_box.setCurrentIndex(index)\n", "from typing import Sequence, Tuple, Union\n\nimport numpy as np\nfrom pydantic import root_validator, validator\nfrom typing_extensions import Literal # Added to typing in 3.8\n\nfrom ..utils.events import EventedModel\nfrom ..utils.translations import trans\n\n\nclass Dims(EventedModel):\n \"\"\"Dimensions object modeling slicing and displaying.\n\n Parameters\n ----------\n ndim : int\n Number of dimensions.\n ndisplay : int\n Number of displayed dimensions.\n last_used : int\n Dimension which was last used.\n range : tuple of 3-tuple of float\n List of tuples (min, max, step), one for each dimension. In a world\n coordinates space. As with Python's `range` and `slice`, max is not\n included.\n current_step : tuple of int\n Tuple of the slider position for each dims slider, in slider coordinates.\n order : tuple of int\n Tuple of ordering the dimensions, where the last dimensions are rendered.\n axis_labels : tuple of str\n Tuple of labels for each dimension.\n\n Attributes\n ----------\n ndim : int\n Number of dimensions.\n ndisplay : int\n Number of displayed dimensions.\n last_used : int\n Dimension which was last used.\n range : tuple of 3-tuple of float\n List of tuples (min, max, step), one for each dimension. In a world\n coordinates space. As with Python's `range` and `slice`, max is not\n included.\n current_step : tuple of int\n Tuple the slider position for each dims slider, in slider coordinates.\n order : tuple of int\n Tuple of ordering the dimensions, where the last dimensions are rendered.\n axis_labels : tuple of str\n Tuple of labels for each dimension.\n nsteps : tuple of int\n Number of steps available to each slider. These are calculated from\n the ``range``.\n point : tuple of float\n List of floats setting the current value of the range slider when in\n POINT mode, one for each dimension. In a world coordinates space. These\n are calculated from the ``current_step`` and ``range``.\n displayed : tuple of int\n List of dimensions that are displayed. These are calculated from the\n ``order`` and ``ndisplay``.\n not_displayed : tuple of int\n List of dimensions that are not displayed. These are calculated from the\n ``order`` and ``ndisplay``.\n displayed_order : tuple of int\n Order of only displayed dimensions. These are calculated from the\n ``displayed`` dimensions.\n \"\"\"\n\n # fields\n ndim: int = 2\n ndisplay: Literal[2, 3] = 2\n last_used: int = 0\n range: Tuple[Tuple[float, float, float], ...] = ()\n current_step: Tuple[int, ...] = ()\n order: Tuple[int, ...] = ()\n axis_labels: Tuple[str, ...] 
= ()\n\n # private vars\n _scroll_progress: int = 0\n\n # validators\n @validator('axis_labels', pre=True)\n def _string_to_list(v):\n if isinstance(v, str):\n return list(v)\n return v\n\n @root_validator\n def _check_dims(cls, values):\n \"\"\"Check the consitency of dimensionaity for all attributes\n\n Parameters\n ----------\n values : dict\n Values dictionary to update dims model with.\n \"\"\"\n ndim = values['ndim']\n\n # Check the range tuple has same number of elements as ndim\n if len(values['range']) < ndim:\n values['range'] = ((0, 2, 1),) * (\n ndim - len(values['range'])\n ) + values['range']\n elif len(values['range']) > ndim:\n values['range'] = values['range'][-ndim:]\n\n # Check the current step tuple has same number of elements as ndim\n if len(values['current_step']) < ndim:\n values['current_step'] = (0,) * (\n ndim - len(values['current_step'])\n ) + values['current_step']\n elif len(values['current_step']) > ndim:\n values['current_step'] = values['current_step'][-ndim:]\n\n # Check the order tuple has same number of elements as ndim\n if len(values['order']) < ndim:\n values['order'] = tuple(\n range(ndim - len(values['order']))\n ) + tuple(o + ndim - len(values['order']) for o in values['order'])\n elif len(values['order']) > ndim:\n values['order'] = reorder_after_dim_reduction(\n values['order'][-ndim:]\n )\n\n # Check the order is a permutation of 0, ..., ndim - 1\n if not set(values['order']) == set(range(ndim)):\n raise ValueError(\n trans._(\n \"Invalid ordering {order} for {ndim} dimensions\",\n deferred=True,\n order=values['order'],\n ndim=ndim,\n )\n )\n\n # Check the axis labels tuple has same number of elements as ndim\n if len(values['axis_labels']) < ndim:\n # Append new \"default\" labels to existing ones\n if values['axis_labels'] == tuple(\n map(str, range(len(values['axis_labels'])))\n ):\n values['axis_labels'] = tuple(map(str, range(ndim)))\n else:\n values['axis_labels'] = (\n tuple(map(str, range(ndim - len(values['axis_labels']))))\n + values['axis_labels']\n )\n elif len(values['axis_labels']) > ndim:\n values['axis_labels'] = values['axis_labels'][-ndim:]\n\n return values\n\n @property\n def nsteps(self) -> Tuple[int, ...]:\n \"\"\"Tuple of int: Number of slider steps for each dimension.\"\"\"\n return tuple(\n int((max_val - min_val) // step_size)\n for min_val, max_val, step_size in self.range\n )\n\n @property\n def point(self) -> Tuple[int, ...]:\n \"\"\"Tuple of float: Value of each dimension.\"\"\"\n # The point value is computed from the range and current_step\n point = tuple(\n min_val + step_size * value\n for (min_val, max_val, step_size), value in zip(\n self.range, self.current_step\n )\n )\n return point\n\n @property\n def displayed(self) -> Tuple[int, ...]:\n \"\"\"Tuple: Dimensions that are displayed.\"\"\"\n return self.order[-self.ndisplay :]\n\n @property\n def not_displayed(self) -> Tuple[int, ...]:\n \"\"\"Tuple: Dimensions that are not displayed.\"\"\"\n return self.order[: -self.ndisplay]\n\n @property\n def displayed_order(self) -> Tuple[int, ...]:\n \"\"\"Tuple: Order of only displayed dimensions.\"\"\"\n order = np.array(self.displayed)\n order[np.argsort(order)] = list(range(len(order)))\n return tuple(order)\n\n def set_range(self, axis: int, _range: Sequence[Union[int, float]]):\n \"\"\"Sets the range (min, max, step) for a given dimension.\n\n Parameters\n ----------\n axis : int\n Dimension index.\n _range : tuple\n Range specified as (min, max, step).\n \"\"\"\n axis = assert_axis_in_bounds(axis, 
self.ndim)\n if self.range[axis] != _range:\n full_range = list(self.range)\n full_range[axis] = _range\n self.range = full_range\n self.last_used = axis\n\n def set_point(self, axis: int, value: Union[int, float]):\n \"\"\"Sets point to slice dimension in world coordinates.\n\n The desired point gets transformed into an integer step\n of the slider and stored in the current_step.\n\n Parameters\n ----------\n axis : int\n Dimension index.\n value : int or float\n Value of the point.\n \"\"\"\n axis = assert_axis_in_bounds(axis, self.ndim)\n (min_val, max_val, step_size) = self.range[axis]\n raw_step = (value - min_val) / step_size\n self.set_current_step(axis, raw_step)\n\n def set_current_step(self, axis: int, value: int):\n \"\"\"Sets the slider step at which to slice this dimension.\n\n The position of the slider in world coordinates gets\n calculated from the current_step of the slider.\n\n Parameters\n ----------\n axis : int\n Dimension index.\n value : int or float\n Value of the point.\n \"\"\"\n axis = assert_axis_in_bounds(axis, self.ndim)\n step = np.round(np.clip(value, 0, self.nsteps[axis] - 1)).astype(int)\n\n if self.current_step[axis] != step:\n full_current_step = list(self.current_step)\n full_current_step[axis] = step\n self.current_step = full_current_step\n self.last_used = axis\n\n def set_axis_label(self, axis: int, label: str):\n \"\"\"Sets a new axis label for the given axis.\n\n Parameters\n ----------\n axis : int\n Dimension index\n label : str\n Given label\n \"\"\"\n axis = assert_axis_in_bounds(axis, self.ndim)\n if self.axis_labels[axis] != str(label):\n full_axis_labels = list(self.axis_labels)\n full_axis_labels[axis] = str(label)\n self.axis_labels = full_axis_labels\n self.last_used = axis\n\n def reset(self):\n \"\"\"Reset dims values to initial states.\"\"\"\n # Don't reset axis labels\n self.range = ((0, 2, 1),) * self.ndim\n self.current_step = (0,) * self.ndim\n self.order = tuple(range(self.ndim))\n\n def _increment_dims_right(self, axis: int = None):\n \"\"\"Increment dimensions to the right along given axis, or last used axis if None\n\n Parameters\n ----------\n axis : int, optional\n Axis along which to increment dims, by default None\n \"\"\"\n if axis is None:\n axis = self.last_used\n self.set_current_step(axis, self.current_step[axis] + 1)\n\n def _increment_dims_left(self, axis: int = None):\n \"\"\"Increment dimensions to the left along given axis, or last used axis if None\n\n Parameters\n ----------\n axis : int, optional\n Axis along which to increment dims, by default None\n \"\"\"\n if axis is None:\n axis = self.last_used\n self.set_current_step(axis, self.current_step[axis] - 1)\n\n def _focus_up(self):\n \"\"\"Shift focused dimension slider to be the next slider above.\"\"\"\n sliders = [d for d in self.not_displayed if self.nsteps[d] > 1]\n if len(sliders) == 0:\n return\n\n index = (sliders.index(self.last_used) + 1) % len(sliders)\n self.last_used = sliders[index]\n\n def _focus_down(self):\n \"\"\"Shift focused dimension slider to be the next slider bellow.\"\"\"\n sliders = [d for d in self.not_displayed if self.nsteps[d] > 1]\n if len(sliders) == 0:\n return\n\n index = (sliders.index(self.last_used) - 1) % len(sliders)\n self.last_used = sliders[index]\n\n def _roll(self):\n \"\"\"Roll order of dimensions for display.\"\"\"\n order = np.array(self.order)\n nsteps = np.array(self.nsteps)\n order[nsteps > 1] = np.roll(order[nsteps > 1], 1)\n self.order = order.tolist()\n\n def _transpose(self):\n \"\"\"Transpose displayed 
dimensions.\"\"\"\n order = list(self.order)\n order[-2], order[-1] = order[-1], order[-2]\n self.order = order\n\n\ndef reorder_after_dim_reduction(order):\n \"\"\"Ensure current dimension order is preserved after dims are dropped.\n\n Parameters\n ----------\n order : tuple\n The data to reorder.\n\n Returns\n -------\n arr : tuple\n The original array with the unneeded dimension\n thrown away.\n \"\"\"\n arr = np.array(order)\n arr[np.argsort(arr)] = range(len(arr))\n return tuple(arr.tolist())\n\n\ndef assert_axis_in_bounds(axis: int, ndim: int) -> int:\n \"\"\"Assert a given value is inside the existing axes of the image.\n\n Returns\n -------\n axis : int\n The axis which was checked for validity.\n ndim : int\n The dimensionality of the layer.\n\n Raises\n ------\n ValueError\n The given axis index is out of bounds.\n \"\"\"\n if axis not in range(-ndim, ndim):\n msg = trans._(\n 'Axis {axis} not defined for dimensionality {ndim}. Must be in [{ndim_lower}, {ndim}).',\n deferred=True,\n axis=axis,\n ndim=ndim,\n ndim_lower=-ndim,\n )\n raise ValueError(msg)\n\n return axis % ndim\n", "import numpy as np\n\nfrom napari.utils.geometry import project_point_onto_plane\n\n\ndef drag_data_to_projected_distance(\n start_position, end_position, view_direction, vector\n):\n \"\"\"Calculate the projected distance between two mouse events.\n\n Project the drag vector between two mouse events onto a 3D vector\n specified in data coordinates.\n\n The general strategy is to\n 1) find mouse drag start and end positions, project them onto a\n pseudo-canvas (a plane aligned with the canvas) in data coordinates.\n 2) project the mouse drag vector onto the (normalised) vector in data\n coordinates\n Parameters\n ----------\n start_position : np.ndarray\n Starting point of the drag vector in data coordinates\n end_position : np.ndarray\n End point of the drag vector in data coordinates\n view_direction : np.ndarray\n Vector defining the plane normal of the plane onto which the drag\n vector is projected.\n vector : np.ndarray\n (3,) unit vector or (n, 3) array thereof on which to project the drag\n vector from start_event to end_event. This argument is defined in data\n coordinates.\n Returns\n -------\n projected_distance : (1, ) or (n, ) np.ndarray of float\n \"\"\"\n # enforce at least 2d input\n vector = np.atleast_2d(vector)\n\n # Store the start and end positions in world coordinates\n start_position = np.asarray(start_position)\n end_position = np.asarray(end_position)\n\n # Project the start and end positions onto a pseudo-canvas, a plane\n # parallel to the rendered canvas in data coordinates.\n end_position_canvas = project_point_onto_plane(\n end_position, start_position, view_direction\n )\n # Calculate the drag vector on the pseudo-canvas.\n drag_vector_canvas = np.squeeze(end_position_canvas - start_position)\n\n # Project the drag vector onto the specified vector(s), return the distance\n return np.einsum('j, ij -> i', drag_vector_canvas, vector).squeeze()\n" ]
[ [ "numpy.union1d" ], [ "numpy.argsort", "numpy.array", "numpy.roll", "numpy.clip" ], [ "numpy.asarray", "numpy.atleast_2d", "numpy.squeeze", "numpy.einsum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
XuboGU/neorl
[ "066cdbd9e9cdbfe371278dba3ece116f25edab2d" ]
[ "neorl/tune/runners/estune.py" ]
[ "import numpy as np\nimport pandas as pd\nimport os\nimport random\nimport math\nfrom itertools import repeat\nimport itertools\nimport sys, copy, shutil\nimport subprocess\nfrom multiprocessing.dummy import Pool\nfrom collections import defaultdict\nimport copy\n\nimport random\nimport matplotlib.pyplot as plt\n\ntry: \n from collections.abc import Sequence\nexcept ImportError:\n from collections import Sequence\n\nclass ESTUNE:\n \"\"\"\n A class to parse neorl input template and construct cases for evolution strategy (ES) hyperparameter optimisation\n\n inputs: \n The template input file\n Class object from PARSER.py, featuring user input for TUNE\n neorl logo\n \"\"\"\n\n def __init__(self, tuneclass, inputfile, tuneblock, logo):\n self.logo=logo\n self.inputfile=inputfile\n self.tuneblock=tuneblock\n self.n_last_episodes=int(self.tuneblock[\"n_last_episodes\"])\n self.ncores=int(self.tuneblock[\"ncores\"])\n self.ncases=int(self.tuneblock[\"ncases\"])\n\n #---------------------------------------\n # define genetic algorithm parameters\n #---------------------------------------\n self.popsize=10\n if self.ncases < self.popsize:\n self.ngens=1\n else:\n self.ngens=int(self.ncases/self.popsize)\n self.MU=5\n if tuneclass == 'gatune': # ES/GA tune\n print(\"Performing semi-GA Tune\")\n self.INDPB=0.1\n elif tuneclass == 'estune': # ES tune\n print(\"Performing ES Tune\")\n self.INDPB=1.0\n else: # default setting is ES tune\n print(\"Performing ES Tune\")\n self.INDPB=1.0\n self.CXPB=0.5\n self.MUTPB=0.2\n self.ETA=0.6\n self.SMAX=0.5\n self.paramvals=dict()\n self.paraminds=dict()\n self.datatypes=[]\n\n #-------------------------------\n # construct results directory\n #-------------------------------\n if os.path.exists('./tunecases/'):\n shutil.rmtree('./tunecases/')\n os.makedirs('./tunecases/', exist_ok=True)\n else:\n os.makedirs('./tunecases/', exist_ok=True)\n self.csvlogger='tune.csv'\n self.tunesummary='tunesummary.txt'\n\n #---------------------------------\n # parse the input template\n #---------------------------------\n with open (self.inputfile, 'r') as input_file_text:\n self.template=input_file_text.readlines()\n \n first=0; last=0\n for i in range(len(self.template)):\n if ('READ TUNE' in self.template[i]):\n first=i\n if ('END TUNE' in self.template[i]):\n last=i\n if first == 0 and last ==0:\n raise ('TUNE card cannot be found')\n\n del self.template[first: last+1]\n self.template=\"\".join(self.template)\n\n def tune_count(self):\n \n \"\"\"\n 1- This function uses self.tuneblock, parse it, infer all parameters to be tuned and thier distribution\n 2- This function creates GA engine and instantiates the initial population for evolution algorithm\n \"\"\"\n \n self.param_dict={}\n for item in self.tuneblock:\n if '{' in item and '}' in item and item[0] != '#':\n #-----------------------------------------------------\n # check the existence of the name in the template\n #-----------------------------------------------------\n if item not in self.template:\n raise ValueError('parameter {} in TUNE block cannot be found in any other block, e.g. 
DQN, GA, PPO, etc.'.format(item)) \n\n item_lst=self.tuneblock[item].split(\",\")\n item_lst=[item.strip() for item in item_lst] # get rid of white spaces in the splitted values\n #-------------------------------------------------------\n # check if a uniform distribution of floats is identified\n #-------------------------------------------------------\n try:\n if \"float\" in item_lst:\n item_lst[0]=float(item_lst[0])\n item_lst[1]=float(item_lst[1])\n self.datatypes.append(\"float\")\n print ('-- debug: parameter {} has uniform distribution of type --float-- between {} and {}'.format(item,item_lst[0],item_lst[1]))\n elif \"u\" in item_lst: \n item_lst[0]=float(item_lst[0])\n item_lst[1]=float(item_lst[1])\n self.datatypes.append(\"float\")\n print ('-- debug: parameter {} has uniform distribution of type --float-- between {} and {}'.format(item,item_lst[0],item_lst[1]))\n except:\n raise Exception ('--error: TUNE cannot construct the user-given uniform distribution of --floats-- for {} according to (low, high, u) syntax'.format(item))\n \n #---------------------------------------------------\n # check if a random integer distribution is identified\n #---------------------------------------------------\n try:\n if \"int\" in item_lst:\n item_lst[0]=int(item_lst[0])\n item_lst[1]=int(item_lst[1])\n self.datatypes.append(\"int\")\n print ('-- debug: parameter {} has uniform distribution of type --int-- between {} and {}'.format(item,item_lst[0],item_lst[1]))\n elif \"randint\" in item_lst:\n item_lst[0]=int(item_lst[0])\n item_lst[1]=int(item_lst[1])\n self.datatypes.append(\"int\")\n print ('-- debug: parameter {} has uniform distribution of type --int-- between {} and {}'.format(item,item_lst[0],item_lst[1]))\n except:\n raise Exception ('--error: TUNE cannot construct the user-given uniform distribution of --int-- for {} according to (low, high, u) syntax'.format(item))\n \n #-----------------------------------------------------\n # check if a grid is identified\n #-----------------------------------------------------\n try:\n if \"grid\" in item_lst:\n element_lst=[]\n for element in item_lst:\n # check if it is an integer\n not_int=0\n try:\n element_lst.append(int(element.strip()))\n except Exception:\n not_int=1\n \n # else check if the elment is float\n if not_int:\n try:\n element_lst.append(float(element.strip()))\n # else consider it a string\n except Exception:\n element_lst.append(str(element.strip()))\n \n item_lst=element_lst\n self.datatypes.append(\"grid\")\n print ('-- debug: parameter {} has grid type with values {}'.format(item,item_lst))\n except:\n raise Exception ('--error: TUNE cannot construct the user-given grid for {} according to the comma-seperated syntax'.format(item))\n\n self.param_dict[item]=item_lst # Save the final parsed list for parameter {XXX} \n \n #-----------------------------------------------------\n # infer the bounds for strategy vector \n #-----------------------------------------------------\n if len(self.param_dict.keys()) <= 10:\n self.SMIN=0.1\n else:\n self.SMIN=1/(len(self.param_dict.keys()))\n\n def gen_cases(self, x=0):\n \"\"\"\n This function infers neorl.py path\n \"\"\"\n self.tune_count()\n self.param_names=list(self.param_dict.keys())\n #----------------------- \n # Infer neorl.py path\n #-----------------------\n # Find neorl path\n #self.here=os.path.dirname(os.path.abspath(__file__))\n #self.neorl_path=self.here.replace('src/tune','neorl.py') #try to infer neorl.py internally to call neorl inside or neorl\n 
#self.python_path=self.here.replace('neorl/src/tune','anaconda3/bin/python3') #try to infer python3 path to call neorl inside or neorl\n\n self.neorl_path=sys.argv[0]\n self.python_path=sys.executable\n print('--debug: NEORLPATH=', self.neorl_path)\n print('--debug: PYTHONPATH=', self.python_path)\n \n def GenES(self):\n \"\"\"\n Individual generator:\n 1- This function uses self.param_dict to obtain bounds for individual parameters\n Returns:\n -ind (list): an individual vector with values samples from inferred distribution \n -strategy (list): the strategy vector with values between smin and smax \n \"\"\" \n size=len(self.param_dict.keys()) # size of individual\n content=[]\n self.LOW=[] # Lower bounds for the parameters to be tuned\n self.UP=[] # Upper bounds for parameters to be tuned\n for key in list(self.param_dict.keys()):\n if 'int' in self.param_dict[key]:\n content.append(random.randint(self.param_dict[key][0], self.param_dict[key][1]))\n elif 'randint' in self.param_dict[key]:\n content.append(random.randint(self.param_dict[key][0], self.param_dict[key][1]))\n elif 'float' in self.param_dict[key]:\n content.append(random.uniform(self.param_dict[key][0], self.param_dict[key][1]))\n elif 'u' in self.param_dict[key]:\n content.append(random.uniform(self.param_dict[key][0], self.param_dict[key][1]))\n elif 'grid' in self.param_dict[key]:\n self.real_grid=list(self.param_dict[key])\n self.real_grid.remove('grid') # get rid of the 'grid' to avoid sampling it\n self.paramvals[key]=self.real_grid\n content.append(random.sample(self.real_grid, 1)[0])\n self.paraminds[len(content)-1]=key\n else:\n raise Exception('unknown data type is given, either int/randint, float/u, or grid are allowed for parameter distribution types')\n self.LOW.append(self.param_dict[key][0])\n self.UP.append(self.param_dict[key][1])\n ind=list(content)\n size = len(list(self.param_dict.keys()))\n strategy= [random.uniform(self.SMIN, self.SMAX) for _ in range(size)]\n return ind, strategy\n \n def init_pop(self):\n \"\"\"\n Population initializer\n Returns:\n -pop (dict): initial population in a dictionary form \n \"\"\"\n # initialize the population and strategy and run them in parallel (these samples will be used to initialize the memory)\n pop=defaultdict(list)\n \n for i in range(self.popsize):\n #caseid='es_gen{}_ind{}'.format(0,i+1)\n data=self.GenES()\n pop[i].append(data[0])\n pop[i].append(data[1])\n \n if self.ncores > 1: # evaluate warmup in parallel\n core_list=[]\n for key in pop:\n caseid='ind{}'.format(key+1)\n core_list.append([pop[key][0], caseid])\n p=Pool(self.ncores)\n fitness=p.map(self.gen_object, core_list)\n p.close(); p.join()\n\n [pop[ind].append(fitness[ind]) for ind in range(len(pop))]\n \n else: # evaluate warmup in series\n for key in pop:\n caseid='ind{}'.format(key+1)\n fitness=self.fit(pop[key][0], caseid)\n pop[key].append(fitness)\n return pop # return final pop dictionary with ind, strategy, and fitness\n\n def fit(self, ind, caseid):\n \"\"\"\n This function evaluates an individual's fitness\n Inputs:\n -ind (list): an individual whose fitness to evaluate\n -caseid (str): a string that specifies the given individual\n Returns: \n -mean_reward (float): fitness value \n \"\"\"\n try:\n #---------------------------------------------\n # Prepares directories and files for one case\n # --------------------------------------------\n self.param_names=list(self.param_dict.keys())\n i = caseid[3:]\n\n os.makedirs('./tunecases/case{}'.format(i), exist_ok=True)\n 
self.new_template=copy.deepcopy(self.template)\n for j in range (len(self.param_names)):\n self.new_template=self.new_template.replace(str(self.param_names[j]), str(ind[j]))\n \n filename='./tunecases/case{}/case{}.inp'.format(i, i)\n with open (filename, 'w') as fout:\n fout.writelines(self.new_template)\n \n # copy external files into the new directory, if extfiles card exists\n if 'extfiles' in self.tuneblock.keys():\n if self.tuneblock['extfiles']:\n print('--debug: external files are identified, copying them into each case directory')\n for item in self.tuneblock['extfiles']:\n os.system('cp -r {} ./tunecases/case{}/'.format(item, i))\n\n casenum = caseid[3:]\n print('--------------------------------------------------')\n print('Running TUNE Case {}/{}: {}'.format(casenum, self.ncases, ind))\n subprocess.call([self.python_path, self.neorl_path, '-i', 'case{}.inp'.format(casenum)], cwd='./tunecases/case{}/'.format(casenum)) # this exceutes neorl for this case.inp\n print('--------------------------------------------------')\n \n #--------------------------------------------------------------------------------------------------------------\n # Try to infer the _out.csv file in the directory since only one method is allowed\n csvfile=[f for f in os.listdir('./tunecases/case{}/case{}_log/'.format(casenum, casenum)) if f.endswith('_out.csv')]\n if len(csvfile) > 1:\n raise Exception ('multiple *_out.csv files can be found in the logger of TUNE, only one is allowed')\n #--------------------------------------------------------------------------------------------------------------\n reward_lst=pd.read_csv('./tunecases/case{}/case{}_log/{}'.format(casenum,casenum, csvfile[0]), usecols=['reward']).values\n mean_reward=np.mean(reward_lst[-self.n_last_episodes:])\n max_reward=np.max(reward_lst)\n \n with open (self.csvlogger, 'a') as fout:\n fout.write(str(casenum) +',')\n [fout.write(str(item) + ',') for item in ind]\n fout.write(str(mean_reward) + ',' + str(max_reward) + '\\n')\n \n return mean_reward\n \n except:\n print('--error: case{}.inp failed during execution'.format(casenum))\n \n return 'case{}.inp:failed'.format(casenum)\n \n def gen_object(self, inp):\n \"\"\"\n This is a worker for the multiprocess Pool \n Inputs:\n -inp (list of lists): contains data for each core [[ind1, caseid1], ..., [indN, caseidN]]\n Returns: \n -fitness value (float)\n \"\"\"\n return self.fit(inp[0], inp[1])\n \n def select(self, pop):\n \"\"\"\n Selection function sorts the population from max to min based on fitness and selects the k best\n Inputs:\n -pop (dict): population in dictionary structure\n -k (int): top k individuals are selected\n Returns:\n -best_dict (dict): the new orded dictionary with top k selected\n \"\"\"\n k=self.MU\n pop=list(pop.items())\n pop.sort(key=lambda e: e[1][2], reverse=True)\n sorted_dict=dict(pop[:k])\n\n # This block creates a new dict where keys are reset to 0 ... k in order to avoid unordered keys after sort \n best_dict=defaultdict(list)\n index=0\n for key in sorted_dict:\n best_dict[index].append(sorted_dict[key][0])\n best_dict[index].append(sorted_dict[key][1])\n best_dict[index].append(sorted_dict[key][2])\n index+=1 \n\n sorted_dict.clear()\n return best_dict\n \n def cx(self, ind1, ind2, strat1, strat2):\n \"\"\"\n Executes a classical two points crossover on both the individuals and their strategy. \n The individuals/strategies should be a list. The crossover points for the individual and the \n strategy are the same. 
\n\n Inputs:\n -ind1 (list): The first individual participating in the crossover. \n -ind2 (list): The second individual participating in the crossover.\n -strat1 (list): The first evolution strategy participating in the crossover. \n -strat2 (list): The second evolution strategy \n Returns:\n - The new ind1, ind2, strat1, strat2, after crossover in list form\n \"\"\"\n \n #for item in ind1:\n # print('individual 1', type(item))\n #for item in ind2:\n # print('individual 2', type(item))\n #for item in strat1:\n # print('strategy 1', type(item))\n #for item in strat2:\n # print('strategy 2', type(item))\n \n size = min(len(ind1), len(ind2))\n\n pt1 = random.randint(1, size)\n pt2 = random.randint(1, size-1)\n if pt2 >= pt1:\n pt2 +=1\n else:\n pt1, pt2 = pt2, pt1\n \n ind1[pt1:pt2], ind2[pt1:pt2] = ind2[pt1:pt2], ind1[pt1:pt2]\n strat1[pt1:pt2], strat2[pt1:pt2] = strat2[pt1:pt2], strat1[pt1:pt2]\n\n return ind1, ind2, strat1, strat2 \n \n def mutES(self, ind, strat):\n \"\"\"\n Mutate an evolution strategy according to mixed Discrete/Continuous mutation rules \n Input:\n -ind (list): individual to be mutated\n -strat (list): individual strategy to be mutated \n Returns:\n -ind (list): new individual after mutation\n -strat (list): individual strategy after mutation\n \"\"\"\n size=len(ind)\n tau=1/np.sqrt(2*size)\n tau_prime=1/np.sqrt(2*np.sqrt(size))\n \n for i in range(size):\n # Grid distribution received\n if self.datatypes[i] == \"grid\":\n #if i in self.paraminds.keys():\n norm=random.gauss(0,1)\n # modify the ind strategy\n strat[i] = 1/(1+(1-strat[i])/strat[i]*np.exp(-tau*norm-tau_prime*random.gauss(0,1)))\n # make a transformation of strategy to ensure it is between smin, smax \n y=(strat[i]-self.SMIN)/(self.SMAX-self.SMIN)\n if np.floor(y) % 2 == 0:\n y_prime=np.abs(y-np.floor(y))\n else:\n y_prime=1-np.abs(y-np.floor(y))\n strat[i] = self.SMIN + (self.SMAX-self.SMIN)*y_prime\n\n # check if this attribute is mutated based on the updated strategy\n if random.random() < strat[i]:\n # make a list of possibilities after excluding the current value to enforce mutation\n paramname=self.paraminds[i]\n ind[i]=random.sample(self.paramvals[paramname], 1)[0]\n\n # Random integer distribution received\n elif self.datatypes[i] == \"int\":\n norm=random.gauss(0,1)\n # modify the ind strategy \n strat[i] = 1/(1+(1-strat[i])/strat[i]*np.exp(-tau*norm-tau_prime*random.gauss(0,1)))\n # make a transformation of strategy to ensure it is between smin, smax \n y=(strat[i]-self.SMIN)/(self.SMAX-self.SMIN)\n if np.floor(y) % 2 == 0:\n y_prime=np.abs(y-np.floor(y))\n else:\n y_prime=1-np.abs(y-np.floor(y))\n strat[i] = self.SMIN + (self.SMAX-self.SMIN)*y_prime\n\n # check if this attribute is mutated based on the updated strategy \n #if random.random() < strat[i]:\n # make a list of possibilities after excluding the current value to enforce mutation\n choices=list(range(self.LOW[i], self.UP[i]+1))\n choices.remove(ind[i])\n ind[i] = random.choice(choices)\n\n # Uniform float distribution received\n elif self.datatypes[i] == \"float\":\n norm=random.gauss(0,1)\n if random.random() < self.INDPB: # this indicates whether ind/strategy to be mutated or not for this float variable\n strat[i] *= np.exp(tau*norm + tau_prime * random.gauss(0,1)) # normal mutation strategy\n ind[i] += strat[i] * random.gauss(0,1) # update the individual position\n \n # check if the new individual falls within lower/uppder boundaries\n if ind[i] < self.LOW[i]:\n ind[i] = self.LOW[i]\n if ind[i] > self.UP[i]:\n ind[i] = self.UP[i]\n 
\n else:\n raise Exception('ES mutation strategy works with int, float, or grid distributions, the type provided cannot be interpreted')\n \n return ind, strat\n \n def GenOffspring(self, pop):\n \"\"\"\n This function generates the offspring by applying crossover, mutation, OR reproduction. \n Inputs:\n -pop (dict): population in dictionary structure\n Returns:\n -offspring (dict): new modified population in dictionary structure\n \"\"\"\n\n pop_indices=list(range(0,len(pop)))\n offspring=defaultdict(list)\n for i in range(self.popsize):\n alpha=random.random()\n #----------------------\n # Crossover\n #----------------------\n if alpha < self.CXPB:\n index1, index2=random.sample(pop_indices,2)\n ind1, ind2, strat1, strat2=self.cx(ind1=list(pop[index1][0]), ind2=list(pop[index2][0]),\n strat1=list(pop[index1][1]), strat2=list(pop[index2][1]))\n offspring[i].append(ind1)\n offspring[i].append(strat1)\n #print('crossover is done for sample {} between {} and {}'.format(i,index1,index2))\n #----------------------\n # Mutation\n #----------------------\n elif alpha < self.CXPB + self.MUTPB: # Apply mutation\n index = random.choice(pop_indices)\n \n ind, strat=self.mutES(ind=list(pop[index][0]), strat=list(pop[index][1]))\n offspring[i].append(ind)\n offspring[i].append(strat)\n #print('mutation is done for sample {} based on {}'.format(i,index))\n #------------------------------\n # Reproduction from population\n #------------------------------\n else:\n index=random.choice(pop_indices)\n offspring[i].append(pop[index][0])\n offspring[i].append(pop[index][1])\n #print('reproduction is done for sample {} based on {}'.format(i,index))\n return offspring \n\n def run_cases(self):\n \"\"\"\n This function runs the evolutioanry algorithm over self.ngens generations. 
\n \"\"\"\n #------------------------------\n # Begin the evolution process\n #------------------------------\n with open (self.csvlogger, 'w') as fout:\n fout.write('caseid, ')\n [fout.write(item + ',') for item in self.param_names]\n fout.write('mean_reward,max_reward\\n')\n\n #print('PARAM dict', self.param_dict)\n #print('PARAM types', self.datatypes)\n self.population=self.init_pop()\n case_idx=0\n self.currentcase=self.popsize+1\n for gen in range(1, self.ngens): \n case_idx=0\n caseids=['ind{}'.format(ind) for ind in range(self.currentcase, self.currentcase+self.popsize+1)]\n # Vary the population and generate new offspring\n offspring=self.GenOffspring(pop=self.population)\n\n # Evaluate the individuals with invalid fitness using multiprocessing Pool\n if self.ncores > 1:\n core_list=[]\n for key in offspring:\n core_list.append([offspring[key][0], caseids[case_idx]])\n case_idx+=1\n # initialize a pool\n p=Pool(self.ncores)\n fitness=p.map(self.gen_object, core_list)\n p.close(); p.join()\n\n [offspring[ind].append(fitness[ind]) for ind in range(len(offspring))]\n else:\n for ind in range(len(offspring)):\n fitness=self.fit(offspring[ind][0], caseids[case_idx])\n case_idx+=1\n offspring[ind].append(fitness)\n \n self.currentcase+=self.popsize\n # Select the next generation population \n self.population = copy.deepcopy(self.select(pop=offspring))\n \n\n csvdata=pd.read_csv('tune.csv')\n asc_data=csvdata.sort_values(by=['caseid'],ascending=True)\n des_data=csvdata.sort_values(by=['mean_reward'],ascending=False)\n des_data2=csvdata.sort_values(by=['max_reward'],ascending=False)\n asc_data.to_csv('tune.csv', index=False)\n\n mean = np.mean(des_data.iloc[:,4:5])\n totalmean=mean.tolist()[0]\n \n try:\n failed_cases=len([print ('failed') for item in self.population if isinstance(item, str)])\n except:\n failed_cases='NA'\n \n print ('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')\n print('Mean Rewards for all cases=', totalmean)\n print ('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')\n print ('All TUNE CASES ARE COMPLETED')\n print ('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')\n print('--debug: Check tunesummary.txt file for best hyperparameters found')\n print('--debug: Check tune.csv file for complete csv logger of all cases results')\n print('--debug: Check tunecases directory for case-by-case detailed results')\n \n with open ('tunesummary.txt', 'w') as fout:\n \n fout.write(self.logo)\n fout.write('*****************************************************\\n')\n fout.write('Summary for the TUNE case \\n')\n fout.write('*****************************************************\\n')\n fout.write('Number of cases evaluated: {} \\n'.format(self.ncases))\n fout.write('Number of failed cases: {} \\n'.format(failed_cases))\n fout.write('Parameter names: {} \\n'.format(self.param_names))\n fout.write('Parameter values: {} \\n '.format(self.param_dict))\n fout.write ('--------------------------------------------------------------------------------------\\n')\n \n if des_data.shape[0] < 20:\n top=des_data.shape[0]\n fout.write ('Top {} hyperparameter configurations ranked according to MEAN reward \\n'.format(top))\n fout.write(des_data.iloc[:top].to_string(index=False))\n else:\n top=20\n fout.write ('Top {} hyperparameter configurations ranked according to MEAN reward \\n'.format(top))\n fout.write(des_data.iloc[:top].to_string(index=False))\n fout.write ('\\n')\n fout.write 
('--------------------------------------------------------------------------------------\\n')\n if des_data2.shape[0] < 20:\n top=des_data2.shape[0]\n fout.write ('Top {} hyperparameter configurations ranked according to MAX reward \\n'.format(top))\n fout.write(des_data2.iloc[:top].to_string(index=False))\n else:\n top=20\n fout.write ('Top {} hyperparameter configurations ranked according to MAX reward \\n'.format(top))\n fout.write(des_data2.iloc[:top].to_string(index=False))" ]
[ [ "pandas.read_csv", "numpy.sqrt", "numpy.max", "numpy.mean", "numpy.floor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
zhuriheng/faster-rcnn.pytorch
[ "7536b0f5eee254350fb4dce5c4a077ac6d29db16" ]
[ "test_net.py" ]
[ "# --------------------------------------------------------\n# Tensorflow Faster R-CNN\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Jiasen Lu, Jianwei Yang, based on code from Ross Girshick\n# --------------------------------------------------------\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport _init_paths\nimport os\nimport sys\nimport numpy as np\nimport argparse\nimport pprint\nimport pdb\nimport time\n\nimport cv2\n\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.optim as optim\nimport pickle\nfrom roi_data_layer.roidb import combined_roidb\nfrom roi_data_layer.roibatchLoader import roibatchLoader\nfrom model.utils.config import cfg, cfg_from_file, cfg_from_list, get_output_dir\nfrom model.rpn.bbox_transform import clip_boxes\nfrom model.nms.nms_wrapper import nms\nfrom model.rpn.bbox_transform import bbox_transform_inv\nfrom model.utils.net_utils import save_net, load_net, vis_detections\n\nfrom model.faster_rcnn.vgg16 import vgg16\nfrom model.faster_rcnn.resnet import resnet\n\ntry:\n xrange # Python 2\nexcept NameError:\n xrange = range # Python 3\n\n\ndef parse_args():\n \"\"\"\n Parse input arguments\n \"\"\"\n parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')\n parser.add_argument('--dataset', dest='dataset',\n help='training dataset',\n default='pascal_voc', type=str)\n parser.add_argument('--cfg', dest='cfg_file',\n help='optional config file',\n default='cfgs/vgg16.yml', type=str)\n parser.add_argument('--net', dest='net',\n help='vgg16, res50, res101, res152',\n default='res101', type=str)\n parser.add_argument('--set', dest='set_cfgs',\n help='set config keys', default=None,\n nargs=argparse.REMAINDER)\n parser.add_argument('--load_dir', dest='load_dir',\n help='directory to load models', default=\"models\",\n type=str)\n parser.add_argument('--cuda', dest='cuda',\n help='whether use CUDA',\n action='store_true')\n parser.add_argument('--ls', dest='large_scale',\n help='whether use large imag scale',\n action='store_true')\n parser.add_argument('--mGPUs', dest='mGPUs',\n help='whether use multiple GPUs',\n action='store_true')\n parser.add_argument('--cag', dest='class_agnostic',\n help='whether perform class_agnostic bbox regression',\n action='store_true')\n parser.add_argument('--parallel_type', dest='parallel_type',\n help='which part of model to parallel, 0: all, 1: model before roi pooling',\n default=0, type=int)\n parser.add_argument('--checksession', dest='checksession',\n help='checksession to load model',\n default=1, type=int)\n parser.add_argument('--checkepoch', dest='checkepoch',\n help='checkepoch to load network',\n default=1, type=int)\n parser.add_argument('--checkpoint', dest='checkpoint',\n help='checkpoint to load network',\n default=10021, type=int)\n parser.add_argument('--vis', dest='vis',\n help='visualization mode',\n action='store_true')\n parser.add_argument('--input_dir', dest='input_dir',\n help='directory to save models',\n type=str)\n args = parser.parse_args()\n return args\n\nlr = cfg.TRAIN.LEARNING_RATE\nmomentum = cfg.TRAIN.MOMENTUM\nweight_decay = cfg.TRAIN.WEIGHT_DECAY\n\nif __name__ == '__main__':\n\n args = parse_args()\n\n print('Called with args:')\n print(args)\n\n if torch.cuda.is_available() and not args.cuda:\n print(\"WARNING: You have a CUDA device, so you should probably run with --cuda\")\n\n np.random.seed(cfg.RNG_SEED)\n if args.dataset == 
\"pascal_voc\":\n args.imdb_name = \"voc_2007_trainval\"\n args.imdbval_name = \"voc_2007_test\"\n args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']\n elif args.dataset == \"pascal_voc_0712\":\n args.imdb_name = \"voc_2007_trainval+voc_2012_trainval\"\n args.imdbval_name = \"voc_2007_test\"\n args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']\n elif args.dataset == \"coco\":\n args.imdb_name = \"coco_2014_train+coco_2014_valminusminival\"\n args.imdbval_name = \"coco_2014_minival\"\n args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']\n elif args.dataset == \"imagenet\":\n args.imdb_name = \"imagenet_train\"\n args.imdbval_name = \"imagenet_val\"\n args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']\n elif args.dataset == \"vg\":\n args.imdb_name = \"vg_150-50-50_minitrain\"\n args.imdbval_name = \"vg_150-50-50_minival\"\n args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']\n\n args.cfg_file = \"cfgs/{}/{}_ls.yml\".format(args.dataset, args.net) if args.large_scale else \"cfgs/{}/{}.yml\".format(\n args.dataset, args.net)\n\n if args.cfg_file is not None:\n cfg_from_file(args.cfg_file)\n if args.set_cfgs is not None:\n cfg_from_list(args.set_cfgs)\n\n print('Using config:')\n pprint.pprint(cfg)\n\n cfg.TRAIN.USE_FLIPPED = False\n imdb, roidb, ratio_list, ratio_index = combined_roidb(args.imdbval_name, False)\n imdb.competition_mode(on=True)\n\n print('{:d} roidb entries'.format(len(roidb)))\n\n input_dir = args.input_dir\n if not os.path.exists(input_dir):\n raise Exception('There is no input directory for loading network from ' + input_dir)\n load_name = os.path.join(input_dir,\n 'faster_rcnn_{}_{}_{}.pth'.format(args.checksession, args.checkepoch, args.checkpoint))\n\n # initilize the network here.\n if args.net == 'vgg16':\n fasterRCNN = vgg16(imdb.classes, pretrained=False, class_agnostic=args.class_agnostic)\n elif args.net == 'res101':\n fasterRCNN = resnet(imdb.classes, 101, pretrained=False, class_agnostic=args.class_agnostic)\n elif args.net == 'res50':\n fasterRCNN = resnet(imdb.classes, 50, pretrained=False, class_agnostic=args.class_agnostic)\n elif args.net == 'res152':\n fasterRCNN = resnet(imdb.classes, 152, pretrained=False, class_agnostic=args.class_agnostic)\n else:\n print(\"network is not defined\")\n pdb.set_trace()\n\n fasterRCNN.create_architecture()\n\n print(\"load checkpoint %s\" % (load_name))\n checkpoint = torch.load(load_name)\n fasterRCNN.load_state_dict(checkpoint['model'])\n if 'pooling_mode' in checkpoint.keys():\n cfg.POOLING_MODE = checkpoint['pooling_mode']\n\n\n print('load model successfully!')\n # initilize the tensor holder here.\n im_data = torch.FloatTensor(1)\n im_info = torch.FloatTensor(1)\n num_boxes = torch.LongTensor(1)\n gt_boxes = torch.FloatTensor(1)\n\n # ship to cuda\n if args.cuda:\n im_data = im_data.cuda()\n im_info = im_info.cuda()\n num_boxes = num_boxes.cuda()\n gt_boxes = gt_boxes.cuda()\n\n # make variable\n im_data = Variable(im_data)\n im_info = Variable(im_info)\n num_boxes = Variable(num_boxes)\n gt_boxes = Variable(gt_boxes)\n\n if args.cuda:\n cfg.CUDA = True\n\n if args.cuda:\n fasterRCNN.cuda()\n\n start = time.time()\n max_per_image = 100\n\n vis = args.vis\n\n if vis:\n thresh = 0.05\n else:\n thresh = 0.0\n\n save_name = 'faster_rcnn_10'\n num_images = len(imdb.image_index)\n all_boxes = [[[] for _ in xrange(num_images)]\n for _ in xrange(imdb.num_classes)]\n\n output_dir = 
get_output_dir(imdb, save_name)\n dataset = roibatchLoader(roidb, ratio_list, ratio_index, 1, \\\n imdb.num_classes, training=False, normalize = False)\n dataloader = torch.utils.data.DataLoader(dataset, batch_size=1,\n shuffle=False, num_workers=0,\n pin_memory=True)\n\n data_iter = iter(dataloader)\n\n _t = {'im_detect': time.time(), 'misc': time.time()}\n det_file = os.path.join(output_dir, 'detections.pkl')\n\n fasterRCNN.eval()\n empty_array = np.transpose(np.array([[],[],[],[],[]]), (1,0))\n for i in range(num_images):\n\n data = next(data_iter)\n im_data.data.resize_(data[0].size()).copy_(data[0])\n im_info.data.resize_(data[1].size()).copy_(data[1])\n gt_boxes.data.resize_(data[2].size()).copy_(data[2])\n num_boxes.data.resize_(data[3].size()).copy_(data[3])\n\n det_tic = time.time()\n rois, cls_prob, bbox_pred, \\\n rpn_loss_cls, rpn_loss_box, \\\n RCNN_loss_cls, RCNN_loss_bbox, \\\n rois_label = fasterRCNN(im_data, im_info, gt_boxes, num_boxes)\n\n scores = cls_prob.data\n boxes = rois.data[:, :, 1:5]\n\n if cfg.TEST.BBOX_REG:\n # Apply bounding-box regression deltas\n box_deltas = bbox_pred.data\n if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:\n # Optionally normalize targets by a precomputed mean and stdev\n if args.class_agnostic:\n box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \\\n + torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()\n box_deltas = box_deltas.view(1, -1, 4)\n else:\n box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \\\n + torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()\n box_deltas = box_deltas.view(1, -1, 4 * len(imdb.classes))\n\n pred_boxes = bbox_transform_inv(boxes, box_deltas, 1)\n pred_boxes = clip_boxes(pred_boxes, im_info.data, 1)\n else:\n # Simply repeat the boxes, once for each class\n _ = torch.from_numpy(np.tile(boxes, (1, scores.shape[1])))\n pred_boxes = _.cuda() if args.cuda > 0 else _\n\n pred_boxes /= data[1][0][2].item()\n\n scores = scores.squeeze()\n pred_boxes = pred_boxes.squeeze()\n det_toc = time.time()\n detect_time = det_toc - det_tic\n misc_tic = time.time()\n if vis:\n im = cv2.imread(imdb.image_path_at(i))\n im2show = np.copy(im)\n for j in xrange(1, imdb.num_classes):\n inds = torch.nonzero(scores[:,j]>thresh).view(-1)\n # if there is det\n if inds.numel() > 0:\n cls_scores = scores[:,j][inds]\n _, order = torch.sort(cls_scores, 0, True)\n if args.class_agnostic:\n cls_boxes = pred_boxes[inds, :]\n else:\n cls_boxes = pred_boxes[inds][:, j * 4:(j + 1) * 4]\n\n cls_dets = torch.cat((cls_boxes, cls_scores.unsqueeze(1)), 1)\n # cls_dets = torch.cat((cls_boxes, cls_scores), 1)\n cls_dets = cls_dets[order]\n keep = nms(cls_dets, cfg.TEST.NMS)\n cls_dets = cls_dets[keep.view(-1).long()]\n if vis:\n im2show = vis_detections(im2show, imdb.classes[j], cls_dets.cpu().numpy(), 0.3)\n all_boxes[j][i] = cls_dets.cpu().numpy()\n else:\n all_boxes[j][i] = empty_array\n\n # Limit to max_per_image detections *over all classes*\n if max_per_image > 0:\n image_scores = np.hstack([all_boxes[j][i][:, -1]\n for j in xrange(1, imdb.num_classes)])\n if len(image_scores) > max_per_image:\n image_thresh = np.sort(image_scores)[-max_per_image]\n for j in xrange(1, imdb.num_classes):\n keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]\n all_boxes[j][i] = all_boxes[j][i][keep, :]\n\n misc_toc = time.time()\n nms_time = misc_toc - misc_tic\n\n sys.stdout.write('im_detect: {:d}/{:d} {:.3f}s {:.3f}s \\r' \\\n .format(i + 1, num_images, 
detect_time, nms_time))\n sys.stdout.flush()\n\n if vis:\n cv2.imwrite('result.png', im2show)\n pdb.set_trace()\n #cv2.imshow('test', im2show)\n #cv2.waitKey(0)\n\n with open(det_file, 'wb') as f:\n pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)\n\n print('Evaluating detections')\n imdb.evaluate_detections(all_boxes, output_dir)\n\n end = time.time()\n print(\"test time: %0.4fs\" % (end - start))\n" ]
[ [ "torch.LongTensor", "numpy.random.seed", "torch.load", "torch.utils.data.DataLoader", "numpy.tile", "numpy.sort", "numpy.copy", "torch.FloatTensor", "torch.sort", "torch.cuda.is_available", "torch.nonzero", "numpy.array", "numpy.where", "torch.autograd.Variable" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
hirune924/kaggle-HuBMAP
[ "e4c2008378eb773db551cee52380bfccdf3a10fa" ]
[ "system/system.py" ]
[ "import pytorch_lightning as pl\nfrom loss.loss import get_loss\nfrom optimizer.optimizer import get_optimizer\nfrom scheduler.scheduler import get_scheduler\n\nimport torch\nimport numpy as np\nfrom pytorch_lightning.metrics import Accuracy\nimport segmentation_models_pytorch as smp\n\nfrom utils.utils import load_obj\nimport albumentations as A\nfrom utils.preprocessing import *\nimport shutil\n\n\n\nclass LitClassifier(pl.LightningModule):\n def __init__(self, hparams, model):\n super().__init__()\n self.save_hyperparameters(hparams)\n self.model = model\n self.criteria = get_loss(hparams.training.loss)\n #self.accuracy = Accuracy()\n self.dice = smp.utils.losses.DiceLoss(activation='sigmoid')\n\n def forward(self, x):\n # use forward for inference/predictions\n return self.model(x)\n\n def configure_optimizers(self):\n optimizer = get_optimizer(self.model.parameters(), self.hparams.training.optimizer)\n\n scheduler = get_scheduler(optimizer, self.hparams.training.scheduler)\n \n return [optimizer], [scheduler]\n\n def training_step(self, batch, batch_idx):\n x, y = batch\n if self.hparams.dataset.mixup:\n num_batch = self.hparams.dataset.batch_size\n alpha = 0.2\n #rnd = torch.from_numpy(np.random.beta(alpha,alpha,int(num_batch/2))).type_as(x)\n #rnd = rnd.reshape(int(num_batch/2), 1, 1, 1)\n #x = x[:int(num_batch/2)]*rnd + x[int(num_batch/2):]*(1-rnd)\n #y = y[:int(num_batch/2)]*rnd + y[int(num_batch/2):]*(1-rnd)\n rnd = torch.from_numpy(np.random.beta(alpha,alpha,1)).type_as(x)\n x = x[:int(num_batch/2)]*rnd + x[int(num_batch/2):]*(1-rnd)\n y_hat = self.model(x)\n if self.hparams.dataset.mixup:\n loss = self.criteria(y_hat, y[:int(num_batch/2)])*rnd + self.criteria(y_hat, y[int(num_batch/2):])*(1-rnd)\n else:\n loss = self.criteria(y_hat, y)\n self.log('train_loss', loss, on_epoch=True)\n return loss\n\n def validation_step(self, batch, batch_idx):\n x, y = batch\n y_hat = self.model(x)\n loss = self.criteria(y_hat, y)\n dice = 1-self.dice(y_hat, y)\n\n #self.log('val_loss', loss)\n #self.log('val_dice', dice)\n\n return {\n \"val_loss\": loss,\n \"val_dice\": dice\n }\n \n def validation_epoch_end(self, outputs):\n avg_val_loss = torch.stack([x[\"val_loss\"] for x in outputs]).mean()\n avg_val_dice = torch.stack([x[\"val_dice\"] for x in outputs]).mean()\n\n self.log('val_loss', avg_val_loss)\n self.log('val_dice', avg_val_dice)\n #y = torch.cat([x[\"y\"] for x in outputs]).cpu()\n #y_hat = torch.cat([x[\"y_hat\"] for x in outputs]).cpu()\n\n #preds = np.argmax(y_hat, axis=1)\n\n #val_accuracy = self.accuracy(y, preds)\n\n #self.log('avg_val_loss', avg_val_loss)\n #self.log('val_acc', val_accuracy)\n\n def test_step(self, batch, batch_idx):\n x, y = batch\n y_hat = self.model(x)\n loss = self.criteria(y_hat, y)\n self.log('test_loss', loss)\n\n \n" ]
[ [ "torch.stack", "numpy.random.beta" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
han-kwang/coronatest-scandata
[ "98fd49f4fdcda10561bce41e769bbbb70ecfe94e" ]
[ "coronatest_analyze_csv.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"Analyze CSV file into scores.\n\nCreated on Sat Feb 12 22:15:29 2022 // @hk_nien\n\"\"\"\nfrom pathlib import Path\nimport os\nimport re\nimport sys\nimport pandas as pd\nimport numpy as np\n\nPCODES = dict([\n # Regio Noord\n (1011, 'Amsterdam'),\n (1625, 'Hoorn|Zwaag'),\n (1811, 'Alkmaar'),\n (7471, 'Goor'),\n (7556, 'Hengelo'),\n (7903, 'Hoogeveen'),\n (7942, 'Meppel'),\n (8011, 'Zwolle'),\n (8232, 'Lelystad'),\n (8442, 'Heerenveen'),\n (8911, 'Leeuwarden'),\n (9291, 'Kollum'),\n (9501, 'Stadskanaal'),\n (9726, 'Groningen'),\n\n # Regio Midden\n (2406, 'Alphen a/d Rijn'),\n (2515, 'Den Haag'),\n (3013, 'Rotterdam'),\n (3511, 'Utrecht'),\n (3901, 'Veenendaal'),\n ((7137, 7131), 'Lichtenvoorde|Groenlo'),\n (7311, 'Apeldoorn'),\n\n # Regio Zuid\n (4325, 'Renesse'),\n (4462, 'Goes'),\n (4701, 'Roosendaal'),\n (5038, 'Tilburg'),\n (5401, 'Uden'),\n (5611, 'Eindhoven'),\n (5801, 'Oostrum'),\n (6101, 'Echt'),\n (6229, 'Maastricht'),\n (6541, 'Nijmegen'),\n ])\n\n\ndef get_bad_scan_times():\n \"\"\"Return list of Timestamps with bad scan times, from CSV data.\"\"\"\n df = pd.read_csv('data-ggd/ggd_bad_scans.txt', comment='#')\n tstamps = pd.to_datetime(df['Timestamp']).to_list()\n return tstamps\n\ndef _mean_time(ts_list):\n \"\"\"Return mean timestamp value from list of timestamps.\"\"\"\n ts0 = ts_list[0]\n delta_sum = pd.Timedelta(0)\n for ts in ts_list:\n delta_sum += (ts -ts0)\n ts_mean = ts0 + delta_sum / len(ts_list)\n return ts_mean\n\n\ndef _delta_time_hhmm(hm):\n \"\"\"Convert 'hh:mm' string to TimeDelta.\"\"\"\n return pd.Timedelta(f'{hm}:00')\n\n\ndef _summary_to_scores(summary):\n \"\"\"Convert summary from _read_log to scores dict and effective timestamp.\n\n Parameters:\n\n - summary: dict with int(pc4) -> [(query_time, appt_time), ...]\n\n Return:\n\n - scores dict: int(pc4) -> score (int or float or '?')\n - timestamp: middle query timestamp of this run.\n \"\"\"\n\n # Convert to number codes.\n scores = {k: '?' 
for k in PCODES}\n multi_pcs = {} # pc4 -> (pc4[0], pc4[1], ...)\n for pc in PCODES:\n if isinstance(pc, tuple):\n for pc1 in pc:\n multi_pcs[pc1] = pc\n\n qtms = []\n dhm = _delta_time_hhmm\n for pc4, vlist in summary.items():\n pc4 = int(pc4)\n if pc4 not in scores:\n if pc4 in multi_pcs:\n pc4_key = multi_pcs[pc4]\n else:\n print(f'{pc4} not in list...')\n continue\n else:\n pc4_key = pc4\n if len(vlist) == 0:\n scores[pc4_key] = 7\n continue\n qtm = _mean_time([v[0] for v in vlist]) # query time\n qtms.append(qtm)\n atm = min(v[1] for v in vlist) # earliest appointment time\n qtm_00 = pd.Timestamp(qtm.strftime('%Y-%m-%dT00:00'))\n thresholds = [\n (3, qtm_00 + dhm('23:59')),\n (4, qtm + dhm('24:00')),\n (5, qtm_00 + dhm('48:00')),\n (6, qtm + dhm('48:00')),\n (6.3, qtm_00 + dhm('72:00')),\n (6.7, qtm + dhm('72:00')),\n (7, atm)\n ]\n if qtm.hour < 9:\n thresholds.insert(0, (1, qtm_00 + dhm('13:00')))\n elif qtm.hour < 13:\n thresholds.insert(0, (1, qtm + dhm('4:00')))\n elif qtm.hour < 17:\n thresholds.insert(0, (1, qtm_00 + dhm('24:00')))\n thresholds.insert(1, (2, qtm + dhm('20:00')))\n else:\n thresholds.insert(0, (1, qtm_00 + dhm('24:00')))\n thresholds.insert(1, (2, qtm_00 + dhm('37:00')))\n\n for s, tm in thresholds:\n if atm < tm:\n scores[pc4_key] = s\n break\n if len(qtms) == 0:\n qtm_mid = pd.Timestamp(None)\n else:\n qtm_min = min(qtms)\n qtm_mid = qtm_min + (max(qtms) - qtm_min)/2\n return scores, qtm_mid\n\n\ndef _get_min_wait(summary):\n \"\"\"Return minimum and median wait Timedelta between scan time and appointment.\n\n summary is dict of pc4 -> list of timestamps\n No data -> 999 h.\n\n For the median, NaT is counted as infinite.\n \"\"\"\n wtimes = []\n for _, vlist in summary.items():\n wtimes_this = [atm - qtm for qtm, atm in vlist]\n wtimes.append(\n min(wtimes_this) if wtimes_this else pd.Timedelta(99, 'h')\n )\n minwait = min(wtimes) if wtimes else 999\n medwait = pd.Timedelta(np.median(wtimes))\n return minwait, medwait\n\n\ndef load_csv(csv_fname):\n \"\"\"Return DataFrame and list of start times (+1).\"\"\"\n df = pd.read_csv(csv_fname, comment='#')\n df['req_pc4'] = df['req_pc4'].astype(int)\n\n for c in df.columns:\n if c.endswith('_time') or c.endswith('_date'):\n df[c] = pd.to_datetime(df[c])\n else:\n df.loc[df[c].isna(), c] = None\n\n # start_tms: list of scan start times (plus one extra at the end)\n start_tms = df.loc[df['scan_time'].diff() > pd.Timedelta('10 min'), 'scan_time']\n start_tms = [df.iloc[0]['scan_time']] + list(start_tms)\n start_tms += [df.iloc[-1]['scan_time'] + pd.Timedelta('1 min')]\n return df, start_tms\n\ndef load_multi_csvs(csv_fnames):\n \"\"\"Return DataFrame and list of start times (+1)\"\"\"\n dfs = []\n start_tms = []\n for f in csv_fnames:\n df, st = load_csv(f)\n dfs.append(df)\n start_tms.extend(st[:-1])\n df = pd.concat(dfs).reset_index()\n start_tms.append(df.iloc[-1]['scan_time'] + pd.Timedelta('1 min'))\n return df, start_tms\n\n\ndef get_scan_scores(df, tm_range):\n \"\"\"Get scan scores as pc4 -> score dict.\n\n Parameters:\n\n - df: DataFrame with scan_time, req_date, req_pc4, opt0_short_addr,\n opt0_time, opt0_loc_id, etc.\n - tm_range: (tm_start, tm_stop) timestamps.\n\n Return:\n\n - tstamp: timestamp of the scan (mid-point)\n - scores: dict of pc4->score\n - min_wait: Timedelta of minimum wait time from scan to appointment\n \"\"\"\n mask = (df['scan_time'] >= tm_range[0]) & (df['scan_time'] < tm_range[1])\n df1 = df.loc[mask]\n summary = {}\n for pc4, city_re in PCODES.items():\n pc4_tup = (pc4,) if 
isinstance(pc4, int) else pc4\n options = []\n req_pc4 = None\n for _, row in df1.loc[df1['req_pc4'].isin(pc4_tup)].iterrows():\n req_pc4 = int(row['req_pc4'])\n for i in range(3):\n addr = row[f'opt{i}_short_addr']\n if addr and re.match(f'{city_re}$', addr[5:]):\n options.append((row['scan_time'], row[f'opt{i}_time']))\n if req_pc4 is not None:\n summary[req_pc4] = options\n scores, tstamp = _summary_to_scores(summary)\n if pd.isna(tstamp):\n tstamp = df1.iloc[len(df1)//2]['scan_time']\n minwait, medwait = _get_min_wait(summary)\n if medwait == 999:\n medwait = pd.Timedelta(None)\n return tstamp, scores, minwait, medwait\n\n\ndef get_scan_scores_df(df, tm_ranges, decimal_comma=True):\n \"\"\"Get scan scores as dataframe, from csv dataframe.\n\n Blacklisted scan times are dropped.\n\n Parameters:\n\n - df: DataFrame with scan_time, req_date, req_pc4, opt0_short_addr,\n opt0_time, opt0_loc_id, etc.\n - tm_ranges: list of timestamps (+one at the end) with boundaries\n of timestamp ranges.\n - decimal_comma: True to have string values 6,3 rather than float 6.3.\n\n Return:\n\n - Dataframe with scores, date_str, time_str, pc4, min_wait, med_wait as columns.\n \"\"\"\n n = len(tm_ranges)\n records = []\n index = []\n minwait_hs = []\n medwait_hs = []\n bad_stimes = get_bad_scan_times()\n for i in range(n-1):\n tm_ra = tm_ranges[i:i+2]\n is_ok = True\n for tm in bad_stimes:\n if tm_ra[0] <= tm < tm_ra[1]:\n is_ok = False\n break\n if not is_ok:\n print(f'Dropped scan at {tm_ra[0].strftime(\"%Y-%m-%d %H:%M\")}')\n continue\n tm, scores, minwait, medwait = get_scan_scores(df, tm_ra)\n records.append(scores)\n index.append(tm)\n minwait_hs.append(minwait.total_seconds() / 3600)\n medwait_hs.append(medwait.total_seconds() / 3600)\n\n dates = [t.strftime('%Y-%m-%d') for t in index]\n times = [t.strftime('%H:%M') for t in index]\n sdf = pd.DataFrame.from_records(records)\n sdf.insert(0, 'Time', times)\n sdf.insert(0, 'Date', dates)\n sdf['min_wait_h'] = np.around(minwait_hs, 2)\n sdf['med_wait_h'] = np.around(medwait_hs, 2)\n sdf.loc[sdf['min_wait_h'].isna(), 'min_wait_h'] = 999\n sdf.columns = [\n ('/'.join([str(x) for x in c]) if isinstance(c, tuple) else c)\n for c in sdf.columns\n ]\n if decimal_comma:\n for c in sdf.columns[2:]:\n sdf[c] = sdf[c].astype(str)\n sdf[c] = sdf[c].str.replace('.', ',', regex=False)\n sdf[c] = sdf[c].str.replace(',0$', '', regex=False)\n sdf[c] = sdf[c].str.replace('?', '', regex=False)\n\n return sdf\n\n\nif __name__ == '__main__':\n\n in_spyder = ('SPYDER_ARGS' in os.environ)\n csv_fnames = sorted(Path('data-ggd').glob('ggd_scan-????-W??.csv'))\n do_all = ('--all' in sys.argv)\n do_all = do_all or in_spyder and input('(A)ll or latest?').lower() == 'a'\n if do_all:\n df, start_tms = load_multi_csvs(csv_fnames)\n sdf = get_scan_scores_df(df, start_tms).iloc[::-1]\n else:\n df, start_tms = load_csv(csv_fnames[-1])\n sdf = get_scan_scores_df(df, start_tms[-2:])\n print(sdf)\n if len(sdf) > 1:\n sdf.to_clipboard(index=False)\n print('Copied to clipboard including headers')\n elif len(sdf) == 1:\n sdf.iloc[[0], 2:].to_clipboard(header=False, index=False)\n print('Copied to clipboard, scores only.')\n else:\n print('No output.')\n\n if not in_spyder:\n # Note: in Spyder, copy/paste will stall while input is blocked.\n input('Press Enter to quit and clear clipboard.')\n\n" ]
[ [ "pandas.concat", "pandas.read_csv", "pandas.to_datetime", "numpy.around", "numpy.median", "pandas.Timedelta", "pandas.DataFrame.from_records", "pandas.isna", "pandas.Timestamp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
hardywu/vnpy
[ "81ab73dc57d12a3ff7c74c73665513b46fc0f668" ]
[ "vnpy/app/portfolio_strategy/backtesting.py" ]
[ "from collections import defaultdict\nfrom datetime import date, datetime, timedelta\nfrom typing import Dict, List, Set, Tuple\nfrom functools import lru_cache\nfrom copy import copy\nimport traceback\n\nimport numpy as np\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\nfrom pandas import DataFrame\n\nfrom vnpy.trader.constant import Direction, Offset, Interval, Status\nfrom vnpy.trader.database import database_manager\nfrom vnpy.trader.object import OrderData, TradeData, BarData\nfrom vnpy.trader.utility import round_to, extract_vt_symbol\n\nfrom .template import StrategyTemplate\n\n\nINTERVAL_DELTA_MAP = {\n Interval.MINUTE: timedelta(minutes=1),\n Interval.HOUR: timedelta(hours=1),\n Interval.DAILY: timedelta(days=1),\n}\n\n\nclass BacktestingEngine:\n \"\"\"\"\"\"\n\n gateway_name = \"BACKTESTING\"\n\n def __init__(self):\n \"\"\"\"\"\"\n self.vt_symbols: List[str] = []\n self.start: datetime = None\n self.end: datetime = None\n\n self.rates: Dict[str, float] = 0\n self.slippages: Dict[str, float] = 0\n self.sizes: Dict[str, float] = 1\n self.priceticks: Dict[str, float] = 0\n\n self.capital: float = 1_000_000\n self.risk_free: float = 0.02\n\n self.strategy: StrategyTemplate = None\n self.bars: Dict[str, BarData] = {}\n self.datetime: datetime = None\n\n self.interval: Interval = None\n self.days: int = 0\n self.history_data: Dict[Tuple, BarData] = {}\n self.dts: Set[datetime] = set()\n\n self.limit_order_count = 0\n self.limit_orders = {}\n self.active_limit_orders = {}\n\n self.trade_count = 0\n self.trades = {}\n\n self.logs = []\n\n self.daily_results = {}\n self.daily_df = None\n\n def clear_data(self) -> None:\n \"\"\"\n Clear all data of last backtesting.\n \"\"\"\n self.strategy = None\n self.bars = {}\n self.datetime = None\n\n self.limit_order_count = 0\n self.limit_orders.clear()\n self.active_limit_orders.clear()\n\n self.trade_count = 0\n self.trades.clear()\n\n self.logs.clear()\n self.daily_results.clear()\n self.daily_df = None\n\n def set_parameters(\n self,\n vt_symbols: List[str],\n interval: Interval,\n start: datetime,\n rates: Dict[str, float],\n slippages: Dict[str, float],\n sizes: Dict[str, float],\n priceticks: Dict[str, float],\n capital: int = 0,\n end: datetime = None,\n risk_free: float = 0\n ) -> None:\n \"\"\"\"\"\"\n self.vt_symbols = vt_symbols\n self.interval = interval\n\n self.rates = rates\n self.slippages = slippages\n self.sizes = sizes\n self.priceticks = priceticks\n\n self.start = start\n self.end = end\n self.capital = capital\n self.risk_free = risk_free\n\n def add_strategy(self, strategy_class: type, setting: dict) -> None:\n \"\"\"\"\"\"\n self.strategy = strategy_class(\n self, strategy_class.__name__, copy(self.vt_symbols), setting\n )\n\n def load_data(self) -> None:\n \"\"\"\"\"\"\n self.output(\"开始加载历史数据\")\n\n if not self.end:\n self.end = datetime.now()\n\n if self.start >= self.end:\n self.output(\"起始日期必须小于结束日期\")\n return\n\n # Clear previously loaded history data\n self.history_data.clear()\n self.dts.clear()\n\n # Load 30 days of data each time and allow for progress update\n progress_delta = timedelta(days=30)\n total_delta = self.end - self.start\n interval_delta = INTERVAL_DELTA_MAP[self.interval]\n\n for vt_symbol in self.vt_symbols:\n start = self.start\n end = self.start + progress_delta\n progress = 0\n\n data_count = 0\n while start < self.end:\n end = min(end, self.end) # Make sure end time stays within set range\n\n data = load_bar_data(\n vt_symbol,\n self.interval,\n start,\n end\n 
)\n\n for bar in data:\n self.dts.add(bar.datetime)\n self.history_data[(bar.datetime, vt_symbol)] = bar\n data_count += 1\n\n progress += progress_delta / total_delta\n progress = min(progress, 1)\n progress_bar = \"#\" * int(progress * 10)\n self.output(f\"{vt_symbol}加载进度:{progress_bar} [{progress:.0%}]\")\n\n start = end + interval_delta\n end += (progress_delta + interval_delta)\n\n self.output(f\"{vt_symbol}历史数据加载完成,数据量:{data_count}\")\n\n self.output(\"所有历史数据加载完成\")\n\n def run_backtesting(self) -> None:\n \"\"\"\"\"\"\n self.strategy.on_init()\n\n # Generate sorted datetime list\n dts = list(self.dts)\n dts.sort()\n\n # Use the first [days] of history data for initializing strategy\n day_count = 0\n ix = 0\n\n for ix, dt in enumerate(dts):\n if self.datetime and dt.day != self.datetime.day:\n day_count += 1\n if day_count >= self.days:\n break\n\n try:\n self.new_bars(dt)\n except Exception:\n self.output(\"触发异常,回测终止\")\n self.output(traceback.format_exc())\n return\n\n self.strategy.inited = True\n self.output(\"策略初始化完成\")\n\n self.strategy.on_start()\n self.strategy.trading = True\n self.output(\"开始回放历史数据\")\n\n # Use the rest of history data for running backtesting\n for dt in dts[ix:]:\n try:\n self.new_bars(dt)\n except Exception:\n self.output(\"触发异常,回测终止\")\n self.output(traceback.format_exc())\n return\n\n self.output(\"历史数据回放结束\")\n\n def calculate_result(self) -> None:\n \"\"\"\"\"\"\n self.output(\"开始计算逐日盯市盈亏\")\n\n if not self.trades:\n self.output(\"成交记录为空,无法计算\")\n return\n\n # Add trade data into daily reuslt.\n for trade in self.trades.values():\n d = trade.datetime.date()\n daily_result = self.daily_results[d]\n daily_result.add_trade(trade)\n\n # Calculate daily result by iteration.\n pre_closes = {}\n start_poses = {}\n\n for daily_result in self.daily_results.values():\n daily_result.calculate_pnl(\n pre_closes,\n start_poses,\n self.sizes,\n self.rates,\n self.slippages,\n )\n\n pre_closes = daily_result.close_prices\n start_poses = daily_result.end_poses\n\n # Generate dataframe\n results = defaultdict(list)\n\n for daily_result in self.daily_results.values():\n fields = [\n \"date\", \"trade_count\", \"turnover\",\n \"commission\", \"slippage\", \"trading_pnl\",\n \"holding_pnl\", \"total_pnl\", \"net_pnl\"\n ]\n for key in fields:\n value = getattr(daily_result, key)\n results[key].append(value)\n\n self.daily_df = DataFrame.from_dict(results).set_index(\"date\")\n\n self.output(\"逐日盯市盈亏计算完成\")\n return self.daily_df\n\n def calculate_statistics(self, df: DataFrame = None, output=True) -> None:\n \"\"\"\"\"\"\n self.output(\"开始计算策略统计指标\")\n\n # Check DataFrame input exterior\n if df is None:\n df = self.daily_df\n\n # Check for init DataFrame\n if df is None:\n # Set all statistics to 0 if no trade.\n start_date = \"\"\n end_date = \"\"\n total_days = 0\n profit_days = 0\n loss_days = 0\n end_balance = 0\n max_drawdown = 0\n max_ddpercent = 0\n max_drawdown_duration = 0\n total_net_pnl = 0\n daily_net_pnl = 0\n total_commission = 0\n daily_commission = 0\n total_slippage = 0\n daily_slippage = 0\n total_turnover = 0\n daily_turnover = 0\n total_trade_count = 0\n daily_trade_count = 0\n total_return = 0\n annual_return = 0\n daily_return = 0\n return_std = 0\n sharpe_ratio = 0\n return_drawdown_ratio = 0\n else:\n # Calculate balance related time series data\n df[\"balance\"] = df[\"net_pnl\"].cumsum() + self.capital\n df[\"return\"] = np.log(df[\"balance\"] / df[\"balance\"].shift(1)).fillna(0)\n df[\"highlevel\"] = (\n df[\"balance\"].rolling(\n 
min_periods=1, window=len(df), center=False).max()\n )\n df[\"drawdown\"] = df[\"balance\"] - df[\"highlevel\"]\n df[\"ddpercent\"] = df[\"drawdown\"] / df[\"highlevel\"] * 100\n\n # Calculate statistics value\n start_date = df.index[0]\n end_date = df.index[-1]\n\n total_days = len(df)\n profit_days = len(df[df[\"net_pnl\"] > 0])\n loss_days = len(df[df[\"net_pnl\"] < 0])\n\n end_balance = df[\"balance\"].iloc[-1]\n max_drawdown = df[\"drawdown\"].min()\n max_ddpercent = df[\"ddpercent\"].min()\n max_drawdown_end = df[\"drawdown\"].idxmin()\n\n if isinstance(max_drawdown_end, date):\n max_drawdown_start = df[\"balance\"][:max_drawdown_end].idxmax()\n max_drawdown_duration = (max_drawdown_end - max_drawdown_start).days\n else:\n max_drawdown_duration = 0\n\n total_net_pnl = df[\"net_pnl\"].sum()\n daily_net_pnl = total_net_pnl / total_days\n\n total_commission = df[\"commission\"].sum()\n daily_commission = total_commission / total_days\n\n total_slippage = df[\"slippage\"].sum()\n daily_slippage = total_slippage / total_days\n\n total_turnover = df[\"turnover\"].sum()\n daily_turnover = total_turnover / total_days\n\n total_trade_count = df[\"trade_count\"].sum()\n daily_trade_count = total_trade_count / total_days\n\n total_return = (end_balance / self.capital - 1) * 100\n annual_return = total_return / total_days * 240\n daily_return = df[\"return\"].mean() * 100\n return_std = df[\"return\"].std() * 100\n\n if return_std:\n daily_risk_free = self.risk_free / np.sqrt(240)\n sharpe_ratio = (daily_return - daily_risk_free) / return_std * np.sqrt(240)\n else:\n sharpe_ratio = 0\n\n return_drawdown_ratio = -total_net_pnl / max_drawdown\n\n # Output\n if output:\n self.output(\"-\" * 30)\n self.output(f\"首个交易日:\\t{start_date}\")\n self.output(f\"最后交易日:\\t{end_date}\")\n\n self.output(f\"总交易日:\\t{total_days}\")\n self.output(f\"盈利交易日:\\t{profit_days}\")\n self.output(f\"亏损交易日:\\t{loss_days}\")\n\n self.output(f\"起始资金:\\t{self.capital:,.2f}\")\n self.output(f\"结束资金:\\t{end_balance:,.2f}\")\n\n self.output(f\"总收益率:\\t{total_return:,.2f}%\")\n self.output(f\"年化收益:\\t{annual_return:,.2f}%\")\n self.output(f\"最大回撤: \\t{max_drawdown:,.2f}\")\n self.output(f\"百分比最大回撤: {max_ddpercent:,.2f}%\")\n self.output(f\"最长回撤天数: \\t{max_drawdown_duration}\")\n\n self.output(f\"总盈亏:\\t{total_net_pnl:,.2f}\")\n self.output(f\"总手续费:\\t{total_commission:,.2f}\")\n self.output(f\"总滑点:\\t{total_slippage:,.2f}\")\n self.output(f\"总成交金额:\\t{total_turnover:,.2f}\")\n self.output(f\"总成交笔数:\\t{total_trade_count}\")\n\n self.output(f\"日均盈亏:\\t{daily_net_pnl:,.2f}\")\n self.output(f\"日均手续费:\\t{daily_commission:,.2f}\")\n self.output(f\"日均滑点:\\t{daily_slippage:,.2f}\")\n self.output(f\"日均成交金额:\\t{daily_turnover:,.2f}\")\n self.output(f\"日均成交笔数:\\t{daily_trade_count}\")\n\n self.output(f\"日均收益率:\\t{daily_return:,.2f}%\")\n self.output(f\"收益标准差:\\t{return_std:,.2f}%\")\n self.output(f\"Sharpe Ratio:\\t{sharpe_ratio:,.2f}\")\n self.output(f\"收益回撤比:\\t{return_drawdown_ratio:,.2f}\")\n\n statistics = {\n \"start_date\": start_date,\n \"end_date\": end_date,\n \"total_days\": total_days,\n \"profit_days\": profit_days,\n \"loss_days\": loss_days,\n \"capital\": self.capital,\n \"end_balance\": end_balance,\n \"max_drawdown\": max_drawdown,\n \"max_ddpercent\": max_ddpercent,\n \"max_drawdown_duration\": max_drawdown_duration,\n \"total_net_pnl\": total_net_pnl,\n \"daily_net_pnl\": daily_net_pnl,\n \"total_commission\": total_commission,\n \"daily_commission\": daily_commission,\n \"total_slippage\": total_slippage,\n 
\"daily_slippage\": daily_slippage,\n \"total_turnover\": total_turnover,\n \"daily_turnover\": daily_turnover,\n \"total_trade_count\": total_trade_count,\n \"daily_trade_count\": daily_trade_count,\n \"total_return\": total_return,\n \"annual_return\": annual_return,\n \"daily_return\": daily_return,\n \"return_std\": return_std,\n \"sharpe_ratio\": sharpe_ratio,\n \"return_drawdown_ratio\": return_drawdown_ratio,\n }\n\n # Filter potential error infinite value\n for key, value in statistics.items():\n if value in (np.inf, -np.inf):\n value = 0\n statistics[key] = np.nan_to_num(value)\n\n self.output(\"策略统计指标计算完成\")\n return statistics\n\n def show_chart(self, df: DataFrame = None) -> None:\n \"\"\"\"\"\"\n # Check DataFrame input exterior\n if df is None:\n df = self.daily_df\n\n # Check for init DataFrame\n if df is None:\n return\n\n fig = make_subplots(\n rows=4,\n cols=1,\n subplot_titles=[\"Balance\", \"Drawdown\", \"Daily Pnl\", \"Pnl Distribution\"],\n vertical_spacing=0.06\n )\n\n balance_line = go.Scatter(\n x=df.index,\n y=df[\"balance\"],\n mode=\"lines\",\n name=\"Balance\"\n )\n drawdown_scatter = go.Scatter(\n x=df.index,\n y=df[\"drawdown\"],\n fillcolor=\"red\",\n fill='tozeroy',\n mode=\"lines\",\n name=\"Drawdown\"\n )\n pnl_bar = go.Bar(y=df[\"net_pnl\"], name=\"Daily Pnl\")\n pnl_histogram = go.Histogram(x=df[\"net_pnl\"], nbinsx=100, name=\"Days\")\n\n fig.add_trace(balance_line, row=1, col=1)\n fig.add_trace(drawdown_scatter, row=2, col=1)\n fig.add_trace(pnl_bar, row=3, col=1)\n fig.add_trace(pnl_histogram, row=4, col=1)\n\n fig.update_layout(height=1000, width=1000)\n fig.show()\n\n def update_daily_close(self, bars: Dict[str, BarData], dt: datetime) -> None:\n \"\"\"\"\"\"\n d = dt.date()\n\n close_prices = {}\n for bar in bars.values():\n close_prices[bar.vt_symbol] = bar.close_price\n\n daily_result = self.daily_results.get(d, None)\n\n if daily_result:\n daily_result.update_close_prices(close_prices)\n else:\n self.daily_results[d] = PortfolioDailyResult(d, close_prices)\n\n def new_bars(self, dt: datetime) -> None:\n \"\"\"\"\"\"\n self.datetime = dt\n\n bars: Dict[str, BarData] = {}\n for vt_symbol in self.vt_symbols:\n bar = self.history_data.get((dt, vt_symbol), None)\n\n # If bar data of vt_symbol at dt exists\n if bar:\n # Update bar data for crossing order\n self.bars[vt_symbol] = bar\n\n # Put bar into dict for strategy.on_bars update\n bars[vt_symbol] = bar\n # Otherwise, use previous close to backfill\n elif vt_symbol in self.bars:\n old_bar = self.bars[vt_symbol]\n\n bar = BarData(\n symbol=old_bar.symbol,\n exchange=old_bar.exchange,\n datetime=dt,\n open_price=old_bar.close_price,\n high_price=old_bar.close_price,\n low_price=old_bar.close_price,\n close_price=old_bar.close_price,\n gateway_name=old_bar.gateway_name\n )\n self.bars[vt_symbol] = bar\n\n self.cross_limit_order()\n self.strategy.on_bars(bars)\n\n self.update_daily_close(self.bars, dt)\n\n def cross_limit_order(self) -> None:\n \"\"\"\n Cross limit order with last bar/tick data.\n \"\"\"\n for order in list(self.active_limit_orders.values()):\n bar = self.bars[order.vt_symbol]\n\n long_cross_price = bar.low_price\n short_cross_price = bar.high_price\n long_best_price = bar.open_price\n short_best_price = bar.open_price\n\n # Push order update with status \"not traded\" (pending).\n if order.status == Status.SUBMITTING:\n order.status = Status.NOTTRADED\n self.strategy.update_order(order)\n\n # Check whether limit orders can be filled.\n long_cross = (\n order.direction == 
Direction.LONG\n and order.price >= long_cross_price\n and long_cross_price > 0\n )\n\n short_cross = (\n order.direction == Direction.SHORT\n and order.price <= short_cross_price\n and short_cross_price > 0\n )\n\n if not long_cross and not short_cross:\n continue\n\n # Push order update with status \"all traded\" (filled).\n order.traded = order.volume\n order.status = Status.ALLTRADED\n self.strategy.update_order(order)\n\n self.active_limit_orders.pop(order.vt_orderid)\n\n # Push trade update\n self.trade_count += 1\n\n if long_cross:\n trade_price = min(order.price, long_best_price)\n else:\n trade_price = max(order.price, short_best_price)\n\n trade = TradeData(\n symbol=order.symbol,\n exchange=order.exchange,\n orderid=order.orderid,\n tradeid=str(self.trade_count),\n direction=order.direction,\n offset=order.offset,\n price=trade_price,\n volume=order.volume,\n datetime=self.datetime,\n gateway_name=self.gateway_name,\n )\n\n self.strategy.update_trade(trade)\n self.trades[trade.vt_tradeid] = trade\n\n def load_bars(\n self,\n strategy: StrategyTemplate,\n days: int,\n interval: Interval\n ) -> None:\n \"\"\"\"\"\"\n self.days = days\n\n def send_order(\n self,\n strategy: StrategyTemplate,\n vt_symbol: str,\n direction: Direction,\n offset: Offset,\n price: float,\n volume: float,\n lock: bool,\n net: bool\n ) -> List[str]:\n \"\"\"\"\"\"\n price = round_to(price, self.priceticks[vt_symbol])\n symbol, exchange = extract_vt_symbol(vt_symbol)\n\n self.limit_order_count += 1\n\n order = OrderData(\n symbol=symbol,\n exchange=exchange,\n orderid=str(self.limit_order_count),\n direction=direction,\n offset=offset,\n price=price,\n volume=volume,\n status=Status.SUBMITTING,\n datetime=self.datetime,\n gateway_name=self.gateway_name,\n )\n\n self.active_limit_orders[order.vt_orderid] = order\n self.limit_orders[order.vt_orderid] = order\n\n return [order.vt_orderid]\n\n def cancel_order(self, strategy: StrategyTemplate, vt_orderid: str) -> None:\n \"\"\"\n Cancel order by vt_orderid.\n \"\"\"\n if vt_orderid not in self.active_limit_orders:\n return\n order = self.active_limit_orders.pop(vt_orderid)\n\n order.status = Status.CANCELLED\n self.strategy.update_order(order)\n\n def write_log(self, msg: str, strategy: StrategyTemplate = None) -> None:\n \"\"\"\n Write log message.\n \"\"\"\n msg = f\"{self.datetime}\\t{msg}\"\n self.logs.append(msg)\n\n def send_email(self, msg: str, strategy: StrategyTemplate = None) -> None:\n \"\"\"\n Send email to default receiver.\n \"\"\"\n pass\n\n def sync_strategy_data(self, strategy: StrategyTemplate) -> None:\n \"\"\"\n Sync strategy data into json file.\n \"\"\"\n pass\n\n def put_strategy_event(self, strategy: StrategyTemplate) -> None:\n \"\"\"\n Put an event to update strategy status.\n \"\"\"\n pass\n\n def output(self, msg) -> None:\n \"\"\"\n Output message of backtesting engine.\n \"\"\"\n print(f\"{datetime.now()}\\t{msg}\")\n\n def get_all_trades(self) -> List[TradeData]:\n \"\"\"\n Return all trade data of current backtesting result.\n \"\"\"\n return list(self.trades.values())\n\n def get_all_orders(self) -> List[OrderData]:\n \"\"\"\n Return all limit order data of current backtesting result.\n \"\"\"\n return list(self.limit_orders.values())\n\n def get_all_daily_results(self) -> List[\"PortfolioDailyResult\"]:\n \"\"\"\n Return all daily result data.\n \"\"\"\n return list(self.daily_results.values())\n\n\nclass ContractDailyResult:\n \"\"\"\"\"\"\n\n def __init__(self, result_date: date, close_price: float):\n \"\"\"\"\"\"\n 
self.date: date = result_date\n self.close_price: float = close_price\n self.pre_close: float = 0\n\n self.trades: List[TradeData] = []\n self.trade_count: int = 0\n\n self.start_pos: float = 0\n self.end_pos: float = 0\n\n self.turnover: float = 0\n self.commission: float = 0\n self.slippage: float = 0\n\n self.trading_pnl: float = 0\n self.holding_pnl: float = 0\n self.total_pnl: float = 0\n self.net_pnl: float = 0\n\n def add_trade(self, trade: TradeData) -> None:\n \"\"\"\"\"\"\n self.trades.append(trade)\n\n def calculate_pnl(\n self,\n pre_close: float,\n start_pos: float,\n size: int,\n rate: float,\n slippage: float\n ) -> None:\n \"\"\"\"\"\"\n # If no pre_close provided on the first day,\n # use value 1 to avoid zero division error\n if pre_close:\n self.pre_close = pre_close\n else:\n self.pre_close = 1\n\n # Holding pnl is the pnl from holding position at day start\n self.start_pos = start_pos\n self.end_pos = start_pos\n\n self.holding_pnl = self.start_pos * (self.close_price - self.pre_close) * size\n\n # Trading pnl is the pnl from new trade during the day\n self.trade_count = len(self.trades)\n\n for trade in self.trades:\n if trade.direction == Direction.LONG:\n pos_change = trade.volume\n else:\n pos_change = -trade.volume\n\n self.end_pos += pos_change\n\n turnover = trade.volume * size * trade.price\n\n self.trading_pnl += pos_change * (self.close_price - trade.price) * size\n self.slippage += trade.volume * size * slippage\n self.turnover += turnover\n self.commission += turnover * rate\n\n # Net pnl takes account of commission and slippage cost\n self.total_pnl = self.trading_pnl + self.holding_pnl\n self.net_pnl = self.total_pnl - self.commission - self.slippage\n\n def update_close_price(self, close_price: float) -> None:\n \"\"\"\"\"\"\n self.close_price = close_price\n\n\nclass PortfolioDailyResult:\n \"\"\"\"\"\"\n\n def __init__(self, result_date: date, close_prices: Dict[str, float]):\n \"\"\"\"\"\"\n self.date: date = result_date\n self.close_prices: Dict[str, float] = close_prices\n self.pre_closes: Dict[str, float] = {}\n self.start_poses: Dict[str, float] = {}\n self.end_poses: Dict[str, float] = {}\n\n self.contract_results: Dict[str, ContractDailyResult] = {}\n\n for vt_symbol, close_price in close_prices.items():\n self.contract_results[vt_symbol] = ContractDailyResult(result_date, close_price)\n\n self.trade_count: int = 0\n self.turnover: float = 0\n self.commission: float = 0\n self.slippage: float = 0\n self.trading_pnl: float = 0\n self.holding_pnl: float = 0\n self.total_pnl: float = 0\n self.net_pnl: float = 0\n\n def add_trade(self, trade: TradeData) -> None:\n \"\"\"\"\"\"\n contract_result = self.contract_results[trade.vt_symbol]\n contract_result.add_trade(trade)\n\n def calculate_pnl(\n self,\n pre_closes: Dict[str, float],\n start_poses: Dict[str, float],\n sizes: Dict[str, float],\n rates: Dict[str, float],\n slippages: Dict[str, float],\n ) -> None:\n \"\"\"\"\"\"\n self.pre_closes = pre_closes\n\n for vt_symbol, contract_result in self.contract_results.items():\n contract_result.calculate_pnl(\n pre_closes.get(vt_symbol, 0),\n start_poses.get(vt_symbol, 0),\n sizes[vt_symbol],\n rates[vt_symbol],\n slippages[vt_symbol]\n )\n\n self.trade_count += contract_result.trade_count\n self.turnover += contract_result.turnover\n self.commission += contract_result.commission\n self.slippage += contract_result.slippage\n self.trading_pnl += contract_result.trading_pnl\n self.holding_pnl += contract_result.holding_pnl\n self.total_pnl += 
contract_result.total_pnl\n self.net_pnl += contract_result.net_pnl\n\n self.end_poses[vt_symbol] = contract_result.end_pos\n\n def update_close_prices(self, close_prices: Dict[str, float]) -> None:\n \"\"\"\"\"\"\n self.close_prices = close_prices\n\n for vt_symbol, close_price in close_prices.items():\n contract_result = self.contract_results.get(vt_symbol, None)\n if contract_result:\n contract_result.update_close_price(close_price)\n\n\n@lru_cache(maxsize=999)\ndef load_bar_data(\n vt_symbol: str,\n interval: Interval,\n start: datetime,\n end: datetime\n):\n \"\"\"\"\"\"\n symbol, exchange = extract_vt_symbol(vt_symbol)\n\n return database_manager.load_bar_data(\n symbol, exchange, interval, start, end\n )\n" ]
[ [ "numpy.sqrt", "numpy.nan_to_num", "pandas.DataFrame.from_dict" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
rohit-konda/markovGames
[ "d6dd1b8a11f1c95658a468f9e471aecfcf0e6839" ]
[ "markovGames/learning/bruteSearch.py" ]
[ "import numpy as np\nfrom itertools import product\nfrom markovGames.gameDefs.mdpDefs import Policy\n\n\ndef getAllDetPol(numStates, numActions):\n detProbs = [np.array([1 if j == i else 0 for j in range(numActions)]) for i in range(numActions)]\n return product(detProbs, repeat=numStates)\n\n\ndef getPolList(states, acSet):\n # list of possible deterministic policies\n numStates = len(states)\n numActions = len(acSet)\n detPol = getAllDetPol(numStates, numActions)\n return [Policy(states, pol, acSet) for pol in detPol]\n\n\ndef prodPolList(states, listActions):\n # get policies for each action Set\n polList = [getPolList(states, ac) for ac in listActions]\n return polList\n\n\ndef getPayoff(utilMap, listAcSet):\n # utilMap: maps list of agent policies to real numbers,\n # allPolicyList: list of agent i (list of possible policies)\n def utilInd(index):\n jointAc = [listAcSet[j][ind] for j, ind in enumerate(index)]\n val = utilMap(jointAc)\n return val\n\n numPL = [len(pL) for pL in listAcSet]\n payoff = np.zeros(numPL)\n for ind in product(*[range(nI) for nI in numPL]):\n payoff[ind] = utilInd(ind)\n return payoff\n\n\ndef getArgOpt(tensor):\n return np.unravel_index(np.argmax(tensor), tensor.shape)\n\n\ndef bruteFindNash(payoffList):\n TOLERANCE = 1e-7\n cpnes = list(np.argwhere(payoffList[0] > np.amax(payoffList[0], 0) - TOLERANCE))\n cpnes = [tuple(cpne) for cpne in cpnes]\n N = len(payoffList)\n\n for i in range(1, N):\n pMat = payoffList[i]\n for cpne in cpnes[:]:\n ind = cpne[:i] + (slice(None),) + cpne[i + 1:]\n if pMat[cpne] < np.max(pMat[ind]) - TOLERANCE:\n cpnes.pop(cpnes.index(cpne))\n return cpnes\n\n\ndef getEfficiency(cpnes, welfareMat):\n # welfareMat - matrix form of welfare\n pneWelf = [welfareMat[cpne] for cpne in cpnes]\n opt = np.max(welfareMat)\n priceRatios = [float(pne) / opt for pne in pneWelf]\n return priceRatios\n\n\ndef getPoA(cpnes, welfareMat):\n return min(getEfficiency(cpnes, welfareMat))\n" ]
[ [ "numpy.max", "numpy.amax", "numpy.argmax", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
M155K4R4/Tensorflow
[ "e5e03ef3148303b3dfed89a1492dedf92b45be25", "e5e03ef3148303b3dfed89a1492dedf92b45be25", "e5e03ef3148303b3dfed89a1492dedf92b45be25", "e5e03ef3148303b3dfed89a1492dedf92b45be25", "e5e03ef3148303b3dfed89a1492dedf92b45be25", "e5e03ef3148303b3dfed89a1492dedf92b45be25", "e5e03ef3148303b3dfed89a1492dedf92b45be25", "e5e03ef3148303b3dfed89a1492dedf92b45be25", "e5e03ef3148303b3dfed89a1492dedf92b45be25", "e5e03ef3148303b3dfed89a1492dedf92b45be25", "e5e03ef3148303b3dfed89a1492dedf92b45be25", "e5e03ef3148303b3dfed89a1492dedf92b45be25", "e5e03ef3148303b3dfed89a1492dedf92b45be25", "e5e03ef3148303b3dfed89a1492dedf92b45be25", "e5e03ef3148303b3dfed89a1492dedf92b45be25", "e5e03ef3148303b3dfed89a1492dedf92b45be25", "e5e03ef3148303b3dfed89a1492dedf92b45be25", "e5e03ef3148303b3dfed89a1492dedf92b45be25", "e5e03ef3148303b3dfed89a1492dedf92b45be25", "e5e03ef3148303b3dfed89a1492dedf92b45be25", "e5e03ef3148303b3dfed89a1492dedf92b45be25", "e5e03ef3148303b3dfed89a1492dedf92b45be25", "e5e03ef3148303b3dfed89a1492dedf92b45be25", "e5e03ef3148303b3dfed89a1492dedf92b45be25", "e5e03ef3148303b3dfed89a1492dedf92b45be25", "e5e03ef3148303b3dfed89a1492dedf92b45be25" ]
[ "tensorflow/contrib/rnn/python/ops/lstm_ops.py", "tensorflow/contrib/bayesflow/python/ops/hmc_impl.py", "tensorflow/python/kernel_tests/depthwise_conv_op_test.py", "tensorflow/contrib/eager/python/checkpointable_test.py", "tensorflow/python/kernel_tests/distributions/dirichlet_multinomial_test.py", "tensorflow/contrib/gan/python/eval/python/summaries_test.py", "tensorflow/python/keras/_impl/keras/layers/local.py", "tensorflow/contrib/linear_optimizer/python/sdca_estimator_test.py", "tensorflow/python/keras/_impl/keras/callbacks_test.py", "tensorflow/contrib/training/python/training/tensor_queue_dataset_test.py", "tensorflow/contrib/estimator/python/estimator/multi_head.py", "tensorflow/python/ops/distributions/dirichlet_multinomial.py", "tensorflow/contrib/signal/python/kernel_tests/spectral_ops_test.py", "tensorflow/examples/learn/iris.py", "tensorflow/contrib/timeseries/python/timeseries/ar_model.py", "tensorflow/contrib/data/python/ops/dataset_ops.py", "tensorflow/python/ops/distributions/bijector_test_util.py", "tensorflow/contrib/py2tf/pyct/anno_test.py", "tensorflow/contrib/opt/python/training/powersign_test.py", "tensorflow/python/framework/tensor_shape_test.py", "tensorflow/python/tools/optimize_for_inference_test.py", "tensorflow/python/keras/_impl/keras/applications/nasnet.py", "tensorflow/contrib/signal/python/kernel_tests/shape_ops_test.py", "tensorflow/contrib/data/python/ops/scan_ops.py", "tensorflow/contrib/py2tf/pyct/static_analysis/activity.py", "tensorflow/tools/docs/generate_lib_test.py" ]
[ "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"LSTM Block Cell ops.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\n\nfrom tensorflow.contrib.rnn.ops import gen_lstm_ops\nfrom tensorflow.contrib.util import loader\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.layers import base as base_layer\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.ops import rnn_cell_impl\nfrom tensorflow.python.platform import resource_loader\n\n_lstm_ops_so = loader.load_op_library(\n resource_loader.get_path_to_datafile(\"_lstm_ops.so\"))\n\nLayerRNNCell = rnn_cell_impl.LayerRNNCell # pylint: disable=invalid-name\n\n\n# pylint: disable=invalid-name\ndef _lstm_block_cell(x,\n cs_prev,\n h_prev,\n w,\n b,\n wci=None,\n wcf=None,\n wco=None,\n forget_bias=None,\n cell_clip=None,\n use_peephole=None,\n name=None):\n r\"\"\"Computes the LSTM cell forward propagation for 1 time step.\n\n This implementation uses 1 weight matrix and 1 bias vector, and there's an\n optional peephole connection.\n\n This kernel op implements the following mathematical equations:\n\n ```python\n xh = [x, h_prev]\n [i, ci, f, o] = xh * w + b\n f = f + forget_bias\n\n if not use_peephole:\n wci = wcf = wco = 0\n\n i = sigmoid(cs_prev * wci + i)\n f = sigmoid(cs_prev * wcf + f)\n ci = tanh(ci)\n\n cs = ci .* i + cs_prev .* f\n cs = clip(cs, cell_clip)\n\n o = sigmoid(cs * wco + o)\n co = tanh(cs)\n h = co .* o\n ```\n\n Args:\n x: A `Tensor`. Must be one of the following types: `float32`.\n The input to the LSTM cell, shape (batch_size, num_inputs).\n cs_prev: A `Tensor`. Must have the same type as `x`.\n Value of the cell state at previous time step.\n h_prev: A `Tensor`. Must have the same type as `x`.\n Output of the previous cell at previous time step.\n w: A `Tensor`. Must have the same type as `x`. The weight matrix.\n b: A `Tensor`. Must have the same type as `x`. The bias vector.\n wci: A `Tensor`. Must have the same type as `x`.\n The weight matrix for input gate peephole connection.\n wcf: A `Tensor`. Must have the same type as `x`.\n The weight matrix for forget gate peephole connection.\n wco: A `Tensor`. Must have the same type as `x`.\n The weight matrix for output gate peephole connection.\n forget_bias: An optional `float`. Defaults to `1`. The forget gate bias.\n cell_clip: An optional `float`. Defaults to `-1` (no clipping).\n Value to clip the 'cs' value to. Disable by setting to negative value.\n use_peephole: An optional `bool`. 
Defaults to `False`.\n Whether to use peephole weights.\n name: A name for the operation (optional).\n\n Returns:\n A tuple of `Tensor` objects (i, cs, f, o, ci, co, h).\n i: A `Tensor`. Has the same type as `x`. The input gate.\n cs: A `Tensor`. Has the same type as `x`. The cell state before the tanh.\n f: A `Tensor`. Has the same type as `x`. The forget gate.\n o: A `Tensor`. Has the same type as `x`. The output gate.\n ci: A `Tensor`. Has the same type as `x`. The cell input.\n co: A `Tensor`. Has the same type as `x`. The cell after the tanh.\n h: A `Tensor`. Has the same type as `x`. The output h vector.\n\n Raises:\n ValueError: If cell_size is None.\n \"\"\"\n if wci is None:\n cell_size = cs_prev.get_shape().with_rank(2)[1].value\n if cell_size is None:\n raise ValueError(\"cell_size from `cs_prev` should not be None.\")\n wci = array_ops.constant(0, dtype=dtypes.float32, shape=[cell_size])\n wcf = wci\n wco = wci\n\n # pylint: disable=protected-access\n return gen_lstm_ops.lstm_block_cell(\n x=x,\n cs_prev=cs_prev,\n h_prev=h_prev,\n w=w,\n wci=wci,\n wcf=wcf,\n wco=wco,\n b=b,\n forget_bias=forget_bias,\n cell_clip=cell_clip if cell_clip is not None else -1,\n use_peephole=use_peephole,\n name=name)\n # pylint: enable=protected-access\n\n\ndef _block_lstm(seq_len_max,\n x,\n w,\n b,\n cs_prev=None,\n h_prev=None,\n wci=None,\n wcf=None,\n wco=None,\n forget_bias=None,\n cell_clip=None,\n use_peephole=None,\n name=None):\n r\"\"\"TODO(williamchan): add doc.\n\n Args:\n seq_len_max: A `Tensor` of type `int64`.\n x: A list of at least 1 `Tensor` objects of the same type in: `float32`.\n w: A `Tensor`. Must have the same type as `x`.\n b: A `Tensor`. Must have the same type as `x`.\n cs_prev: A `Tensor`. Must have the same type as `x`.\n h_prev: A `Tensor`. Must have the same type as `x`.\n wci: A `Tensor`. Must have the same type as `x`.\n wcf: A `Tensor`. Must have the same type as `x`.\n wco: A `Tensor`. Must have the same type as `x`.\n forget_bias: An optional `float`. Defaults to `1`.\n cell_clip: An optional `float`. Defaults to `-1` (no clipping).\n use_peephole: An optional `bool`. 
Defaults to `False`.\n name: A name for the operation (optional).\n\n Returns:\n A tuple of `Tensor` objects (i, cs, f, o, ci, co, h).\n i: A list with the same number of `Tensor` objects as `x` of `Tensor`\n objects of the same type as x.\n cs: A list with the same number of `Tensor` objects as `x` of `Tensor`\n objects of the same type as x.\n f: A list with the same number of `Tensor` objects as `x` of `Tensor`\n objects of the same type as x.\n o: A list with the same number of `Tensor` objects as `x` of `Tensor`\n objects of the same type as x.\n ci: A list with the same number of `Tensor` objects as `x` of `Tensor`\n objects of the same type as x.\n co: A list with the same number of `Tensor` objects as `x` of `Tensor`\n objects of the same type as x.\n h: A list with the same number of `Tensor` objects as `x` of `Tensor`\n objects of the same type as x.\n\n Raises:\n ValueError: If `b` does not have a valid shape.\n \"\"\"\n batch_size = x[0].get_shape().with_rank(2)[0].value\n cell_size4 = b.get_shape().with_rank(1)[0].value\n if cell_size4 is None:\n raise ValueError(\"`b` shape must not be None.\")\n cell_size = cell_size4 / 4\n zero_state = None\n if cs_prev is None or h_prev is None:\n zero_state = array_ops.constant(\n 0, dtype=dtypes.float32, shape=[batch_size, cell_size])\n if cs_prev is None:\n cs_prev = zero_state\n if h_prev is None:\n h_prev = zero_state\n if wci is None:\n wci = array_ops.constant(0, dtype=dtypes.float32, shape=[cell_size])\n wcf = wci\n wco = wci\n\n # pylint: disable=protected-access\n i, cs, f, o, ci, co, h = gen_lstm_ops.block_lstm(\n seq_len_max=seq_len_max,\n x=array_ops.stack(x),\n cs_prev=cs_prev,\n h_prev=h_prev,\n w=w,\n wci=wci,\n wcf=wcf,\n wco=wco,\n b=b,\n forget_bias=forget_bias,\n cell_clip=cell_clip if cell_clip is not None else -1,\n name=name,\n use_peephole=use_peephole)\n\n return array_ops.unstack(i), array_ops.unstack(cs), array_ops.unstack(\n f), array_ops.unstack(o), array_ops.unstack(ci), array_ops.unstack(\n co), array_ops.unstack(h)\n # pylint: enable=protected-access\n # pylint: enable=invalid-name\n\n\n_lstm_block_cell_grad_outputs = [\"cs_prev_grad\", \"dicfo\"]\n\n\[email protected](\"LSTMBlockCell\")\ndef _LSTMBlockCellGrad(op, *grad):\n \"\"\"Gradient for LSTMBlockCell.\"\"\"\n (x, cs_prev, h_prev, w, wci, wcf, wco, b) = op.inputs\n (i, cs, f, o, ci, co, _) = op.outputs\n (_, cs_grad, _, _, _, _, h_grad) = grad\n\n batch_size = x.get_shape().with_rank(2)[0].value\n if batch_size is None:\n batch_size = -1\n input_size = x.get_shape().with_rank(2)[1].value\n if input_size is None:\n raise ValueError(\"input_size from `x` should not be None.\")\n cell_size = cs_prev.get_shape().with_rank(2)[1].value\n if cell_size is None:\n raise ValueError(\"cell_size from `cs_prev` should not be None.\")\n\n (cs_prev_grad, dicfo, wci_grad, wcf_grad,\n wco_grad) = gen_lstm_ops.lstm_block_cell_grad(\n x,\n cs_prev,\n h_prev,\n w,\n wci,\n wcf,\n wco,\n b,\n i,\n cs,\n f,\n o,\n ci,\n co,\n cs_grad,\n h_grad,\n use_peephole=op.get_attr(\"use_peephole\"))\n\n # Backprop from dicfo to xh.\n xh_grad = math_ops.matmul(dicfo, w, transpose_b=True)\n\n x_grad = array_ops.slice(xh_grad, (0, 0), (batch_size, input_size))\n x_grad.get_shape().merge_with(x.get_shape())\n\n h_prev_grad = array_ops.slice(xh_grad, (0, input_size),\n (batch_size, cell_size))\n h_prev_grad.get_shape().merge_with(h_prev.get_shape())\n\n # Backprop from dicfo to w.\n xh = array_ops.concat([x, h_prev], 1)\n w_grad = math_ops.matmul(xh, dicfo, transpose_a=True)\n 
w_grad.get_shape().merge_with(w.get_shape())\n\n # Backprop from dicfo to b.\n b_grad = nn_ops.bias_add_grad(dicfo)\n b_grad.get_shape().merge_with(b.get_shape())\n\n return (x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wcf_grad,\n wco_grad, b_grad)\n\n\[email protected](\"BlockLSTM\")\ndef _BlockLSTMGrad(op, *grad):\n \"\"\"Gradient for BlockLSTM.\"\"\"\n seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b = op.inputs\n i, cs, f, o, ci, co, h = op.outputs\n\n cs_grad = grad[1]\n h_grad = grad[6]\n\n (x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wcf_grad, wco_grad,\n b_grad) = gen_lstm_ops.block_lstm_grad(\n seq_len_max,\n x,\n cs_prev,\n h_prev,\n w,\n wci,\n wcf,\n wco,\n b,\n i,\n cs,\n f,\n o,\n ci,\n co,\n h,\n cs_grad,\n h_grad,\n use_peephole=op.get_attr(\"use_peephole\"))\n\n return [\n None, x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wcf_grad,\n wco_grad, b_grad\n ]\n\n\nclass LSTMBlockCell(LayerRNNCell):\n \"\"\"Basic LSTM recurrent network cell.\n\n The implementation is based on: http://arxiv.org/abs/1409.2329.\n\n We add `forget_bias` (default: 1) to the biases of the forget gate in order to\n reduce the scale of forgetting in the beginning of the training.\n\n Unlike `rnn_cell_impl.LSTMCell`, this is a monolithic op and should be much\n faster. The weight and bias matrices should be compatible as long as the\n variable scope matches.\n \"\"\"\n\n def __init__(self,\n num_units,\n forget_bias=1.0,\n cell_clip=None,\n use_peephole=False,\n reuse=None,\n name=\"lstm_cell\"):\n \"\"\"Initialize the basic LSTM cell.\n\n Args:\n num_units: int, The number of units in the LSTM cell.\n forget_bias: float, The bias added to forget gates (see above).\n cell_clip: An optional `float`. Defaults to `-1` (no clipping).\n use_peephole: Whether to use peephole connections or not.\n reuse: (optional) boolean describing whether to reuse variables in an\n existing scope. If not `True`, and the existing scope already has the\n given variables, an error is raised.\n name: String, the name of the layer. Layers with the same name will\n share weights, but to avoid mistakes we require reuse=True in such\n cases. 
By default this is \"lstm_cell\", for variable-name compatibility\n with `tf.nn.rnn_cell.LSTMCell`.\n\n When restoring from CudnnLSTM-trained checkpoints, must use\n CudnnCompatibleLSTMBlockCell instead.\n \"\"\"\n super(LSTMBlockCell, self).__init__(_reuse=reuse, name=name)\n self._num_units = num_units\n self._forget_bias = forget_bias\n self._use_peephole = use_peephole\n self._cell_clip = cell_clip if cell_clip is not None else -1\n self._names = {\n \"W\": \"kernel\",\n \"b\": \"bias\",\n \"wci\": \"w_i_diag\",\n \"wcf\": \"w_f_diag\",\n \"wco\": \"w_o_diag\",\n \"scope\": \"lstm_cell\"\n }\n # Inputs must be 2-dimensional.\n self.input_spec = base_layer.InputSpec(ndim=2)\n\n @property\n def state_size(self):\n return rnn_cell_impl.LSTMStateTuple(self._num_units, self._num_units)\n\n @property\n def output_size(self):\n return self._num_units\n\n def build(self, inputs_shape):\n if not inputs_shape[1].value:\n raise ValueError(\n \"Expecting inputs_shape[1] to be set: %s\" % str(inputs_shape))\n input_size = inputs_shape[1].value\n self._kernel = self.add_variable(\n self._names[\"W\"], [input_size + self._num_units, self._num_units * 4])\n self._bias = self.add_variable(\n self._names[\"b\"], [self._num_units * 4],\n initializer=init_ops.constant_initializer(0.0))\n if self._use_peephole:\n self._w_i_diag = self.add_variable(self._names[\"wci\"], [self._num_units])\n self._w_f_diag = self.add_variable(self._names[\"wcf\"], [self._num_units])\n self._w_o_diag = self.add_variable(self._names[\"wco\"], [self._num_units])\n\n self.built = True\n\n def call(self, inputs, state):\n \"\"\"Long short-term memory cell (LSTM).\"\"\"\n if len(state) != 2:\n raise ValueError(\"Expecting state to be a tuple with length 2.\")\n\n if self._use_peephole:\n wci = self._w_i_diag\n wcf = self._w_f_diag\n wco = self._w_o_diag\n else:\n wci = wcf = wco = array_ops.zeros([self._num_units])\n\n (cs_prev, h_prev) = state\n (_, cs, _, _, _, _, h) = _lstm_block_cell(\n inputs,\n cs_prev,\n h_prev,\n self._kernel,\n self._bias,\n wci=wci,\n wcf=wcf,\n wco=wco,\n forget_bias=self._forget_bias,\n cell_clip=self._cell_clip,\n use_peephole=self._use_peephole)\n\n new_state = rnn_cell_impl.LSTMStateTuple(cs, h)\n return h, new_state\n\n\nclass LSTMBlockWrapper(base_layer.Layer):\n \"\"\"This is a helper class that provides housekeeping for LSTM cells.\n\n This may be useful for alternative LSTM and similar type of cells.\n The subclasses must implement `_call_cell` method and `num_units` property.\n \"\"\"\n\n @abc.abstractproperty\n def num_units(self):\n \"\"\"Number of units in this cell (output dimension).\"\"\"\n pass\n\n @abc.abstractmethod\n def _call_cell(self, inputs, initial_cell_state, initial_output, dtype,\n sequence_length):\n \"\"\"Run this LSTM on inputs, starting from the given state.\n\n This method must be implemented by subclasses and does the actual work\n of calling the cell.\n\n Args:\n inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]`\n initial_cell_state: initial value for cell state, shape `[batch_size,\n self._num_units]`\n initial_output: initial value of cell output, shape `[batch_size,\n self._num_units]`\n dtype: The data type for the initial state and expected output.\n sequence_length: Specifies the length of each sequence in inputs. 
An int32\n or int64 vector (tensor) size [batch_size], values in [0, time_len) or\n None.\n\n Returns:\n A pair containing:\n\n - State: A `3-D` tensor of shape `[time_len, batch_size, output_size]`\n - Output: A `3-D` tensor of shape `[time_len, batch_size, output_size]`\n \"\"\"\n pass\n\n def call(self, inputs, initial_state=None, dtype=None, sequence_length=None):\n \"\"\"Run this LSTM on inputs, starting from the given state.\n\n Args:\n inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]`\n or a list of `time_len` tensors of shape `[batch_size, input_size]`.\n initial_state: a tuple `(initial_cell_state, initial_output)` with tensors\n of shape `[batch_size, self._num_units]`. If this is not provided, the\n cell is expected to create a zero initial state of type `dtype`.\n dtype: The data type for the initial state and expected output. Required\n if `initial_state` is not provided or RNN state has a heterogeneous\n dtype.\n sequence_length: Specifies the length of each sequence in inputs. An\n `int32` or `int64` vector (tensor) size `[batch_size]`, values in `[0,\n time_len).`\n Defaults to `time_len` for each element.\n\n Returns:\n A pair containing:\n\n - Output: A `3-D` tensor of shape `[time_len, batch_size, output_size]`\n or a list of time_len tensors of shape `[batch_size, output_size]`,\n to match the type of the `inputs`.\n - Final state: a tuple `(cell_state, output)` matching `initial_state`.\n\n Raises:\n ValueError: in case of shape mismatches\n \"\"\"\n is_list = isinstance(inputs, list)\n if is_list:\n inputs = array_ops.stack(inputs)\n inputs_shape = inputs.get_shape().with_rank(3)\n if not inputs_shape[2]:\n raise ValueError(\"Expecting inputs_shape[2] to be set: %s\" % inputs_shape)\n batch_size = inputs_shape[1].value\n if batch_size is None:\n batch_size = array_ops.shape(inputs)[1]\n time_len = inputs_shape[0].value\n if time_len is None:\n time_len = array_ops.shape(inputs)[0]\n\n # Provide default values for initial_state and dtype\n if initial_state is None:\n if dtype is None:\n raise ValueError(\"Either initial_state or dtype needs to be specified\")\n z = array_ops.zeros(\n array_ops.stack([batch_size, self.num_units]), dtype=dtype)\n initial_state = z, z\n else:\n if len(initial_state) != 2:\n raise ValueError(\n \"Expecting initial_state to be a tuple with length 2 or None\")\n if dtype is None:\n dtype = initial_state[0].dtype\n\n # create the actual cell\n if sequence_length is not None:\n sequence_length = ops.convert_to_tensor(sequence_length)\n initial_cell_state, initial_output = initial_state # pylint: disable=unpacking-non-sequence\n cell_states, outputs = self._call_cell(\n inputs, initial_cell_state, initial_output, dtype, sequence_length)\n\n if sequence_length is not None:\n # Mask out the part beyond sequence_length\n mask = array_ops.transpose(\n array_ops.sequence_mask(sequence_length, time_len, dtype=dtype),\n [1, 0])\n mask = array_ops.tile(\n array_ops.expand_dims(mask, [-1]), [1, 1, self.num_units])\n outputs *= mask\n # Prepend initial states to cell_states and outputs for indexing to work\n # correctly,since we want to access the last valid state at\n # sequence_length - 1, which can even be -1, corresponding to the\n # initial state.\n mod_cell_states = array_ops.concat(\n [array_ops.expand_dims(initial_cell_state, [0]), cell_states], 0)\n mod_outputs = array_ops.concat(\n [array_ops.expand_dims(initial_output, [0]), outputs], 0)\n final_cell_state = self._gather_states(mod_cell_states, sequence_length,\n 
batch_size)\n final_output = self._gather_states(mod_outputs, sequence_length,\n batch_size)\n else:\n # No sequence_lengths used: final state is the last state\n final_cell_state = cell_states[-1]\n final_output = outputs[-1]\n\n if is_list:\n # Input was a list, so return a list\n outputs = array_ops.unstack(outputs)\n\n final_state = rnn_cell_impl.LSTMStateTuple(final_cell_state, final_output)\n return outputs, final_state\n\n def _gather_states(self, data, indices, batch_size):\n \"\"\"Produce `out`, s.t. out(i, j) = data(indices(i), i, j).\"\"\"\n mod_indices = indices * batch_size + math_ops.range(batch_size)\n return array_ops.gather(\n array_ops.reshape(data, [-1, self.num_units]), mod_indices)\n\n\nclass LSTMBlockFusedCell(LSTMBlockWrapper):\n \"\"\"FusedRNNCell implementation of LSTM.\n\n This is an extremely efficient LSTM implementation, that uses a single TF op\n for the entire LSTM. It should be both faster and more memory-efficient than\n LSTMBlockCell defined above.\n\n The implementation is based on: http://arxiv.org/abs/1409.2329.\n\n We add forget_bias (default: 1) to the biases of the forget gate in order to\n reduce the scale of forgetting in the beginning of the training.\n\n The variable naming is consistent with `rnn_cell_impl.LSTMCell`.\n \"\"\"\n\n def __init__(self,\n num_units,\n forget_bias=1.0,\n cell_clip=None,\n use_peephole=False,\n reuse=None,\n name=\"lstm_fused_cell\"):\n \"\"\"Initialize the LSTM cell.\n\n Args:\n num_units: int, The number of units in the LSTM cell.\n forget_bias: float, The bias added to forget gates (see above).\n cell_clip: clip the cell to this value. Default is no cell clipping.\n use_peephole: Whether to use peephole connections or not.\n reuse: (optional) boolean describing whether to reuse variables in an\n existing scope. If not `True`, and the existing scope already has the\n given variables, an error is raised.\n name: String, the name of the layer. Layers with the same name will\n share weights, but to avoid mistakes we require reuse=True in such\n cases. 
By default this is \"lstm_cell\", for variable-name compatibility\n with `tf.nn.rnn_cell.LSTMCell`.\n \"\"\"\n super(LSTMBlockFusedCell, self).__init__(_reuse=reuse, name=name)\n self._num_units = num_units\n self._forget_bias = forget_bias\n self._cell_clip = cell_clip if cell_clip is not None else -1\n self._use_peephole = use_peephole\n\n # Inputs must be 3-dimensional.\n self.input_spec = base_layer.InputSpec(ndim=3)\n\n @property\n def num_units(self):\n \"\"\"Number of units in this cell (output dimension).\"\"\"\n return self._num_units\n\n def build(self, input_shape):\n input_size = input_shape[2].value\n self._kernel = self.add_variable(\n \"kernel\", [input_size + self._num_units, self._num_units * 4])\n self._bias = self.add_variable(\n \"bias\", [self._num_units * 4],\n initializer=init_ops.constant_initializer(0.0))\n if self._use_peephole:\n self._w_i_diag = self.add_variable(\"w_i_diag\", [self._num_units])\n self._w_f_diag = self.add_variable(\"w_f_diag\", [self._num_units])\n self._w_o_diag = self.add_variable(\"w_o_diag\", [self._num_units])\n\n self.built = True\n\n def _call_cell(self,\n inputs,\n initial_cell_state=None,\n initial_output=None,\n dtype=None,\n sequence_length=None):\n \"\"\"Run this LSTM on inputs, starting from the given state.\n\n Args:\n inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]`\n initial_cell_state: initial value for cell state, shape `[batch_size,\n self._num_units]`\n initial_output: initial value of cell output, shape `[batch_size,\n self._num_units]`\n dtype: The data type for the initial state and expected output.\n sequence_length: Specifies the length of each sequence in inputs. An\n `int32` or `int64` vector (tensor) size `[batch_size]`, values in `[0,\n time_len)` or None.\n\n Returns:\n A pair containing:\n\n - Cell state (cs): A `3-D` tensor of shape `[time_len, batch_size,\n output_size]`\n - Output (h): A `3-D` tensor of shape `[time_len, batch_size,\n output_size]`\n \"\"\"\n\n inputs_shape = inputs.get_shape().with_rank(3)\n time_len = inputs_shape[0].value\n if time_len is None:\n time_len = array_ops.shape(inputs)[0]\n\n if self._use_peephole:\n wci = self._w_i_diag\n wco = self._w_o_diag\n wcf = self._w_f_diag\n else:\n wci = wcf = wco = array_ops.zeros([self._num_units], dtype=dtype)\n\n if sequence_length is None:\n max_seq_len = math_ops.to_int64(time_len)\n else:\n max_seq_len = math_ops.to_int64(math_ops.reduce_max(sequence_length))\n\n _, cs, _, _, _, _, h = gen_lstm_ops.block_lstm(\n seq_len_max=max_seq_len,\n x=inputs,\n cs_prev=initial_cell_state,\n h_prev=initial_output,\n w=self._kernel,\n wci=wci,\n wcf=wcf,\n wco=wco,\n b=self._bias,\n forget_bias=self._forget_bias,\n cell_clip=self._cell_clip,\n use_peephole=self._use_peephole)\n return cs, h\n", "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Hamiltonian Monte Carlo, a gradient-based MCMC algorithm.\n\n@@sample_chain\n@@sample_annealed_importance_chain\n@@kernel\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport numpy as np\n\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import functional_ops\nfrom tensorflow.python.ops import gradients_impl as gradients_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops.distributions import util as distributions_util\n\n__all__ = [\n \"sample_chain\",\n \"sample_annealed_importance_chain\",\n \"kernel\",\n]\n\n\nKernelResults = collections.namedtuple(\n \"KernelResults\",\n [\n \"acceptance_probs\",\n \"current_grads_target_log_prob\", # \"Current result\" means \"accepted\".\n \"current_target_log_prob\", # \"Current result\" means \"accepted\".\n \"energy_change\",\n \"is_accepted\",\n \"proposed_grads_target_log_prob\",\n \"proposed_state\",\n \"proposed_target_log_prob\",\n \"random_positive\",\n ])\n\n\ndef _make_dummy_kernel_results(\n dummy_state,\n dummy_target_log_prob,\n dummy_grads_target_log_prob):\n return KernelResults(\n acceptance_probs=dummy_target_log_prob,\n current_grads_target_log_prob=dummy_grads_target_log_prob,\n current_target_log_prob=dummy_target_log_prob,\n energy_change=dummy_target_log_prob,\n is_accepted=array_ops.ones_like(dummy_target_log_prob, dtypes.bool),\n proposed_grads_target_log_prob=dummy_grads_target_log_prob,\n proposed_state=dummy_state,\n proposed_target_log_prob=dummy_target_log_prob,\n random_positive=dummy_target_log_prob,\n )\n\n\ndef sample_chain(\n num_results,\n target_log_prob_fn,\n current_state,\n step_size,\n num_leapfrog_steps,\n num_burnin_steps=0,\n num_steps_between_results=0,\n seed=None,\n current_target_log_prob=None,\n current_grads_target_log_prob=None,\n name=None):\n \"\"\"Runs multiple iterations of one or more Hamiltonian Monte Carlo chains.\n\n Hamiltonian Monte Carlo (HMC) is a Markov chain Monte Carlo (MCMC) algorithm\n that takes a series of gradient-informed steps to produce a Metropolis\n proposal. This function samples from an HMC Markov chain at `current_state`\n and whose stationary distribution has log-unnormalized-density\n `target_log_prob_fn()`.\n\n This function samples from multiple chains in parallel. It assumes that the\n the leftmost dimensions of (each) `current_state` (part) index an independent\n chain. The function `target_log_prob_fn()` sums log-probabilities across\n event dimensions (i.e., current state (part) rightmost dimensions). 
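For example (a minimal sketch, assuming `dims` and `num_chains` as in the\n examples below, with a diagonal-normal target):\n\n ```python\n tfd = tf.contrib.distributions\n target = tfd.MultivariateNormalDiag(loc=tf.zeros(dims))\n # For a state of shape [num_chains, dims], log_prob sums over the trailing\n # event dimension and returns a Tensor of shape [num_chains].\n chain_log_probs = target.log_prob(tf.zeros([num_chains, dims]))\n ```\n\n 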
Each\n element of the output of `target_log_prob_fn()` represents the (possibly\n unnormalized) log-probability of the joint distribution over (all) the current\n state (parts).\n\n The `current_state` can be represented as a single `Tensor` or a `list` of\n `Tensors` which collectively represent the current state. When specifying a\n `list`, one must also specify a list of `step_size`s.\n\n Note: `target_log_prob_fn` is called exactly twice.\n\n Only one out of every `num_steps_between_samples + 1` steps is included in the\n returned results. This \"thinning\" comes at a cost of reduced statistical\n power, while reducing memory requirements and autocorrelation. For more\n discussion see [1].\n\n [1]: \"Statistically efficient thinning of a Markov chain sampler.\"\n Art B. Owen. April 2017.\n http://statweb.stanford.edu/~owen/reports/bestthinning.pdf\n\n #### Examples:\n\n ##### Sample from a diagonal-variance Gaussian.\n\n ```python\n tfd = tf.contrib.distributions\n\n def make_likelihood(true_variances):\n return tfd.MultivariateNormalDiag(\n scale_diag=tf.sqrt(true_variances))\n\n dims = 10\n dtype = np.float32\n true_variances = tf.linspace(dtype(1), dtype(3), dims)\n likelihood = make_likelihood(true_variances)\n\n states, kernel_results = hmc.sample_chain(\n num_results=1000,\n target_log_prob_fn=likelihood.log_prob,\n current_state=tf.zeros(dims),\n step_size=0.5,\n num_leapfrog_steps=2,\n num_burnin_steps=500)\n\n # Compute sample stats.\n sample_mean = tf.reduce_mean(states, axis=0)\n sample_var = tf.reduce_mean(\n tf.squared_difference(states, sample_mean),\n axis=0)\n ```\n\n ##### Sampling from factor-analysis posteriors with known factors.\n\n I.e.,\n\n ```none\n for i=1..n:\n w[i] ~ Normal(0, eye(d)) # prior\n x[i] ~ Normal(loc=matmul(w[i], F)) # likelihood\n ```\n\n where `F` denotes factors.\n\n ```python\n tfd = tf.contrib.distributions\n\n def make_prior(dims, dtype):\n return tfd.MultivariateNormalDiag(\n loc=tf.zeros(dims, dtype))\n\n def make_likelihood(weights, factors):\n return tfd.MultivariateNormalDiag(\n loc=tf.tensordot(weights, factors, axes=[[0], [-1]]))\n\n # Setup data.\n num_weights = 10\n num_factors = 4\n num_chains = 100\n dtype = np.float32\n\n prior = make_prior(num_weights, dtype)\n weights = prior.sample(num_chains)\n factors = np.random.randn(num_factors, num_weights).astype(dtype)\n x = make_likelihood(weights, factors).sample(num_chains)\n\n def target_log_prob(w):\n # Target joint is: `f(w) = p(w, x | factors)`.\n return prior.log_prob(w) + make_likelihood(w, factors).log_prob(x)\n\n # Get `num_results` samples from `num_chains` independent chains.\n chains_states, kernels_results = hmc.sample_chain(\n num_results=1000,\n target_log_prob_fn=target_log_prob,\n current_state=tf.zeros([num_chains, dims], dtype),\n step_size=0.1,\n num_leapfrog_steps=2,\n num_burnin_steps=500)\n\n # Compute sample stats.\n sample_mean = tf.reduce_mean(chains_states, axis=[0, 1])\n sample_var = tf.reduce_mean(\n tf.squared_difference(chains_states, sample_mean),\n axis=[0, 1])\n ```\n\n Args:\n num_results: Integer number of Markov chain draws.\n target_log_prob_fn: Python callable which takes an argument like\n `current_state` (or `*current_state` if it's a list) and returns its\n (possibly unnormalized) log-density under the target distribution.\n current_state: `Tensor` or Python `list` of `Tensor`s representing the\n current state(s) of the Markov chain(s). 
The first `r` dimensions index\n independent chains, `r = tf.rank(target_log_prob_fn(*current_state))`.\n step_size: `Tensor` or Python `list` of `Tensor`s representing the step size\n for the leapfrog integrator. Must broadcast with the shape of\n `current_state`. Larger step sizes lead to faster progress, but too-large\n step sizes make rejection exponentially more likely. When possible, it's\n often helpful to match per-variable step sizes to the standard deviations\n of the target distribution in each variable.\n num_leapfrog_steps: Integer number of steps to run the leapfrog integrator\n for. Total progress per HMC step is roughly proportional to `step_size *\n num_leapfrog_steps`.\n num_burnin_steps: Integer number of chain steps to take before starting to\n collect results.\n Default value: 0 (i.e., no burn-in).\n num_steps_between_results: Integer number of chain steps between collecting\n a result. Only one out of every `num_steps_between_samples + 1` steps is\n included in the returned results. This \"thinning\" comes at a cost of\n reduced statistical power, while reducing memory requirements and\n autocorrelation. For more discussion see [1].\n Default value: 0 (i.e., no subsampling).\n seed: Python integer to seed the random number generator.\n current_target_log_prob: (Optional) `Tensor` representing the value of\n `target_log_prob_fn` at the `current_state`. The only reason to specify\n this argument is to reduce TF graph size.\n Default value: `None` (i.e., compute as needed).\n current_grads_target_log_prob: (Optional) Python list of `Tensor`s\n representing gradient of `target_log_prob` at the `current_state` and wrt\n the `current_state`. Must have same shape as `current_state`. The only\n reason to specify this argument is to reduce TF graph size.\n Default value: `None` (i.e., compute as needed).\n name: Python `str` name prefixed to Ops created by this function.\n Default value: `None` (i.e., \"hmc_sample_chain\").\n\n Returns:\n accepted_states: Tensor or Python list of `Tensor`s representing the\n state(s) of the Markov chain(s) at each result step. 
Has same shape as\n input `current_state` but with a prepended `num_results`-size dimension.\n kernel_results: `collections.namedtuple` of internal calculations used to\n advance the chain.\n \"\"\"\n with ops.name_scope(\n name, \"hmc_sample_chain\",\n [num_results, current_state, step_size, num_leapfrog_steps,\n num_burnin_steps, num_steps_between_results, seed,\n current_target_log_prob, current_grads_target_log_prob]):\n with ops.name_scope(\"initialize\"):\n [\n current_state,\n step_size,\n current_target_log_prob,\n current_grads_target_log_prob,\n ] = _prepare_args(\n target_log_prob_fn,\n current_state,\n step_size,\n current_target_log_prob,\n current_grads_target_log_prob)\n num_results = ops.convert_to_tensor(\n num_results,\n dtype=dtypes.int32,\n name=\"num_results\")\n num_leapfrog_steps = ops.convert_to_tensor(\n num_leapfrog_steps,\n dtype=dtypes.int32,\n name=\"num_leapfrog_steps\")\n num_burnin_steps = ops.convert_to_tensor(\n num_burnin_steps,\n dtype=dtypes.int32,\n name=\"num_burnin_steps\")\n num_steps_between_results = ops.convert_to_tensor(\n num_steps_between_results,\n dtype=dtypes.int32,\n name=\"num_steps_between_results\")\n\n def _run_chain(num_steps, current_state, kernel_results):\n \"\"\"Runs the chain(s) for `num_steps`.\"\"\"\n def _loop_body(iter_, current_state, kernel_results):\n return [iter_ + 1] + list(kernel(\n target_log_prob_fn,\n current_state,\n step_size,\n num_leapfrog_steps,\n seed,\n kernel_results.current_target_log_prob,\n kernel_results.current_grads_target_log_prob))\n while_loop_kwargs = dict(\n cond=lambda iter_, *args: iter_ < num_steps,\n body=_loop_body,\n loop_vars=[\n np.int32(0),\n current_state,\n kernel_results,\n ],\n )\n if seed is not None:\n while_loop_kwargs[\"parallel_iterations\"] = 1\n return control_flow_ops.while_loop(\n **while_loop_kwargs)[1:] # Lop-off \"iter_\".\n\n def _scan_body(args_list, iter_):\n \"\"\"Closure which implements `tf.scan` body.\"\"\"\n current_state, kernel_results = args_list\n return _run_chain(\n 1 + array_ops.where(math_ops.equal(iter_, 0),\n num_burnin_steps,\n num_steps_between_results),\n current_state,\n kernel_results)\n\n scan_kwargs = dict(\n fn=_scan_body,\n elems=math_ops.range(num_results), # iter_: used to choose burnin.\n initializer=[\n current_state,\n _make_dummy_kernel_results(\n current_state,\n current_target_log_prob,\n current_grads_target_log_prob),\n ])\n if seed is not None:\n scan_kwargs[\"parallel_iterations\"] = 1\n return functional_ops.scan(**scan_kwargs)\n\n\ndef sample_annealed_importance_chain(\n proposal_log_prob_fn,\n num_steps,\n target_log_prob_fn,\n current_state,\n step_size,\n num_leapfrog_steps,\n seed=None,\n name=None):\n \"\"\"Runs annealed importance sampling (AIS) to estimate normalizing constants.\n\n This function uses Hamiltonian Monte Carlo to sample from a series of\n distributions that slowly interpolates between an initial \"proposal\"\n distribution:\n\n `exp(proposal_log_prob_fn(x) - proposal_log_normalizer)`\n\n and the target distribution:\n\n `exp(target_log_prob_fn(x) - target_log_normalizer)`,\n\n accumulating importance weights along the way. 
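Concretely, the `i`-th of the `num_steps` transitions targets a bridging density\n formed as a convex combination of the two log-densities; a minimal sketch\n (`bridging_log_prob` is a hypothetical name for what\n `make_convex_combined_log_prob_fn` builds in the implementation):\n\n ```python\n def bridging_log_prob(x, i):\n   # Mirrors make_convex_combined_log_prob_fn: anneal from proposal to target.\n   beta = (i + 1.) / num_steps\n   return (1. - beta) * proposal_log_prob_fn(x) + beta * target_log_prob_fn(x)\n ```\n\n 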
The product of these\n importance weights gives an unbiased estimate of the ratio of the\n normalizing constants of the initial distribution and the target\n distribution:\n\n `E[exp(ais_weights)] = exp(target_log_normalizer - proposal_log_normalizer)`.\n\n Note: `proposal_log_prob_fn` and `target_log_prob_fn` are called exactly three\n times (although this may be reduced to two times, in the future).\n\n #### Examples:\n\n ##### Estimate the normalizing constant of a log-gamma distribution.\n\n ```python\n tfd = tf.contrib.distributions\n\n # Run 100 AIS chains in parallel\n num_chains = 100\n dims = 20\n dtype = np.float32\n\n proposal = tfd.MultivatiateNormalDiag(\n loc=tf.zeros([dims], dtype=dtype))\n\n target = tfd.TransformedDistribution(\n distribution=tfd.Gamma(concentration=dtype(2),\n rate=dtype(3)),\n bijector=tfd.bijectors.Invert(tfd.bijectors.Exp()),\n event_shape=[dims])\n\n chains_state, ais_weights, kernels_results = (\n hmc.sample_annealed_importance_chain(\n proposal_log_prob_fn=proposal.log_prob,\n num_steps=1000,\n target_log_prob_fn=target.log_prob,\n step_size=0.2,\n current_state=proposal.sample(num_chains),\n num_leapfrog_steps=2))\n\n log_estimated_normalizer = (tf.reduce_logsumexp(ais_weights)\n - np.log(num_chains))\n log_true_normalizer = tf.lgamma(2.) - 2. * tf.log(3.)\n ```\n\n ##### Estimate marginal likelihood of a Bayesian regression model.\n\n ```python\n tfd = tf.contrib.distributions\n\n def make_prior(dims, dtype):\n return tfd.MultivariateNormalDiag(\n loc=tf.zeros(dims, dtype))\n\n def make_likelihood(weights, x):\n return tfd.MultivariateNormalDiag(\n loc=tf.tensordot(weights, x, axes=[[0], [-1]]))\n\n # Run 100 AIS chains in parallel\n num_chains = 100\n dims = 10\n dtype = np.float32\n\n # Make training data.\n x = np.random.randn(num_chains, dims).astype(dtype)\n true_weights = np.random.randn(dims).astype(dtype)\n y = np.dot(x, true_weights) + np.random.randn(num_chains)\n\n # Setup model.\n prior = make_prior(dims, dtype)\n def target_log_prob_fn(weights):\n return prior.log_prob(weights) + make_likelihood(weights, x).log_prob(y)\n\n proposal = tfd.MultivariateNormalDiag(\n loc=tf.zeros(dims, dtype))\n\n weight_samples, ais_weights, kernel_results = (\n hmc.sample_annealed_importance_chain(\n num_steps=1000,\n proposal_log_prob_fn=proposal.log_prob,\n target_log_prob_fn=target_log_prob_fn\n current_state=tf.zeros([num_chains, dims], dtype),\n step_size=0.1,\n num_leapfrog_steps=2))\n log_normalizer_estimate = (tf.reduce_logsumexp(ais_weights)\n - np.log(num_chains))\n ```\n\n Args:\n proposal_log_prob_fn: Python callable that returns the log density of the\n initial distribution.\n num_steps: Integer number of Markov chain updates to run. More\n iterations means more expense, but smoother annealing between q\n and p, which in turn means exponentially lower variance for the\n normalizing constant estimator.\n target_log_prob_fn: Python callable which takes an argument like\n `current_state` (or `*current_state` if it's a list) and returns its\n (possibly unnormalized) log-density under the target distribution.\n current_state: `Tensor` or Python `list` of `Tensor`s representing the\n current state(s) of the Markov chain(s). The first `r` dimensions index\n independent chains, `r = tf.rank(target_log_prob_fn(*current_state))`.\n step_size: `Tensor` or Python `list` of `Tensor`s representing the step size\n for the leapfrog integrator. Must broadcast with the shape of\n `current_state`. 
Larger step sizes lead to faster progress, but too-large\n step sizes make rejection exponentially more likely. When possible, it's\n often helpful to match per-variable step sizes to the standard deviations\n of the target distribution in each variable.\n num_leapfrog_steps: Integer number of steps to run the leapfrog integrator\n for. Total progress per HMC step is roughly proportional to `step_size *\n num_leapfrog_steps`.\n seed: Python integer to seed the random number generator.\n name: Python `str` name prefixed to Ops created by this function.\n Default value: `None` (i.e., \"hmc_sample_annealed_importance_chain\").\n\n Returns:\n accepted_state: `Tensor` or Python list of `Tensor`s representing the\n state(s) of the Markov chain(s) at the final iteration. Has same shape as\n input `current_state`.\n ais_weights: Tensor with the estimated weight(s). Has shape matching\n `target_log_prob_fn(current_state)`.\n kernel_results: `collections.namedtuple` of internal calculations used to\n advance the chain.\n \"\"\"\n def make_convex_combined_log_prob_fn(iter_):\n def _fn(*args):\n p = proposal_log_prob_fn(*args)\n t = target_log_prob_fn(*args)\n dtype = p.dtype.base_dtype\n beta = (math_ops.cast(iter_ + 1, dtype)\n / math_ops.cast(num_steps, dtype))\n return (1. - beta) * p + beta * t\n return _fn\n\n with ops.name_scope(\n name, \"hmc_sample_annealed_importance_chain\",\n [num_steps, current_state, step_size, num_leapfrog_steps, seed]):\n with ops.name_scope(\"initialize\"):\n [\n current_state,\n step_size,\n current_log_prob,\n current_grads_log_prob,\n ] = _prepare_args(\n make_convex_combined_log_prob_fn(iter_=0),\n current_state,\n step_size,\n description=\"convex_combined_log_prob\")\n num_steps = ops.convert_to_tensor(\n num_steps,\n dtype=dtypes.int32,\n name=\"num_steps\")\n num_leapfrog_steps = ops.convert_to_tensor(\n num_leapfrog_steps,\n dtype=dtypes.int32,\n name=\"num_leapfrog_steps\")\n def _loop_body(iter_, ais_weights, current_state, kernel_results):\n \"\"\"Closure which implements `tf.while_loop` body.\"\"\"\n current_state_parts = (list(current_state)\n if _is_list_like(current_state)\n else [current_state])\n # TODO(b/72994218): Consider refactoring things to avoid this unecessary\n # call.\n ais_weights += ((target_log_prob_fn(*current_state_parts)\n - proposal_log_prob_fn(*current_state_parts))\n / math_ops.cast(num_steps, ais_weights.dtype))\n return [iter_ + 1, ais_weights] + list(kernel(\n make_convex_combined_log_prob_fn(iter_),\n current_state,\n step_size,\n num_leapfrog_steps,\n seed,\n kernel_results.current_target_log_prob,\n kernel_results.current_grads_target_log_prob))\n\n while_loop_kwargs = dict(\n cond=lambda iter_, *args: iter_ < num_steps,\n body=_loop_body,\n loop_vars=[\n np.int32(0), # iter_\n array_ops.zeros_like(current_log_prob), # ais_weights\n current_state,\n _make_dummy_kernel_results(current_state,\n current_log_prob,\n current_grads_log_prob),\n ])\n if seed is not None:\n while_loop_kwargs[\"parallel_iterations\"] = 1\n\n [ais_weights, current_state, kernel_results] = control_flow_ops.while_loop(\n **while_loop_kwargs)[1:] # Lop-off \"iter_\".\n\n return [current_state, ais_weights, kernel_results]\n\n\ndef kernel(target_log_prob_fn,\n current_state,\n step_size,\n num_leapfrog_steps,\n seed=None,\n current_target_log_prob=None,\n current_grads_target_log_prob=None,\n name=None):\n \"\"\"Runs one iteration of Hamiltonian Monte Carlo.\n\n Hamiltonian Monte Carlo (HMC) is a Markov chain Monte Carlo (MCMC)\n algorithm that takes a 
series of gradient-informed steps to produce\n a Metropolis proposal. This function applies one step of HMC to\n randomly update the variable `x`.\n\n This function can update multiple chains in parallel. It assumes that all\n leftmost dimensions of `current_state` index independent chain states (and are\n therefore updated independently). The output of `target_log_prob_fn()` should\n sum log-probabilities across all event dimensions. Slices along the rightmost\n dimensions may have different target distributions; for example,\n `current_state[0, :]` could have a different target distribution from\n `current_state[1, :]`. This is up to `target_log_prob_fn()`. (The number of\n independent chains is `tf.size(target_log_prob_fn(*current_state))`.)\n\n #### Examples:\n\n ##### Simple chain with warm-up.\n\n ```python\n tfd = tf.contrib.distributions\n\n # Tuning acceptance rates:\n dtype = np.float32\n target_accept_rate = 0.631\n num_warmup_iter = 500\n num_chain_iter = 500\n\n x = tf.get_variable(name=\"x\", initializer=dtype(1))\n step_size = tf.get_variable(name=\"step_size\", initializer=dtype(1))\n\n target = tfd.Normal(loc=dtype(0), scale=dtype(1))\n\n new_x, other_results = hmc.kernel(\n target_log_prob_fn=target.log_prob,\n current_state=x,\n step_size=step_size,\n num_leapfrog_steps=3)[:4]\n\n x_update = x.assign(new_x)\n\n step_size_update = step_size.assign_add(\n step_size * tf.where(\n other_results.acceptance_probs > target_accept_rate,\n 0.01, -0.01))\n\n warmup = tf.group([x_update, step_size_update])\n\n tf.global_variables_initializer().run()\n\n sess.graph.finalize() # No more graph building.\n\n # Warm up the sampler and adapt the step size\n for _ in xrange(num_warmup_iter):\n sess.run(warmup)\n\n # Collect samples without adapting step size\n samples = np.zeros([num_chain_iter])\n for i in xrange(num_chain_iter):\n _, x_, target_log_prob_, grad_ = sess.run([\n x_update,\n x,\n other_results.target_log_prob,\n other_results.grads_target_log_prob])\n samples[i] = x_\n\n print(samples.mean(), samples.std())\n ```\n\n ##### Sample from more complicated posterior.\n\n I.e.,\n\n ```none\n W ~ MVN(loc=0, scale=sigma * eye(dims))\n for i=1...num_samples:\n X[i] ~ MVN(loc=0, scale=eye(dims))\n eps[i] ~ Normal(loc=0, scale=1)\n Y[i] = X[i].T * W + eps[i]\n ```\n\n ```python\n tfd = tf.contrib.distributions\n\n def make_training_data(num_samples, dims, sigma):\n dt = np.asarray(sigma).dtype\n zeros = tf.zeros(dims, dtype=dt)\n x = tfd.MultivariateNormalDiag(\n loc=zeros).sample(num_samples, seed=1)\n w = tfd.MultivariateNormalDiag(\n loc=zeros,\n scale_identity_multiplier=sigma).sample(seed=2)\n noise = tfd.Normal(\n loc=dt(0),\n scale=dt(1)).sample(num_samples, seed=3)\n y = tf.tensordot(x, w, axes=[[1], [0]]) + noise\n return y, x, w\n\n def make_prior(sigma, dims):\n # p(w | sigma)\n return tfd.MultivariateNormalDiag(\n loc=tf.zeros([dims], dtype=sigma.dtype),\n scale_identity_multiplier=sigma)\n\n def make_likelihood(x, w):\n # p(y | x, w)\n return tfd.MultivariateNormalDiag(\n loc=tf.tensordot(x, w, axes=[[1], [0]]))\n\n # Setup assumptions.\n dtype = np.float32\n num_samples = 150\n dims = 10\n num_iters = int(5e3)\n\n true_sigma = dtype(0.5)\n y, x, true_weights = make_training_data(num_samples, dims, true_sigma)\n\n # Estimate of `log(true_sigma)`.\n log_sigma = tf.get_variable(name=\"log_sigma\", initializer=dtype(0))\n sigma = tf.exp(log_sigma)\n\n # State of the Markov chain.\n weights = tf.get_variable(\n name=\"weights\",\n 
initializer=np.random.randn(dims).astype(dtype))\n\n prior = make_prior(sigma, dims)\n\n def joint_log_prob_fn(w):\n # f(w) = log p(w, y | x)\n return prior.log_prob(w) + make_likelihood(x, w).log_prob(y)\n\n weights_update = weights.assign(\n hmc.kernel(target_log_prob_fn=joint_log_prob,\n current_state=weights,\n step_size=0.1,\n num_leapfrog_steps=5)[0])\n\n with tf.control_dependencies([weights_update]):\n loss = -prior.log_prob(weights)\n\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)\n log_sigma_update = optimizer.minimize(loss, var_list=[log_sigma])\n\n sess.graph.finalize() # No more graph building.\n\n tf.global_variables_initializer().run()\n\n sigma_history = np.zeros(num_iters, dtype)\n weights_history = np.zeros([num_iters, dims], dtype)\n\n for i in xrange(num_iters):\n _, sigma_, weights_, _ = sess.run([log_sigma_update, sigma, weights])\n weights_history[i, :] = weights_\n sigma_history[i] = sigma_\n\n true_weights_ = sess.run(true_weights)\n\n # Should converge to something close to true_sigma.\n plt.plot(sigma_history);\n plt.ylabel(\"sigma\");\n plt.xlabel(\"iteration\");\n ```\n\n Args:\n target_log_prob_fn: Python callable which takes an argument like\n `current_state` (or `*current_state` if it's a list) and returns its\n (possibly unnormalized) log-density under the target distribution.\n current_state: `Tensor` or Python `list` of `Tensor`s representing the\n current state(s) of the Markov chain(s). The first `r` dimensions index\n independent chains, `r = tf.rank(target_log_prob_fn(*current_state))`.\n step_size: `Tensor` or Python `list` of `Tensor`s representing the step size\n for the leapfrog integrator. Must broadcast with the shape of\n `current_state`. Larger step sizes lead to faster progress, but too-large\n step sizes make rejection exponentially more likely. When possible, it's\n often helpful to match per-variable step sizes to the standard deviations\n of the target distribution in each variable.\n num_leapfrog_steps: Integer number of steps to run the leapfrog integrator\n for. Total progress per HMC step is roughly proportional to `step_size *\n num_leapfrog_steps`.\n seed: Python integer to seed the random number generator.\n current_target_log_prob: (Optional) `Tensor` representing the value of\n `target_log_prob_fn` at the `current_state`. The only reason to\n specify this argument is to reduce TF graph size.\n Default value: `None` (i.e., compute as needed).\n current_grads_target_log_prob: (Optional) Python list of `Tensor`s\n representing gradient of `current_target_log_prob` at the `current_state`\n and wrt the `current_state`. Must have same shape as `current_state`. The\n only reason to specify this argument is to reduce TF graph size.\n Default value: `None` (i.e., compute as needed).\n name: Python `str` name prefixed to Ops created by this function.\n Default value: `None` (i.e., \"hmc_kernel\").\n\n Returns:\n accepted_state: Tensor or Python list of `Tensor`s representing the state(s)\n of the Markov chain(s) at each result step. 
Has same shape as\n `current_state`.\n kernel_results: `collections.namedtuple` of internal calculations used to\n advance the chain.\n\n Raises:\n ValueError: if there isn't one `step_size` or a list with same length as\n `current_state`.\n \"\"\"\n with ops.name_scope(\n name, \"hmc_kernel\",\n [current_state, step_size, num_leapfrog_steps, seed,\n current_target_log_prob, current_grads_target_log_prob]):\n with ops.name_scope(\"initialize\"):\n [current_state_parts, step_sizes, current_target_log_prob,\n current_grads_target_log_prob] = _prepare_args(\n target_log_prob_fn, current_state, step_size,\n current_target_log_prob, current_grads_target_log_prob,\n maybe_expand=True)\n independent_chain_ndims = distributions_util.prefer_static_rank(\n current_target_log_prob)\n current_momentums = []\n for s in current_state_parts:\n current_momentums.append(random_ops.random_normal(\n shape=array_ops.shape(s),\n dtype=s.dtype.base_dtype,\n seed=seed))\n seed = distributions_util.gen_new_seed(\n seed, salt=\"hmc_kernel_momentums\")\n\n num_leapfrog_steps = ops.convert_to_tensor(\n num_leapfrog_steps,\n dtype=dtypes.int32,\n name=\"num_leapfrog_steps\")\n [\n proposed_momentums,\n proposed_state_parts,\n proposed_target_log_prob,\n proposed_grads_target_log_prob,\n ] = _leapfrog_integrator(current_momentums,\n target_log_prob_fn,\n current_state_parts,\n step_sizes,\n num_leapfrog_steps,\n current_target_log_prob,\n current_grads_target_log_prob)\n\n energy_change = _compute_energy_change(current_target_log_prob,\n current_momentums,\n proposed_target_log_prob,\n proposed_momentums,\n independent_chain_ndims)\n\n # u < exp(min(-energy, 0)), where u~Uniform[0,1)\n # ==> -log(u) >= max(e, 0)\n # ==> -log(u) >= e\n # (Perhaps surprisingly, we don't have a better way to obtain a random\n # uniform from positive reals, i.e., `tf.random_uniform(minval=0,\n # maxval=np.inf)` won't work.)\n random_uniform = random_ops.random_uniform(\n shape=array_ops.shape(energy_change),\n dtype=energy_change.dtype,\n seed=seed)\n random_positive = -math_ops.log(random_uniform)\n is_accepted = random_positive >= energy_change\n\n accepted_target_log_prob = array_ops.where(is_accepted,\n proposed_target_log_prob,\n current_target_log_prob)\n\n accepted_state_parts = [_choose(is_accepted,\n proposed_state_part,\n current_state_part,\n independent_chain_ndims)\n for current_state_part, proposed_state_part\n in zip(current_state_parts, proposed_state_parts)]\n\n accepted_grads_target_log_prob = [\n _choose(is_accepted,\n proposed_grad,\n grad,\n independent_chain_ndims)\n for proposed_grad, grad\n in zip(proposed_grads_target_log_prob, current_grads_target_log_prob)]\n\n maybe_flatten = lambda x: x if _is_list_like(current_state) else x[0]\n return [\n maybe_flatten(accepted_state_parts),\n KernelResults(\n acceptance_probs=math_ops.exp(math_ops.minimum(-energy_change, 0.)),\n current_grads_target_log_prob=accepted_grads_target_log_prob,\n current_target_log_prob=accepted_target_log_prob,\n energy_change=energy_change,\n is_accepted=is_accepted,\n proposed_grads_target_log_prob=proposed_grads_target_log_prob,\n proposed_state=maybe_flatten(proposed_state_parts),\n proposed_target_log_prob=proposed_target_log_prob,\n random_positive=random_positive,\n ),\n ]\n\n\ndef _leapfrog_integrator(current_momentums,\n target_log_prob_fn,\n current_state_parts,\n step_sizes,\n num_leapfrog_steps,\n current_target_log_prob=None,\n current_grads_target_log_prob=None,\n name=None):\n \"\"\"Applies `num_leapfrog_steps` of the leapfrog 
integrator.\n\n Assumes a simple quadratic kinetic energy function: `0.5 ||momentum||**2`.\n\n #### Examples:\n\n ##### Simple quadratic potential.\n\n ```python\n tfd = tf.contrib.distributions\n\n dims = 10\n num_iter = int(1e3)\n dtype = np.float32\n\n position = tf.placeholder(np.float32)\n momentum = tf.placeholder(np.float32)\n\n [\n new_momentums,\n new_positions,\n ] = hmc._leapfrog_integrator(\n current_momentums=[momentum],\n target_log_prob_fn=tfd.MultivariateNormalDiag(\n loc=tf.zeros(dims, dtype)).log_prob,\n current_state_parts=[position],\n step_sizes=0.1,\n num_leapfrog_steps=3)[:2]\n\n sess.graph.finalize() # No more graph building.\n\n momentum_ = np.random.randn(dims).astype(dtype)\n position_ = np.random.randn(dims).astype(dtype)\n\n positions = np.zeros([num_iter, dims], dtype)\n for i in xrange(num_iter):\n position_, momentum_ = sess.run(\n [new_momentums[0], new_position[0]],\n feed_dict={position: position_, momentum: momentum_})\n positions[i] = position_\n\n plt.plot(positions[:, 0]); # Sinusoidal.\n ```\n\n Args:\n current_momentums: Tensor containing the value(s) of the momentum\n variable(s) to update.\n target_log_prob_fn: Python callable which takes an argument like\n `*current_state_parts` and returns its (possibly unnormalized) log-density\n under the target distribution.\n current_state_parts: Python `list` of `Tensor`s representing the current\n state(s) of the Markov chain(s). The first `independent_chain_ndims` of\n the `Tensor`(s) index different chains.\n step_sizes: Python `list` of `Tensor`s representing the step size for the\n leapfrog integrator. Must broadcast with the shape of\n `current_state_parts`. Larger step sizes lead to faster progress, but\n too-large step sizes make rejection exponentially more likely. When\n possible, it's often helpful to match per-variable step sizes to the\n standard deviations of the target distribution in each variable.\n num_leapfrog_steps: Integer number of steps to run the leapfrog integrator\n for. Total progress per HMC step is roughly proportional to `step_size *\n num_leapfrog_steps`.\n current_target_log_prob: (Optional) `Tensor` representing the value of\n `target_log_prob_fn(*current_state_parts)`. The only reason to specify\n this argument is to reduce TF graph size.\n Default value: `None` (i.e., compute as needed).\n current_grads_target_log_prob: (Optional) Python list of `Tensor`s\n representing gradient of `target_log_prob_fn(*current_state_parts`) wrt\n `current_state_parts`. Must have same shape as `current_state_parts`. The\n only reason to specify this argument is to reduce TF graph size.\n Default value: `None` (i.e., compute as needed).\n name: Python `str` name prefixed to Ops created by this function.\n Default value: `None` (i.e., \"hmc_leapfrog_integrator\").\n\n Returns:\n proposed_momentums: Updated value of the momentum.\n proposed_state_parts: Tensor or Python list of `Tensor`s representing the\n state(s) of the Markov chain(s) at each result step. 
Has same shape as\n input `current_state_parts`.\n proposed_target_log_prob: `Tensor` representing the value of\n `target_log_prob_fn` at `accepted_state`.\n proposed_grads_target_log_prob: Gradient of `proposed_target_log_prob` wrt\n `accepted_state`.\n\n Raises:\n ValueError: if `len(momentums) != len(state_parts)`.\n ValueError: if `len(state_parts) != len(step_sizes)`.\n ValueError: if `len(state_parts) != len(grads_target_log_prob)`.\n TypeError: if `not target_log_prob.dtype.is_floating`.\n \"\"\"\n def _loop_body(step,\n current_momentums,\n current_state_parts,\n ignore_current_target_log_prob, # pylint: disable=unused-argument\n current_grads_target_log_prob):\n return [step + 1] + list(_leapfrog_step(current_momentums,\n target_log_prob_fn,\n current_state_parts,\n step_sizes,\n current_grads_target_log_prob))\n\n with ops.name_scope(\n name, \"hmc_leapfrog_integrator\",\n [current_momentums, current_state_parts, step_sizes, num_leapfrog_steps,\n current_target_log_prob, current_grads_target_log_prob]):\n if len(current_momentums) != len(current_state_parts):\n raise ValueError(\"`momentums` must be in one-to-one correspondence \"\n \"with `state_parts`\")\n num_leapfrog_steps = ops.convert_to_tensor(num_leapfrog_steps,\n name=\"num_leapfrog_steps\")\n current_target_log_prob, current_grads_target_log_prob = (\n _maybe_call_fn_and_grads(\n target_log_prob_fn,\n current_state_parts,\n current_target_log_prob,\n current_grads_target_log_prob))\n return control_flow_ops.while_loop(\n cond=lambda iter_, *args: iter_ < num_leapfrog_steps,\n body=_loop_body,\n loop_vars=[\n np.int32(0), # iter_\n current_momentums,\n current_state_parts,\n current_target_log_prob,\n current_grads_target_log_prob,\n ],\n back_prop=False)[1:] # Lop-off \"iter_\".\n\n\ndef _leapfrog_step(current_momentums,\n target_log_prob_fn,\n current_state_parts,\n step_sizes,\n current_grads_target_log_prob,\n name=None):\n \"\"\"Applies one step of the leapfrog integrator.\"\"\"\n with ops.name_scope(\n name, \"_leapfrog_step\",\n [current_momentums, current_state_parts, step_sizes,\n current_grads_target_log_prob]):\n proposed_momentums = [m + 0.5 * ss * g for m, ss, g\n in zip(current_momentums,\n step_sizes,\n current_grads_target_log_prob)]\n proposed_state_parts = [x + ss * m for x, ss, m\n in zip(current_state_parts,\n step_sizes,\n proposed_momentums)]\n proposed_target_log_prob = target_log_prob_fn(*proposed_state_parts)\n if not proposed_target_log_prob.dtype.is_floating:\n raise TypeError(\"`target_log_prob_fn` must produce a `Tensor` \"\n \"with `float` `dtype`.\")\n proposed_grads_target_log_prob = gradients_ops.gradients(\n proposed_target_log_prob, proposed_state_parts)\n if any(g is None for g in proposed_grads_target_log_prob):\n raise ValueError(\n \"Encountered `None` gradient. 
Does your target `target_log_prob_fn` \"\n \"access all `tf.Variable`s via `tf.get_variable`?\\n\"\n \" current_state_parts: {}\\n\"\n \" proposed_state_parts: {}\\n\"\n \" proposed_grads_target_log_prob: {}\".format(\n current_state_parts,\n proposed_state_parts,\n proposed_grads_target_log_prob))\n proposed_momentums = [m + 0.5 * ss * g for m, ss, g\n in zip(proposed_momentums,\n step_sizes,\n proposed_grads_target_log_prob)]\n return [\n proposed_momentums,\n proposed_state_parts,\n proposed_target_log_prob,\n proposed_grads_target_log_prob,\n ]\n\n\ndef _compute_energy_change(current_target_log_prob,\n current_momentums,\n proposed_target_log_prob,\n proposed_momentums,\n independent_chain_ndims,\n name=None):\n \"\"\"Helper to `kernel` which computes the energy change.\"\"\"\n with ops.name_scope(\n name, \"compute_energy_change\",\n ([current_target_log_prob, proposed_target_log_prob,\n independent_chain_ndims] +\n current_momentums + proposed_momentums)):\n # Abbreviate lk0=log_kinetic_energy and lk1=proposed_log_kinetic_energy\n # since they're a mouthful and lets us inline more.\n lk0, lk1 = [], []\n for current_momentum, proposed_momentum in zip(current_momentums,\n proposed_momentums):\n axis = math_ops.range(independent_chain_ndims,\n array_ops.rank(current_momentum))\n lk0.append(_log_sum_sq(current_momentum, axis))\n lk1.append(_log_sum_sq(proposed_momentum, axis))\n\n lk0 = -np.log(2.) + math_ops.reduce_logsumexp(array_ops.stack(lk0, axis=-1),\n axis=-1)\n lk1 = -np.log(2.) + math_ops.reduce_logsumexp(array_ops.stack(lk1, axis=-1),\n axis=-1)\n lp0 = -current_target_log_prob # log_potential\n lp1 = -proposed_target_log_prob # proposed_log_potential\n x = array_ops.stack([lp1, math_ops.exp(lk1), -lp0, -math_ops.exp(lk0)],\n axis=-1)\n\n # The sum is NaN if any element is NaN or we see both +Inf and -Inf.\n # Thus we will replace such rows with infinite energy change which implies\n # rejection. 
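(In fact the check below flags any row containing a non-finite entry as\n    # indeterminate, so such rows are also replaced.) 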
Recall that float-comparisons with NaN are always False.\n is_sum_determinate = (\n math_ops.reduce_all(math_ops.is_finite(x) | (x >= 0.), axis=-1) &\n math_ops.reduce_all(math_ops.is_finite(x) | (x <= 0.), axis=-1))\n is_sum_determinate = array_ops.tile(\n is_sum_determinate[..., array_ops.newaxis],\n multiples=array_ops.concat([\n array_ops.ones(array_ops.rank(is_sum_determinate),\n dtype=dtypes.int32),\n [4],\n ], axis=0))\n x = array_ops.where(is_sum_determinate,\n x,\n array_ops.fill(array_ops.shape(x),\n value=x.dtype.as_numpy_dtype(np.inf)))\n\n return math_ops.reduce_sum(x, axis=-1)\n\n\ndef _choose(is_accepted,\n accepted,\n rejected,\n independent_chain_ndims,\n name=None):\n \"\"\"Helper to `kernel` which expand_dims `is_accepted` to apply tf.where.\"\"\"\n def _expand_is_accepted_like(x):\n with ops.name_scope(\"_choose\"):\n expand_shape = array_ops.concat([\n array_ops.shape(is_accepted),\n array_ops.ones([array_ops.rank(x) - array_ops.rank(is_accepted)],\n dtype=dtypes.int32),\n ], axis=0)\n multiples = array_ops.concat([\n array_ops.ones([array_ops.rank(is_accepted)], dtype=dtypes.int32),\n array_ops.shape(x)[independent_chain_ndims:],\n ], axis=0)\n m = array_ops.tile(array_ops.reshape(is_accepted, expand_shape),\n multiples)\n m.set_shape(x.shape)\n return m\n with ops.name_scope(name, \"_choose\", values=[\n is_accepted, accepted, rejected, independent_chain_ndims]):\n return array_ops.where(_expand_is_accepted_like(accepted),\n accepted,\n rejected)\n\n\ndef _maybe_call_fn_and_grads(fn,\n fn_arg_list,\n fn_result=None,\n grads_fn_result=None,\n description=\"target_log_prob\"):\n \"\"\"Helper which computes `fn_result` and `grads` if needed.\"\"\"\n fn_arg_list = (list(fn_arg_list) if _is_list_like(fn_arg_list)\n else [fn_arg_list])\n if fn_result is None:\n fn_result = fn(*fn_arg_list)\n if not fn_result.dtype.is_floating:\n raise TypeError(\"`{}` must be a `Tensor` with `float` `dtype`.\".format(\n description))\n if grads_fn_result is None:\n grads_fn_result = gradients_ops.gradients(\n fn_result, fn_arg_list)\n if len(fn_arg_list) != len(grads_fn_result):\n raise ValueError(\"`{}` must be in one-to-one correspondence with \"\n \"`grads_{}`\".format(*[description]*2))\n if any(g is None for g in grads_fn_result):\n raise ValueError(\"Encountered `None` gradient.\")\n return fn_result, grads_fn_result\n\n\ndef _prepare_args(target_log_prob_fn, state, step_size,\n target_log_prob=None, grads_target_log_prob=None,\n maybe_expand=False, description=\"target_log_prob\"):\n \"\"\"Helper which processes input args to meet list-like assumptions.\"\"\"\n state_parts = list(state) if _is_list_like(state) else [state]\n state_parts = [ops.convert_to_tensor(s, name=\"state\")\n for s in state_parts]\n target_log_prob, grads_target_log_prob = _maybe_call_fn_and_grads(\n target_log_prob_fn,\n state_parts,\n target_log_prob,\n grads_target_log_prob,\n description)\n step_sizes = list(step_size) if _is_list_like(step_size) else [step_size]\n step_sizes = [\n ops.convert_to_tensor(\n s, name=\"step_size\", dtype=target_log_prob.dtype)\n for s in step_sizes]\n if len(step_sizes) == 1:\n step_sizes *= len(state_parts)\n if len(state_parts) != len(step_sizes):\n raise ValueError(\"There should be exactly one `step_size` or it should \"\n \"have same length as `current_state`.\")\n maybe_flatten = lambda x: x if maybe_expand or _is_list_like(state) else x[0]\n return [\n maybe_flatten(state_parts),\n maybe_flatten(step_sizes),\n target_log_prob,\n grads_target_log_prob,\n ]\n\n\ndef 
_is_list_like(x):\n \"\"\"Helper which returns `True` if input is `list`-like.\"\"\"\n return isinstance(x, (tuple, list))\n\n\ndef _log_sum_sq(x, axis=None):\n \"\"\"Computes log(sum(x**2)).\"\"\"\n return math_ops.reduce_logsumexp(2. * math_ops.log(math_ops.abs(x)), axis)\n", "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Functional tests for depthwise convolutional operations.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gradient_checker\nfrom tensorflow.python.ops import nn_impl\nfrom tensorflow.python.ops import nn_ops\nimport tensorflow.python.ops.nn_grad # pylint: disable=unused-import\nfrom tensorflow.python.platform import test\n\n\ndef ConfigsToTest():\n \"\"\"Iterator for different convolution shapes, strides and paddings.\n\n Yields:\n Tuple (input_size, filter_size, out_size, stride, padding), the depthwise\n convolution parameters.\n \"\"\"\n input_sizes = [[4, 5, 5, 48], [4, 8, 8, 84], [4, 17, 17, 48], [4, 9, 27, 8],\n [4, 31, 31, 7], [4, 35, 35, 2], [4, 147, 147, 2],\n [3, 299, 299, 3], [5, 183, 183, 1]]\n filter_sizes = [[1, 1, 48, 2], [1, 3, 84, 1], [3, 1, 48, 4], [3, 3, 8, 1],\n [3, 3, 7, 1], [5, 5, 2, 1], [3, 3, 2, 8], [2, 2, 3,\n 8], [5, 5, 1, 2]]\n out_sizes = [[4, 5, 5, 96], [4, 8, 8, 84], [4, 17, 17, 192], [4, 9, 27, 8],\n [4, 31, 31, 7], [4, 35, 35, 2], [4, 49, 49, 16],\n [3, 150, 150, 24], [5, 92, 92, 2]]\n strides = [1, 1, 1, 1, 1, 1, 3, 2, 2]\n # pylint: disable=invalid-name\n VALID = \"VALID\"\n SAME = \"SAME\"\n # pylint: enable=invalid-name\n paddings = [SAME, SAME, SAME, SAME, SAME, SAME, VALID, SAME, SAME, SAME]\n for i, f, o, s, p in zip(input_sizes, filter_sizes, out_sizes, strides,\n paddings):\n yield i, f, o, s, p\n\n\ndef CheckGradConfigsToTest():\n \"\"\"Iterator for different convolution shapes, strides and paddings.\n\n compute_gradient_error() is very expensive. 
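(It estimates a numerical Jacobian by perturbing the input elements one at a\n time, so its cost grows with the size of the input and output tensors.) 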
So the configs should be\n relatively small.\n\n Yields:\n Tuple (input_size, filter_size, out_size, stride, padding), the depthwise\n convolution parameters.\n \"\"\"\n input_sizes = [[2, 5, 8, 1], [4, 5, 5, 1], [2, 4, 4, 2], [1, 15, 15, 2],\n [2, 15, 16, 1]]\n filter_sizes = [[4, 4, 1, 2], [2, 2, 1, 2], [3, 1, 2, 2], [1, 3, 2, 1],\n [3, 3, 1, 2]]\n out_sizes = [[2, 5, 8, 2], [4, 2, 2, 2], [2, 4, 4, 4], [1, 15, 15, 2],\n [2, 5, 5, 2]]\n strides = [1, 2, 1, 1, 3]\n # pylint: disable=invalid-name\n VALID = \"VALID\"\n SAME = \"SAME\"\n # pylint: enable=invalid-name\n paddings = [SAME, VALID, SAME, SAME, VALID]\n for i, f, o, s, p in zip(input_sizes, filter_sizes, out_sizes, strides,\n paddings):\n yield i, f, o, s, p\n\n\nclass DepthwiseConv2DTest(test.TestCase):\n\n # This is testing that depthwise_conv2d and depthwise_conv2d_native\n # produce the same results. It also tests that NCHW and NWHC\n # formats agree, by comparing the depthwise_conv2d_native with\n # 'NCHW' format (with transposition) matches the 'NHWC' format using\n # the higher level interface.\n def _VerifyValues(self,\n tensor_in_sizes,\n filter_in_sizes,\n stride,\n padding,\n data_type,\n use_gpu,\n data_format=\"NHWC\"):\n \"\"\"Verifies the output values of the convolution function.\n\n Args:\n tensor_in_sizes: Input tensor dimensions in\n [batch, input_rows, input_cols, input_depth].\n filter_in_sizes: Filter tensor dimensions in\n [filter_rows, filter_cols, input_depth, depth_multiplier].\n stride: Stride.\n padding: Padding type.\n data_type: The data type to use.\n use_gpu: Whether to use GPU.\n data_format: The data_format of the input. \"NHWC\" or \"NCHW\".\n \"\"\"\n total_size_1 = 1\n total_size_2 = 1\n for s in tensor_in_sizes:\n total_size_1 *= s\n for s in filter_in_sizes:\n total_size_2 *= s\n # Initializes the input and filter tensor with numbers incrementing from 1.\n x1 = [f * 1.0 for f in range(1, total_size_1 + 1)]\n x2 = [f * 1.0 for f in range(1, total_size_2 + 1)]\n with self.test_session(use_gpu=use_gpu) as sess:\n if data_type == dtypes.float16:\n tolerance = 1e-5\n elif data_type == dtypes.float32:\n tolerance = 1e-5\n else:\n self.assertEqual(data_type, dtypes.float64)\n tolerance = 1e-8\n\n t1 = constant_op.constant(x1, shape=tensor_in_sizes, dtype=data_type)\n t1.set_shape(tensor_in_sizes)\n t2 = constant_op.constant(x2, shape=filter_in_sizes, dtype=data_type)\n\n native_t1 = t1\n strides = [1, stride, stride, 1]\n if data_format == \"NCHW\":\n # Transpose from NWHC input to NCHW\n # Ex. 
[4, 5, 5, 48] to [4, 48, 5, 5]\n native_t1 = array_ops.transpose(t1, [0, 3, 1, 2])\n strides = [1, 1, stride, stride]\n\n conv_native = nn_ops.depthwise_conv2d_native(\n native_t1,\n t2,\n strides=strides,\n data_format=data_format,\n padding=padding)\n\n if data_format == \"NCHW\":\n # Transpose back from NCHW to NHWC\n conv_native = array_ops.transpose(conv_native, [0, 2, 3, 1])\n\n conv_interface = nn_impl.depthwise_conv2d(\n t1, t2, strides=[1, stride, stride, 1], padding=padding)\n\n native_result = sess.run(conv_native)\n interface_result = sess.run(conv_interface)\n\n print(\"data_type:\", data_type, \"use_gpu:\", use_gpu, \"max diff = \",\n np.amax(np.absolute(native_result - interface_result)))\n self.assertArrayNear(\n np.ravel(native_result), np.ravel(interface_result), tolerance)\n self.assertShapeEqual(native_result, conv_native)\n self.assertShapeEqual(native_result, conv_interface)\n\n def testDepthwiseConv2D(self):\n for index, (input_size, filter_size, _, stride,\n padding) in enumerate(ConfigsToTest()):\n print(\"Testing DepthwiseConv2D,\", index, \"th config:\", input_size, \"*\",\n filter_size, \"stride:\", stride, \"padding:\", padding)\n for data_type in [dtypes.float16, dtypes.float32, dtypes.float64]:\n self._VerifyValues(\n input_size, filter_size, stride, padding, data_type, use_gpu=True)\n\n def testDepthwiseConv2DFormat(self):\n if not test.is_gpu_available():\n return\n\n for index, (input_size, filter_size, _, stride,\n padding) in enumerate(ConfigsToTest()):\n print(\"Testing DepthwiseConv2DFormat,\", index, \"th config:\", input_size,\n \"*\", filter_size, \"stride:\", stride, \"padding:\", padding)\n for data_type in [dtypes.float16, dtypes.float32, dtypes.float64]:\n self._VerifyValues(\n input_size,\n filter_size,\n stride,\n padding,\n data_type,\n use_gpu=True,\n data_format=\"NCHW\")\n\n# This is testing against hand calculated results.\n\n def _VerifyHandValues(self, tensor_in_sizes, filter_in_sizes, stride, padding,\n expected, use_gpu):\n \"\"\"Verifies the output values of the depthwise convolution function.\n\n Args:\n tensor_in_sizes: Input tensor dimensions in\n [batch, input_rows, input_cols, input_depth].\n filter_in_sizes: Filter tensor dimensions in\n [filter_rows, filter_cols, input_depth, depth_multiplier].\n stride: Stride.\n padding: Padding type.\n expected: An array containing the expected operation outputs.\n use_gpu: Whether to use GPU.\n \"\"\"\n total_size_1 = 1\n total_size_2 = 1\n for s in tensor_in_sizes:\n total_size_1 *= s\n for s in filter_in_sizes:\n total_size_2 *= s\n # Initializes the input tensor with array containing incrementing\n # numbers from 1.\n x1 = [f * 1.0 for f in range(1, total_size_1 + 1)]\n x2 = [f * 1.0 for f in range(1, total_size_2 + 1)]\n with self.test_session(use_gpu=use_gpu) as sess:\n t1 = constant_op.constant(x1, shape=tensor_in_sizes)\n t1.set_shape(tensor_in_sizes)\n t2 = constant_op.constant(x2, shape=filter_in_sizes)\n conv = nn_ops.depthwise_conv2d_native(\n t1, t2, strides=[1, stride, stride, 1], padding=padding)\n value = sess.run(conv)\n print(\"value = \", value)\n self.assertArrayNear(expected, np.ravel(value), 1e-5)\n self.assertShapeEqual(value, conv)\n\n def testConv2D2x2Filter(self):\n # The inputs look like this (it's a 3 x 2 matrix, each of depth 2):\n #\n # [ (1.0, 2.0), (3.0, 4.0), ( 5.0, 6.0) ]\n # [ (7.0, 8.0), (9.0, 10.0), (11.0, 12.0) ]\n # We can view this as two inputs\n #\n # input depth 0:\n #\n # [ 1.0, 3.0, 5.0 ]\n # [ 7.0, 9.0, 11.0 ]\n #\n # input depth 1:\n #\n # [ 
2.0, 4.0, 6.0 ]\n # [ 8.0, 10.0, 12.0 ]\n #\n # The filter looks like this (it has two 2 x 2 patches, each generating 2\n # depths):\n #\n # filter #0:\n #\n # [ (1.0, 3.0), ( 5.0, 7.0)]\n # [ (9.0, 11.0), (13.0, 15.0)]\n #\n # filter #1:\n #\n # [ ( 2.0, 4.0), ( 6.0, 8.0)]\n # [ (10.0, 12.0), (14.0, 16.0)]\n #\n # So the outputs are:\n #\n # (position 0, 0: in_depth 0, output_depth 0 -- using filter #0)\n # 1.0 * 1.0 + 7.0 * 9.0 + 3.0 * 5.0 + 9.0 * 13.0 = 196\n # (position 0, 0: in_depth 0, output_depth 1 -- using filter #1)\n # 1.0 * 2.0 + 7.0 * 10.0 + 3.0 * 6.0 + 9.0 * 14.0 = 216\n # (position 0, 0: in_depth 1, output_depth 2 -- using filter #0)\n # 2.0 * 3.0 + 8.0 * 11.0 + 4.0 * 7.0 + 10.0 * 15.0 = 272\n # (position 0, 0: in_depth 1, output_depth 3 -- using filter #1)\n # 2.0 * 4.0 + 8.0 * 12.0 + 4.0 * 8.0 + 10.0 * 16.0 = 296\n #\n # (position 1, 0: in_depth 0, output_depth 0 -- using filter #0)\n # 3.0 * 1.0 + 9.0 * 9.0 + 5.0 * 5.0 + 11.0 * 13.0 = 252\n # (position 1, 0: in_depth 0, output_depth 1 -- using filter #1)\n # 3.0 * 2.0 + 9.0 * 10.0 + 5.0 * 6.0 + 11.0 * 14.0 = 280\n # (position 1, 0: in_depth 1, output_depth 2 -- using filter #0)\n # 4.0 * 3.0 + 10.0 * 11.0 + 6.0 * 7.0 + 12.0 * 15.0 = 344\n # (position 1, 0: in_depth 1, output_depth 3 -- using filter #1)\n # 4.0 * 4.0 + 10.0 * 12.0 + 6.0 * 8.0 + 12.0 * 16.0 = 376\n expected_output = [196, 216, 272, 296, 252, 280, 344, 376]\n self._VerifyHandValues(\n tensor_in_sizes=[1, 2, 3, 2],\n filter_in_sizes=[2, 2, 2, 2],\n stride=1,\n padding=\"VALID\",\n expected=expected_output,\n use_gpu=False)\n\n self._VerifyHandValues(\n tensor_in_sizes=[1, 2, 3, 2],\n filter_in_sizes=[2, 2, 2, 2],\n stride=1,\n padding=\"VALID\",\n expected=expected_output,\n use_gpu=True)\n\n # Gradient checkers.This tests depthwise gradient computations for both\n # BackpropFilter and BackpropInput by comparing gradients computed by the\n # depthwise gradient ops with the gradients computed numerically (details can\n # be found in the compute_gradient_error().\n # Note this check is very expensive so the input should not be too big.\n def _ConstructAndTestGradient(self,\n input_shape,\n filter_shape,\n output_shape,\n stride,\n padding,\n data_type,\n test_input,\n use_gpu,\n data_format=\"NHWC\"):\n input_size = 1\n for x in input_shape:\n input_size *= x\n filter_size = 1\n for x in filter_shape:\n filter_size *= x\n input_data = [x * 1.0 / input_size for x in range(0, input_size)]\n filter_data = [x * 1.0 / filter_size for x in range(0, filter_size)]\n with self.test_session(use_gpu=use_gpu):\n if data_type == dtypes.float16:\n tolerance = 0.002\n elif data_type == dtypes.float32:\n tolerance = 0.002\n else:\n self.assertEqual(data_type, dtypes.float64)\n tolerance = 1e-8\n\n input_tensor = constant_op.constant(\n input_data, shape=input_shape, dtype=data_type, name=\"input\")\n filter_tensor = constant_op.constant(\n filter_data, shape=filter_shape, dtype=data_type, name=\"filter\")\n\n native_input = input_tensor\n strides = [1, stride, stride, 1]\n if data_format == \"NCHW\":\n # Transpose from NWHC input to NCHW\n # Ex. 
[4, 5, 5, 48] to [4, 48, 5, 5]\n native_input = array_ops.transpose(input_tensor, [0, 3, 1, 2])\n input_shape = [\n input_shape[0], input_shape[3], input_shape[1], input_shape[2]\n ]\n output_shape = [\n output_shape[0], output_shape[3], output_shape[1], output_shape[2]\n ]\n strides = [1, 1, stride, stride]\n\n depthwise_conv2d = nn_ops.depthwise_conv2d_native(\n native_input,\n filter_tensor,\n strides,\n padding,\n data_format=data_format,\n name=\"depthwise_conv2d\")\n\n self.assertEqual(output_shape, depthwise_conv2d.get_shape())\n if test_input:\n err = gradient_checker.compute_gradient_error(\n native_input, input_shape, depthwise_conv2d, output_shape)\n else:\n err = gradient_checker.compute_gradient_error(filter_tensor,\n filter_shape,\n depthwise_conv2d,\n output_shape)\n print(\"data_type:\", data_type, \"use_gpu:\", use_gpu, \", error = \", err)\n self.assertLess(err, tolerance)\n\n def testDepthwiseConv2DInputGrad(self):\n for index, (input_size, filter_size, output_size, stride,\n padding) in enumerate(CheckGradConfigsToTest()):\n print(\"Testing DepthwiseConv2DInputGrad,\", index, \"th config:\",\n input_size, \"*\", filter_size, \"stride:\", stride, \"padding:\",\n padding)\n # Note: float16 test for DepthwiseConv2DInputGrad is not enabled,\n # calculations are not very precise.\n for data_type in [dtypes.float32, dtypes.float64]:\n self._ConstructAndTestGradient(\n input_size,\n filter_size,\n output_size,\n stride,\n padding,\n data_type,\n test_input=True,\n use_gpu=True)\n\n def testDepthwiseConv2DInputGradFormat(self):\n if not test.is_gpu_available():\n return\n\n for index, (input_size, filter_size, output_size, stride,\n padding) in enumerate(CheckGradConfigsToTest()):\n print(\"Testing DepthwiseConv2DInputGradFormat,\", index, \"th config:\",\n input_size, \"*\", filter_size, \"stride:\", stride, \"padding:\",\n padding)\n # Note: float16 test for DepthwiseConv2DInputGradFormat is not enabled,\n # calculations are not very precise.\n for data_type in [dtypes.float32, dtypes.float64]:\n self._ConstructAndTestGradient(\n input_size,\n filter_size,\n output_size,\n stride,\n padding,\n data_type,\n test_input=True,\n use_gpu=True,\n data_format=\"NCHW\")\n\n def testDepthwiseConv2DFilterGrad(self):\n for index, (input_size, filter_size, output_size, stride,\n padding) in enumerate(CheckGradConfigsToTest()):\n print(\"Testing DepthwiseConv2DFilterGrad,\", index, \"th config:\",\n input_size, \"*\", filter_size, \"stride:\", stride, \"padding:\",\n padding)\n # Note: float16 test for DepthwiseConv2DFilterGrad is not enabled,\n # calculations are not very precise.\n for data_type in [dtypes.float32, dtypes.float64]:\n self._ConstructAndTestGradient(\n input_size,\n filter_size,\n output_size,\n stride,\n padding,\n data_type,\n test_input=False,\n use_gpu=True)\n\n def testDepthwiseConv2DFilterGradFormat(self):\n if not test.is_gpu_available():\n return\n\n for index, (input_size, filter_size, output_size, stride,\n padding) in enumerate(CheckGradConfigsToTest()):\n print(\"Testing DepthwiseConv2DFilterGradFormat,\", index, \"th config:\",\n input_size, \"*\", filter_size, \"stride:\", stride, \"padding:\",\n padding)\n # Note: float16 test for DepthwiseConv2DFilterGradFormat is not enabled,\n # calculations are not very precise.\n for data_type in [dtypes.float32, dtypes.float64]:\n self._ConstructAndTestGradient(\n input_size,\n filter_size,\n output_size,\n stride,\n padding,\n data_type,\n test_input=False,\n use_gpu=True,\n data_format=\"NCHW\")\n\n def 
_CompareBackpropInputFloat(self, input_sizes, filter_sizes, output_sizes,\n stride, padding):\n x1 = np.random.rand(*filter_sizes).astype(np.float32)\n x2 = np.random.rand(*output_sizes).astype(np.float32)\n\n def _GetVal(use_gpu):\n with self.test_session(use_gpu=use_gpu):\n t0 = constant_op.constant(input_sizes, shape=[len(input_sizes)])\n t1 = constant_op.constant(x1, shape=filter_sizes)\n t2 = constant_op.constant(x2, shape=output_sizes)\n backprop = nn_ops.depthwise_conv2d_native_backprop_input(\n t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)\n ret = backprop.eval()\n self.assertShapeEqual(ret, backprop)\n return ret\n\n gpu_value = _GetVal(use_gpu=True)\n cpu_value = _GetVal(use_gpu=False)\n self.assertAllClose(cpu_value, gpu_value, rtol=1e-4, atol=1e-4)\n\n def _CompareBackpropInputDouble(self, input_sizes, filter_sizes, output_sizes,\n stride, padding):\n x1 = np.random.rand(*filter_sizes).astype(np.float64)\n x2 = np.random.rand(*output_sizes).astype(np.float64)\n\n def _GetVal(use_gpu):\n with self.test_session(use_gpu=use_gpu):\n t0 = constant_op.constant(input_sizes, shape=[len(input_sizes)])\n t1 = constant_op.constant(x1, shape=filter_sizes)\n t2 = constant_op.constant(x2, shape=output_sizes)\n backprop = nn_ops.depthwise_conv2d_native_backprop_input(\n t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)\n ret = backprop.eval()\n self.assertShapeEqual(ret, backprop)\n return ret\n\n gpu_value = _GetVal(use_gpu=True)\n cpu_value = _GetVal(use_gpu=False)\n self.assertAllClose(cpu_value, gpu_value, rtol=1e-4, atol=1e-4)\n\n def testDepthwiseConv2DInputGradCompare(self):\n for index, (input_size, filter_size, output_size, stride,\n padding) in enumerate(ConfigsToTest()):\n print(\"Testing DepthwiseConv2DInputGradCompare,\", index, \"th config:\",\n input_size, \"*\", filter_size, \"stride:\", stride, \"padding:\",\n padding)\n self._CompareBackpropInputFloat(input_size, filter_size, output_size,\n stride, padding)\n self._CompareBackpropInputDouble(input_size, filter_size, output_size,\n stride, padding)\n\n def _CompareBackpropFilterFloat(self, input_sizes, filter_sizes, output_sizes,\n stride, padding):\n x0 = np.random.rand(*input_sizes).astype(np.float32)\n x2 = np.random.rand(*output_sizes).astype(np.float32)\n\n def _GetVal(use_gpu):\n with self.test_session(use_gpu=use_gpu):\n t0 = constant_op.constant(x0, shape=input_sizes)\n t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)])\n t2 = constant_op.constant(x2, shape=output_sizes)\n backprop = nn_ops.depthwise_conv2d_native_backprop_filter(\n t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)\n ret = backprop.eval()\n self.assertShapeEqual(ret, backprop)\n return ret\n\n gpu_value = _GetVal(use_gpu=True)\n cpu_value = _GetVal(use_gpu=False)\n self.assertAllClose(cpu_value, gpu_value, rtol=1e-4, atol=1e-4)\n\n def _CompareBackpropFilterDouble(self, input_sizes, filter_sizes,\n output_sizes, stride, padding):\n x0 = np.random.rand(*input_sizes).astype(np.float64)\n x2 = np.random.rand(*output_sizes).astype(np.float64)\n\n def _GetVal(use_gpu):\n with self.test_session(use_gpu=use_gpu):\n t0 = constant_op.constant(x0, shape=input_sizes)\n t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)])\n t2 = constant_op.constant(x2, shape=output_sizes)\n backprop = nn_ops.depthwise_conv2d_native_backprop_filter(\n t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)\n ret = backprop.eval()\n self.assertShapeEqual(ret, backprop)\n return ret\n\n gpu_value = 
_GetVal(use_gpu=True)\n cpu_value = _GetVal(use_gpu=False)\n self.assertAllClose(cpu_value, gpu_value, rtol=1e-4, atol=1e-4)\n\n def testDepthwiseConv2DFilterGradCompare(self):\n for index, (input_size, filter_size, output_size, stride,\n padding) in enumerate(ConfigsToTest()):\n print(\"Testing DepthwiseConv2DFilterGradCompare,\", index, \"th config:\",\n input_size, \"*\", filter_size, \"stride:\", stride, \"padding:\",\n padding)\n self._CompareBackpropFilterFloat(input_size, filter_size, output_size,\n stride, padding)\n self._CompareBackpropFilterDouble(input_size, filter_size, output_size,\n stride, padding)\n\n\nif __name__ == \"__main__\":\n test.main()\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nimport os\n\nimport six\n\nfrom tensorflow.contrib.eager.python import checkpointable\nfrom tensorflow.contrib.eager.python import network as network_lib\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import test\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.layers import base\nfrom tensorflow.python.layers import core\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.training import adam\nfrom tensorflow.python.training import saver as core_saver\nfrom tensorflow.python.training import training_util\n\n\nclass CheckpointableDenseLayer(core.Dense, checkpointable.Checkpointable):\n\n def __init__(self, *args, **kwargs):\n checkpointable.Checkpointable.__init__(self)\n core.Dense.__init__(self, *args, **kwargs)\n\n def add_variable(self, name, shape, **kwargs):\n # Calls both Checkpointable.add_variable and Layer.add_variable. 
Eventually\n # Layer.add_variable should inherit from Checkpointable and simply call\n # super and then do post-processing.\n return checkpointable.Checkpointable.add_variable(\n self,\n name=name,\n shape=shape,\n getter=functools.partial(core.Dense.add_variable, self),\n **kwargs)\n\n\n# pylint: disable=not-callable\nclass CheckpointableNetwork(network_lib.Network, checkpointable.Checkpointable):\n\n def __init__(self):\n network_lib.Network.__init__(self)\n checkpointable.Checkpointable.__init__(self)\n\n def __setattr__(self, name, value):\n if isinstance(value, base.Layer) and value not in self._already_tracked:\n self.track_layer(value, name=name)\n # Checkpointable is next in the method resolution order, so this will catch\n # Checkpointable objects which aren't Layers.\n super(CheckpointableNetwork, self).__setattr__(name, value)\n\n def track_layer(self, layer, name):\n self.track_checkpointable(layer, name=name)\n return super(CheckpointableNetwork, self).track_layer(layer)\n\n\nclass CheckpointableAdam(adam.AdamOptimizer, checkpointable.Checkpointable):\n\n def __init__(self, *args, **kwargs):\n checkpointable.Checkpointable.__init__(self)\n adam.AdamOptimizer.__init__(self, *args, **kwargs)\n\n # NOTE: Copied from Optimizer with modifications to use add_variable\n # for non-slot variables. These contortions are necessary to maintain\n # checkpoint compatibility with variable.name based saving.\n # TODO(allenl): Make this cleaner.\n def _create_non_slot_variable(self, initial_value, name, colocate_with):\n \"\"\"Add an extra variable, not associated with a slot.\"\"\"\n if context.in_graph_mode():\n graph = colocate_with.graph\n else:\n graph = None\n\n key = (name, graph)\n v = self._non_slot_dict.get(key, None)\n if v is None:\n with ops.colocate_with(colocate_with):\n def _variable_getter(name, shape, dtype, initializer):\n del shape, dtype # not used, but there for compatibility\n return variable_scope.variable(\n name=name, initial_value=initializer, trainable=False)\n\n initial_value = ops.convert_to_tensor(initial_value)\n v = self.add_variable(\n name=name,\n shape=initial_value.get_shape(),\n initializer=initial_value,\n getter=_variable_getter)\n\n self._non_slot_dict[key] = v\n\n return v\n\n\nclass NonLayerCheckpointable(checkpointable.Checkpointable):\n\n def __init__(self):\n super(NonLayerCheckpointable, self).__init__()\n self.a_variable = self.add_variable(name=\"a_variable\", shape=[])\n\n\nclass MyNetwork(CheckpointableNetwork):\n \"\"\"A concrete Network for testing.\"\"\"\n\n def __init__(self):\n super(MyNetwork, self).__init__()\n self._named_dense = CheckpointableDenseLayer(1, use_bias=True)\n self._via_track_layer = self.track_layer(\n CheckpointableDenseLayer(1, use_bias=False), name=\"via_track_layer\")\n # We can still track Checkpointables which aren't Layers.\n self._non_layer = NonLayerCheckpointable()\n\n def call(self, values):\n return self._via_track_layer(self._named_dense(values))\n\n\nclass Root(checkpointable.Checkpointable):\n \"\"\"A stand-in for a Trainer class.\"\"\"\n\n def __init__(self, optimizer, network):\n super(Root, self).__init__()\n self._optimizer = optimizer\n self._network = self.track_checkpointable(network, \"network\")\n self._global_step = None\n\n @property\n def global_step(self):\n if self._global_step is None:\n # Get the default create_global_step utility to actually call\n # self.add_variable, by setting a custom creator.\n def _owned_variable_as_creator(\n next_creator, initial_value, **kwargs):\n def 
_creator_as_getter(initializer, **kwargs):\n return next_creator(initial_value=initializer, **kwargs)\n return self.add_variable(\n getter=_creator_as_getter, initializer=initial_value, shape=[],\n **kwargs)\n\n with variable_scope.variable_creator_scope(\n _owned_variable_as_creator):\n self._global_step = training_util.create_global_step()\n return self._global_step\n\n\nclass InterfaceTests(test.TestCase):\n\n @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)\n def testAddVariable(self):\n obj = NonLayerCheckpointable()\n with self.assertRaisesRegexp(ValueError, \"do not specify shape\"):\n obj.add_variable(\n name=\"shape_specified_twice\", shape=[], initializer=1)\n constant_initializer = obj.add_variable(\n name=\"constant_initializer\", initializer=1)\n with variable_scope.variable_scope(\"some_variable_scope\"):\n ones_initializer = obj.add_variable(\n name=\"ones_initializer\",\n shape=[2],\n initializer=init_ops.ones_initializer(dtype=dtypes.float32))\n bare_initializer = obj.add_variable(\n name=\"bare_initializer\",\n shape=[2, 2],\n dtype=dtypes.float64,\n initializer=init_ops.zeros_initializer)\n\n # Even in graph mode, there are no naming conflicts between objects, only\n # naming conflicts within an object.\n other_duplicate = resource_variable_ops.ResourceVariable(\n name=\"duplicate\", initial_value=1.)\n duplicate = obj.add_variable(name=\"duplicate\", shape=[])\n with self.assertRaisesRegexp(ValueError, \"'duplicate' already exists\"):\n obj.add_variable(name=\"duplicate\", shape=[])\n\n if context.in_graph_mode():\n self.evaluate(variables.global_variables_initializer())\n self.assertEqual(\"constant_initializer:0\", constant_initializer.name)\n self.assertEqual(1, self.evaluate(constant_initializer))\n self.assertEqual(\"some_variable_scope/ones_initializer:0\",\n ones_initializer.name)\n self.assertAllEqual([1, 1], self.evaluate(ones_initializer))\n self.assertAllEqual([[0., 0.],\n [0., 0.]], self.evaluate(bare_initializer))\n self.assertEqual(\"a_variable:0\", obj.a_variable.name)\n self.assertEqual(\"duplicate:0\", other_duplicate.name)\n if context.in_graph_mode():\n # The .name attribute may be globally influenced, but the checkpoint name\n # won't be (tested below).\n self.assertEqual(\"duplicate_1:0\", duplicate.name)\n else:\n # When executing eagerly, there's no uniquification of variable names. The\n # checkpoint name will be the same.\n self.assertEqual(\"duplicate:0\", duplicate.name)\n named_variables, _ = checkpointable._serialize_object_graph(obj)\n expected_checkpoint_names = (\n \"a_variable\",\n \"bare_initializer\",\n \"constant_initializer\",\n \"duplicate\",\n \"ones_initializer\",\n )\n six.assertCountEqual(\n self, expected_checkpoint_names, named_variables.keys())\n\n def testInitNotCalled(self):\n\n class NoInit(checkpointable.Checkpointable):\n\n def __init__(self):\n pass\n\n with self.assertRaisesRegexp(RuntimeError, \"__init__\"):\n NoInit().add_variable(\"var\", shape=[])\n\n\nclass CheckpointingTests(test.TestCase):\n\n @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)\n def testNamingWithOptimizer(self):\n input_value = constant_op.constant([[3.]])\n network = MyNetwork()\n # A nuisance Network using the same optimizer. 
Its slot variables should not\n # go in the checkpoint, since it is never depended on.\n other_network = MyNetwork()\n optimizer = CheckpointableAdam(0.001)\n root_checkpointable = Root(optimizer=optimizer, network=network)\n if context.in_eager_mode():\n optimizer.minimize(\n lambda: network(input_value),\n global_step=root_checkpointable.global_step)\n optimizer.minimize(\n lambda: other_network(input_value),\n global_step=root_checkpointable.global_step)\n else:\n train_op = optimizer.minimize(\n network(input_value), global_step=root_checkpointable.global_step)\n optimizer.minimize(\n other_network(input_value),\n global_step=root_checkpointable.global_step)\n self.evaluate(variables.global_variables_initializer())\n self.evaluate(train_op)\n named_variables, serialized_graph = checkpointable._serialize_object_graph(\n root_checkpointable)\n expected_checkpoint_names = (\n # Created in the root node, so no prefix.\n \"global_step\",\n # No name provided to track_checkpointable(), so the position is used\n # instead (one-based).\n \"network/via_track_layer/kernel\",\n # track_checkpointable() with a name provided, so that's used\n \"network/_named_dense/kernel\",\n \"network/_named_dense/bias\",\n # non-Layer dependency of the network\n \"network/_non_layer/a_variable\",\n # The optimizer creates two non-slot variables\n \"_optimizer/beta1_power\",\n \"_optimizer/beta2_power\",\n # Slot variables\n \"network/via_track_layer/kernel/-OPTIMIZER_SLOT/_optimizer/m\",\n \"network/via_track_layer/kernel/-OPTIMIZER_SLOT/_optimizer/v\",\n \"network/_named_dense/kernel/-OPTIMIZER_SLOT/_optimizer/m\",\n \"network/_named_dense/kernel/-OPTIMIZER_SLOT/_optimizer/v\",\n \"network/_named_dense/bias/-OPTIMIZER_SLOT/_optimizer/m\",\n \"network/_named_dense/bias/-OPTIMIZER_SLOT/_optimizer/v\",\n )\n six.assertCountEqual(self, expected_checkpoint_names,\n named_variables.keys())\n # Check that we've mapped to the right variable objects (not exhaustive)\n self.assertEqual(\"global_step:0\", named_variables[\"global_step\"].name)\n self.assertEqual(\"my_network/checkpointable_dense_layer_1/kernel:0\",\n named_variables[\"network/via_track_layer/kernel\"].name)\n self.assertEqual(\"my_network/checkpointable_dense_layer/kernel:0\",\n named_variables[\"network/_named_dense/kernel\"].name)\n self.assertEqual(\"beta1_power:0\",\n named_variables[\"_optimizer/beta1_power\"].name)\n self.assertEqual(\"beta2_power:0\",\n named_variables[\"_optimizer/beta2_power\"].name)\n # Spot check the generated protocol buffers.\n self.assertEqual(\"_optimizer\",\n serialized_graph.nodes[0].children[0].local_name)\n optimizer_node = serialized_graph.nodes[serialized_graph.nodes[0].children[\n 0].node_id]\n self.assertEqual(\"beta1_power\", optimizer_node.variables[0].local_name)\n self.assertEqual(\"beta1_power\", optimizer_node.variables[0].full_name)\n # Variable ordering is arbitrary but deterministic (alphabetized)\n self.assertEqual(\n \"bias\", optimizer_node.slot_variables[0].original_variable_local_name)\n original_variable_owner = serialized_graph.nodes[\n optimizer_node.slot_variables[0].original_variable_node_id]\n self.assertEqual(\"network/_named_dense/bias\",\n original_variable_owner.variables[0].checkpoint_key)\n self.assertEqual(\"bias\", original_variable_owner.variables[0].local_name)\n self.assertEqual(\"m\", optimizer_node.slot_variables[0].slot_name)\n self.assertEqual(\"network/_named_dense/bias/-OPTIMIZER_SLOT/_optimizer/m\",\n optimizer_node.slot_variables[0].checkpoint_key)\n # We strip off the :0 
suffix, as variable.name-based saving does.\n self.assertEqual(\"my_network/checkpointable_dense_layer/bias/Adam\",\n optimizer_node.slot_variables[0].full_name)\n self.assertEqual(\"my_network/checkpointable_dense_layer/bias/Adam:0\",\n optimizer.get_slot(\n var=named_variables[\"network/_named_dense/bias\"],\n name=\"m\").name)\n\n @test_util.run_in_graph_and_eager_modes()\n def testSaveRestore(self):\n network = MyNetwork()\n optimizer = CheckpointableAdam(0.001)\n root_checkpointable = Root(optimizer=optimizer, network=network)\n input_value = constant_op.constant([[3.]])\n if context.in_eager_mode():\n optimizer.minimize(\n lambda: network(input_value),\n global_step=root_checkpointable.global_step)\n else:\n train_op = optimizer.minimize(\n network(input_value), global_step=root_checkpointable.global_step)\n self.evaluate(variables.global_variables_initializer())\n self.evaluate(train_op)\n prefix = os.path.join(self.get_temp_dir(), \"ckpt\")\n self.evaluate(state_ops.assign(network._named_dense.variables[1], [42.]))\n m_bias_slot = optimizer.get_slot(network._named_dense.variables[1], \"m\")\n self.evaluate(state_ops.assign(m_bias_slot, [1.5]))\n serialized_graph, save_path = checkpointable.save(\n file_prefix=prefix,\n root_checkpointable=root_checkpointable,\n global_step=root_checkpointable.global_step)\n self.evaluate(state_ops.assign(network._named_dense.variables[1], [43.]))\n self.evaluate(state_ops.assign(root_checkpointable.global_step, 3))\n optimizer_variables = self.evaluate(optimizer.variables())\n self.evaluate(state_ops.assign(m_bias_slot, [-2.]))\n # Immediate restoration\n checkpointable.restore(\n save_path=save_path,\n root_checkpointable=root_checkpointable,\n object_graph_proto=serialized_graph)\n self.assertAllEqual([42.], self.evaluate(network._named_dense.variables[1]))\n self.assertAllEqual(1, self.evaluate(root_checkpointable.global_step))\n self.assertAllEqual([1.5], self.evaluate(m_bias_slot))\n with ops.Graph().as_default():\n on_create_network = MyNetwork()\n on_create_optimizer = CheckpointableAdam(0.001)\n on_create_root = Root(\n optimizer=on_create_optimizer, network=on_create_network)\n with self.test_session(graph=ops.get_default_graph()):\n # Deferred restoration\n checkpointable.restore(\n save_path=save_path,\n root_checkpointable=on_create_root,\n object_graph_proto=serialized_graph)\n on_create_network(constant_op.constant([[3.]])) # create variables\n self.assertAllEqual(1, self.evaluate(on_create_root.global_step))\n self.assertAllEqual([42.],\n self.evaluate(\n on_create_network._named_dense.variables[1]))\n on_create_m_bias_slot = on_create_optimizer.get_slot(\n on_create_network._named_dense.variables[1], \"m\")\n # Optimizer slot variables are created when the original variable is\n # restored.\n self.assertAllEqual([1.5], self.evaluate(on_create_m_bias_slot))\n # beta1_power and beta2_power haven't been created yet, but everything\n # else matches.\n self.assertAllEqual(optimizer_variables[2:],\n self.evaluate(on_create_optimizer.variables()))\n on_create_optimizer._create_slots(\n [resource_variable_ops.ResourceVariable([1.])])\n beta1_power, beta2_power = on_create_optimizer._get_beta_accumulators()\n self.assertAllEqual(optimizer_variables[0], self.evaluate(beta1_power))\n self.assertAllEqual(optimizer_variables[1], self.evaluate(beta2_power))\n\n def testDeferredRestorationUsageEager(self):\n \"\"\"An idiomatic eager execution example.\"\"\"\n num_training_steps = 10\n checkpoint_directory = self.get_temp_dir()\n checkpoint_prefix 
= os.path.join(checkpoint_directory, \"ckpt\")\n latest_object_graph = None # Will be saved with the checkpoint eventually.\n for training_continuation in range(3):\n with ops.Graph().as_default():\n network = MyNetwork()\n optimizer = CheckpointableAdam(0.001)\n root = Root(optimizer=optimizer, network=network)\n checkpointable.restore(\n save_path=core_saver.latest_checkpoint(checkpoint_directory),\n root_checkpointable=root,\n object_graph_proto=latest_object_graph)\n for _ in range(num_training_steps):\n # TODO(allenl): Use a Dataset and serialize/checkpoint it.\n input_value = constant_op.constant([[3.]])\n optimizer.minimize(\n lambda: network(input_value), # pylint: disable=cell-var-from-loop\n global_step=root.global_step)\n latest_object_graph, _ = checkpointable.save(\n file_prefix=checkpoint_prefix,\n root_checkpointable=root)\n self.assertEqual((training_continuation + 1) * num_training_steps,\n root.global_step.numpy())\n\n def testUsageGraph(self):\n \"\"\"Expected usage when graph building.\"\"\"\n with context.graph_mode():\n num_training_steps = 10\n checkpoint_directory = self.get_temp_dir()\n checkpoint_prefix = os.path.join(checkpoint_directory, \"ckpt\")\n latest_object_graph = None\n for training_continuation in range(3):\n with ops.Graph().as_default():\n network = MyNetwork()\n optimizer = CheckpointableAdam(0.001)\n root = Root(optimizer=optimizer, network=network)\n input_value = constant_op.constant([[3.]])\n train_op = optimizer.minimize(\n network(input_value),\n global_step=root.global_step)\n init_op = variables.global_variables_initializer()\n checkpoint_path = core_saver.latest_checkpoint(checkpoint_directory)\n with self.test_session(graph=ops.get_default_graph()) as session:\n if checkpoint_path is None:\n self.assertEqual(0, training_continuation)\n session.run(init_op)\n # Another alternative would be to run initializers automatically\n # if no checkpoint is being loaded. 
This would make deferred\n # loading a bit more useful with graph execution.\n else:\n checkpointable.restore(\n save_path=checkpoint_path,\n root_checkpointable=root,\n object_graph_proto=latest_object_graph,\n session=session)\n for _ in range(num_training_steps):\n session.run(train_op)\n latest_object_graph, _ = checkpointable.save(\n file_prefix=checkpoint_prefix,\n root_checkpointable=root,\n session=session)\n self.assertEqual((training_continuation + 1) * num_training_steps,\n session.run(root.global_step))\n\n def _get_checkpoint_name(self, name):\n root = checkpointable.Checkpointable()\n root.add_variable(name=name, shape=[1, 2], dtype=dtypes.float64)\n named_variables, _ = checkpointable._serialize_object_graph(root)\n checkpoint_name, = named_variables.keys()\n with ops.name_scope(\"root/\" + checkpoint_name):\n pass # Make sure we can use this as an op name if we prefix it.\n return checkpoint_name\n\n @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)\n def testVariableNameEscaping(self):\n self.assertEqual(r\"a_S__b_S__c\", self._get_checkpoint_name(r\"a/b/c\"))\n self.assertEqual(r\"b\", self._get_checkpoint_name(r\"b\"))\n self.assertEqual(r\"c_S__\", self._get_checkpoint_name(r\"c/\"))\n self.assertEqual(r\"d_S___S_._\", self._get_checkpoint_name(r\"d/_S__\"))\n\n @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)\n def testNumberedPath(self):\n root = checkpointable.Checkpointable()\n leaf = checkpointable.Checkpointable()\n root.track_checkpointable(leaf, name=\"leaf\")\n leaf.add_variable(name=\"v\", shape=[])\n named_variables, _ = checkpointable._serialize_object_graph(root)\n variable_name, = named_variables.keys()\n self.assertEqual(r\"leaf/v\", variable_name)\n\n @test_util.run_in_graph_and_eager_modes()\n def testLocalNameValidation(self):\n root = checkpointable.Checkpointable()\n leaf = checkpointable.Checkpointable()\n with self.assertRaisesRegexp(ValueError, \"invalid name\"):\n # Leading dashes are reserved, which avoids conflicts with un-named edges\n # in paths and the optimizer slots identifier.\n root.track_checkpointable(leaf, name=\"-unnamed-12\")\n\n\nif __name__ == \"__main__\":\n test.main()\n", "# Copyright 2016 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops.distributions import dirichlet_multinomial\nfrom tensorflow.python.platform import test\n\n\nds = dirichlet_multinomial\n\n\nclass DirichletMultinomialTest(test.TestCase):\n\n def setUp(self):\n self._rng = np.random.RandomState(42)\n\n def testSimpleShapes(self):\n with self.test_session():\n alpha = np.random.rand(3)\n dist = ds.DirichletMultinomial(1., alpha)\n self.assertEqual(3, dist.event_shape_tensor().eval())\n self.assertAllEqual([], dist.batch_shape_tensor().eval())\n self.assertEqual(tensor_shape.TensorShape([3]), dist.event_shape)\n self.assertEqual(tensor_shape.TensorShape([]), dist.batch_shape)\n\n def testComplexShapes(self):\n with self.test_session():\n alpha = np.random.rand(3, 2, 2)\n n = [[3., 2], [4, 5], [6, 7]]\n dist = ds.DirichletMultinomial(n, alpha)\n self.assertEqual(2, dist.event_shape_tensor().eval())\n self.assertAllEqual([3, 2], dist.batch_shape_tensor().eval())\n self.assertEqual(tensor_shape.TensorShape([2]), dist.event_shape)\n self.assertEqual(tensor_shape.TensorShape([3, 2]), dist.batch_shape)\n\n def testNproperty(self):\n alpha = [[1., 2, 3]]\n n = [[5.]]\n with self.test_session():\n dist = ds.DirichletMultinomial(n, alpha)\n self.assertEqual([1, 1], dist.total_count.get_shape())\n self.assertAllClose(n, dist.total_count.eval())\n\n def testAlphaProperty(self):\n alpha = [[1., 2, 3]]\n with self.test_session():\n dist = ds.DirichletMultinomial(1, alpha)\n self.assertEqual([1, 3], dist.concentration.get_shape())\n self.assertAllClose(alpha, dist.concentration.eval())\n\n def testPmfNandCountsAgree(self):\n alpha = [[1., 2, 3]]\n n = [[5.]]\n with self.test_session():\n dist = ds.DirichletMultinomial(n, alpha, validate_args=True)\n dist.prob([2., 3, 0]).eval()\n dist.prob([3., 0, 2]).eval()\n with self.assertRaisesOpError(\"must be non-negative\"):\n dist.prob([-1., 4, 2]).eval()\n with self.assertRaisesOpError(\n \"last-dimension must sum to `self.total_count`\"):\n dist.prob([3., 3, 0]).eval()\n\n def testPmfNonIntegerCounts(self):\n alpha = [[1., 2, 3]]\n n = [[5.]]\n with self.test_session():\n dist = ds.DirichletMultinomial(n, alpha, validate_args=True)\n dist.prob([2., 3, 0]).eval()\n dist.prob([3., 0, 2]).eval()\n dist.prob([3.0, 0, 2.0]).eval()\n # Both equality and integer checking fail.\n placeholder = array_ops.placeholder(dtypes.float32)\n with self.assertRaisesOpError(\n \"cannot contain fractional components\"):\n dist.prob(placeholder).eval(feed_dict={placeholder: [1.0, 2.5, 1.5]})\n dist = ds.DirichletMultinomial(n, alpha, 
validate_args=False)\n dist.prob([1., 2., 3.]).eval()\n # Non-integer arguments work.\n dist.prob([1.0, 2.5, 1.5]).eval()\n\n def testPmfBothZeroBatches(self):\n # The probabilities of one vote falling into class k is the mean for class\n # k.\n with self.test_session():\n # Both zero-batches. No broadcast\n alpha = [1., 2]\n counts = [1., 0]\n dist = ds.DirichletMultinomial(1., alpha)\n pmf = dist.prob(counts)\n self.assertAllClose(1 / 3., pmf.eval())\n self.assertEqual((), pmf.get_shape())\n\n def testPmfBothZeroBatchesNontrivialN(self):\n # The probabilities of one vote falling into class k is the mean for class\n # k.\n with self.test_session():\n # Both zero-batches. No broadcast\n alpha = [1., 2]\n counts = [3., 2]\n dist = ds.DirichletMultinomial(5., alpha)\n pmf = dist.prob(counts)\n self.assertAllClose(1 / 7., pmf.eval())\n self.assertEqual((), pmf.get_shape())\n\n def testPmfBothZeroBatchesMultidimensionalN(self):\n # The probabilities of one vote falling into class k is the mean for class\n # k.\n with self.test_session():\n alpha = [1., 2]\n counts = [3., 2]\n n = np.full([4, 3], 5., dtype=np.float32)\n dist = ds.DirichletMultinomial(n, alpha)\n pmf = dist.prob(counts)\n self.assertAllClose([[1 / 7., 1 / 7., 1 / 7.]] * 4, pmf.eval())\n self.assertEqual((4, 3), pmf.get_shape())\n\n def testPmfAlphaStretchedInBroadcastWhenSameRank(self):\n # The probabilities of one vote falling into class k is the mean for class\n # k.\n with self.test_session():\n alpha = [[1., 2]]\n counts = [[1., 0], [0., 1]]\n dist = ds.DirichletMultinomial([1.], alpha)\n pmf = dist.prob(counts)\n self.assertAllClose([1 / 3., 2 / 3.], pmf.eval())\n self.assertAllEqual([2], pmf.get_shape())\n\n def testPmfAlphaStretchedInBroadcastWhenLowerRank(self):\n # The probabilities of one vote falling into class k is the mean for class\n # k.\n with self.test_session():\n alpha = [1., 2]\n counts = [[1., 0], [0., 1]]\n pmf = ds.DirichletMultinomial(1., alpha).prob(counts)\n self.assertAllClose([1 / 3., 2 / 3.], pmf.eval())\n self.assertAllEqual([2], pmf.get_shape())\n\n def testPmfCountsStretchedInBroadcastWhenSameRank(self):\n # The probabilities of one vote falling into class k is the mean for class\n # k.\n with self.test_session():\n alpha = [[1., 2], [2., 3]]\n counts = [[1., 0]]\n pmf = ds.DirichletMultinomial([1., 1.], alpha).prob(counts)\n self.assertAllClose([1 / 3., 2 / 5.], pmf.eval())\n self.assertAllEqual([2], pmf.get_shape())\n\n def testPmfCountsStretchedInBroadcastWhenLowerRank(self):\n # The probabilities of one vote falling into class k is the mean for class\n # k.\n with self.test_session():\n alpha = [[1., 2], [2., 3]]\n counts = [1., 0]\n pmf = ds.DirichletMultinomial(1., alpha).prob(counts)\n self.assertAllClose([1 / 3., 2 / 5.], pmf.eval())\n self.assertAllEqual([2], pmf.get_shape())\n\n def testPmfForOneVoteIsTheMeanWithOneRecordInput(self):\n # The probabilities of one vote falling into class k is the mean for class\n # k.\n alpha = [1., 2, 3]\n with self.test_session():\n for class_num in range(3):\n counts = np.zeros([3], dtype=np.float32)\n counts[class_num] = 1\n dist = ds.DirichletMultinomial(1., alpha)\n mean = dist.mean().eval()\n pmf = dist.prob(counts).eval()\n\n self.assertAllClose(mean[class_num], pmf)\n self.assertAllEqual([3], mean.shape)\n self.assertAllEqual([], pmf.shape)\n\n def testMeanDoubleTwoVotes(self):\n # The probabilities of two votes falling into class k for\n # DirichletMultinomial(2, alpha) is twice as much as the probability of one\n # vote falling into class k for 
DirichletMultinomial(1, alpha)\n alpha = [1., 2, 3]\n with self.test_session():\n for class_num in range(3):\n counts_one = np.zeros([3], dtype=np.float32)\n counts_one[class_num] = 1.\n counts_two = np.zeros([3], dtype=np.float32)\n counts_two[class_num] = 2\n\n dist1 = ds.DirichletMultinomial(1., alpha)\n dist2 = ds.DirichletMultinomial(2., alpha)\n\n mean1 = dist1.mean().eval()\n mean2 = dist2.mean().eval()\n\n self.assertAllClose(mean2[class_num], 2 * mean1[class_num])\n self.assertAllEqual([3], mean1.shape)\n\n def testCovarianceFromSampling(self):\n # We will test mean, cov, var, stddev on a DirichletMultinomial constructed\n # via broadcast between alpha, n.\n alpha = np.array([[1., 2, 3],\n [2.5, 4, 0.01]], dtype=np.float32)\n # Ideally we'd be able to test broadcasting but, the multinomial sampler\n # doesn't support different total counts.\n n = np.float32(5)\n with self.test_session() as sess:\n # batch_shape=[2], event_shape=[3]\n dist = ds.DirichletMultinomial(n, alpha)\n x = dist.sample(int(250e3), seed=1)\n sample_mean = math_ops.reduce_mean(x, 0)\n x_centered = x - sample_mean[array_ops.newaxis, ...]\n sample_cov = math_ops.reduce_mean(math_ops.matmul(\n x_centered[..., array_ops.newaxis],\n x_centered[..., array_ops.newaxis, :]), 0)\n sample_var = array_ops.matrix_diag_part(sample_cov)\n sample_stddev = math_ops.sqrt(sample_var)\n [\n sample_mean_,\n sample_cov_,\n sample_var_,\n sample_stddev_,\n analytic_mean,\n analytic_cov,\n analytic_var,\n analytic_stddev,\n ] = sess.run([\n sample_mean,\n sample_cov,\n sample_var,\n sample_stddev,\n dist.mean(),\n dist.covariance(),\n dist.variance(),\n dist.stddev(),\n ])\n self.assertAllClose(sample_mean_, analytic_mean, atol=0., rtol=0.04)\n self.assertAllClose(sample_cov_, analytic_cov, atol=0., rtol=0.05)\n self.assertAllClose(sample_var_, analytic_var, atol=0., rtol=0.05)\n self.assertAllClose(sample_stddev_, analytic_stddev, atol=0., rtol=0.02)\n\n def testCovariance(self):\n # Shape [2]\n alpha = [1., 2]\n ns = [2., 3., 4., 5.]\n alpha_0 = np.sum(alpha)\n\n # Diagonal entries are of the form:\n # Var(X_i) = n * alpha_i / alpha_sum * (1 - alpha_i / alpha_sum) *\n # (alpha_sum + n) / (alpha_sum + 1)\n variance_entry = lambda a, a_sum: a / a_sum * (1 - a / a_sum)\n # Off diagonal entries are of the form:\n # Cov(X_i, X_j) = -n * alpha_i * alpha_j / (alpha_sum ** 2) *\n # (alpha_sum + n) / (alpha_sum + 1)\n covariance_entry = lambda a, b, a_sum: -a * b / a_sum**2\n # Shape [2, 2].\n shared_matrix = np.array([[\n variance_entry(alpha[0], alpha_0),\n covariance_entry(alpha[0], alpha[1], alpha_0)\n ], [\n covariance_entry(alpha[1], alpha[0], alpha_0),\n variance_entry(alpha[1], alpha_0)\n ]])\n\n with self.test_session():\n for n in ns:\n # n is shape [] and alpha is shape [2].\n dist = ds.DirichletMultinomial(n, alpha)\n covariance = dist.covariance()\n expected_covariance = n * (n + alpha_0) / (1 + alpha_0) * shared_matrix\n\n self.assertEqual([2, 2], covariance.get_shape())\n self.assertAllClose(expected_covariance, covariance.eval())\n\n def testCovarianceNAlphaBroadcast(self):\n alpha_v = [1., 2, 3]\n alpha_0 = 6.\n\n # Shape [4, 3]\n alpha = np.array(4 * [alpha_v], dtype=np.float32)\n # Shape [4, 1]\n ns = np.array([[2.], [3.], [4.], [5.]], dtype=np.float32)\n\n variance_entry = lambda a, a_sum: a / a_sum * (1 - a / a_sum)\n covariance_entry = lambda a, b, a_sum: -a * b / a_sum**2\n # Shape [4, 3, 3]\n shared_matrix = np.array(\n 4 * [[[\n variance_entry(alpha_v[0], alpha_0),\n covariance_entry(alpha_v[0], alpha_v[1], 
alpha_0),\n covariance_entry(alpha_v[0], alpha_v[2], alpha_0)\n ], [\n covariance_entry(alpha_v[1], alpha_v[0], alpha_0),\n variance_entry(alpha_v[1], alpha_0),\n covariance_entry(alpha_v[1], alpha_v[2], alpha_0)\n ], [\n covariance_entry(alpha_v[2], alpha_v[0], alpha_0),\n covariance_entry(alpha_v[2], alpha_v[1], alpha_0),\n variance_entry(alpha_v[2], alpha_0)\n ]]],\n dtype=np.float32)\n\n with self.test_session():\n # ns is shape [4, 1], and alpha is shape [4, 3].\n dist = ds.DirichletMultinomial(ns, alpha)\n covariance = dist.covariance()\n expected_covariance = shared_matrix * (\n ns * (ns + alpha_0) / (1 + alpha_0))[..., array_ops.newaxis]\n\n self.assertEqual([4, 3, 3], covariance.get_shape())\n self.assertAllClose(expected_covariance, covariance.eval())\n\n def testCovarianceMultidimensional(self):\n alpha = np.random.rand(3, 5, 4).astype(np.float32)\n alpha2 = np.random.rand(6, 3, 3).astype(np.float32)\n\n ns = np.random.randint(low=1, high=11, size=[3, 5, 1]).astype(np.float32)\n ns2 = np.random.randint(low=1, high=11, size=[6, 1, 1]).astype(np.float32)\n\n with self.test_session():\n dist = ds.DirichletMultinomial(ns, alpha)\n dist2 = ds.DirichletMultinomial(ns2, alpha2)\n\n covariance = dist.covariance()\n covariance2 = dist2.covariance()\n self.assertEqual([3, 5, 4, 4], covariance.get_shape())\n self.assertEqual([6, 3, 3, 3], covariance2.get_shape())\n\n def testZeroCountsResultsInPmfEqualToOne(self):\n # There is only one way for zero items to be selected, and this happens with\n # probability 1.\n alpha = [5, 0.5]\n counts = [0., 0]\n with self.test_session():\n dist = ds.DirichletMultinomial(0., alpha)\n pmf = dist.prob(counts)\n self.assertAllClose(1.0, pmf.eval())\n self.assertEqual((), pmf.get_shape())\n\n def testLargeTauGivesPreciseProbabilities(self):\n # If tau is large, we are doing coin flips with probability mu.\n mu = np.array([0.1, 0.1, 0.8], dtype=np.float32)\n tau = np.array([100.], dtype=np.float32)\n alpha = tau * mu\n\n # One (three sided) coin flip. Prob[coin 3] = 0.8.\n # Note that since it was one flip, value of tau didn't matter.\n counts = [0., 0, 1]\n with self.test_session():\n dist = ds.DirichletMultinomial(1., alpha)\n pmf = dist.prob(counts)\n self.assertAllClose(0.8, pmf.eval(), atol=1e-4)\n self.assertEqual((), pmf.get_shape())\n\n # Two (three sided) coin flips. 
Prob[coin 3] = 0.8.\n counts = [0., 0, 2]\n with self.test_session():\n dist = ds.DirichletMultinomial(2., alpha)\n pmf = dist.prob(counts)\n self.assertAllClose(0.8**2, pmf.eval(), atol=1e-2)\n self.assertEqual((), pmf.get_shape())\n\n # Three (three sided) coin flips.\n counts = [1., 0, 2]\n with self.test_session():\n dist = ds.DirichletMultinomial(3., alpha)\n pmf = dist.prob(counts)\n self.assertAllClose(3 * 0.1 * 0.8 * 0.8, pmf.eval(), atol=1e-2)\n self.assertEqual((), pmf.get_shape())\n\n def testSmallTauPrefersCorrelatedResults(self):\n # If tau is small, then correlation between draws is large, so draws that\n # are both of the same class are more likely.\n mu = np.array([0.5, 0.5], dtype=np.float32)\n tau = np.array([0.1], dtype=np.float32)\n alpha = tau * mu\n\n # If there is only one draw, it is still a coin flip, even with small tau.\n counts = [1., 0]\n with self.test_session():\n dist = ds.DirichletMultinomial(1., alpha)\n pmf = dist.prob(counts)\n self.assertAllClose(0.5, pmf.eval())\n self.assertEqual((), pmf.get_shape())\n\n # If there are two draws, it is much more likely that they are the same.\n counts_same = [2., 0]\n counts_different = [1, 1.]\n with self.test_session():\n dist = ds.DirichletMultinomial(2., alpha)\n pmf_same = dist.prob(counts_same)\n pmf_different = dist.prob(counts_different)\n self.assertLess(5 * pmf_different.eval(), pmf_same.eval())\n self.assertEqual((), pmf_same.get_shape())\n\n def testNonStrictTurnsOffAllChecks(self):\n # Make totally invalid input.\n with self.test_session():\n alpha = [[-1., 2]] # alpha should be positive.\n counts = [[1., 0], [0., -1]] # counts should be non-negative.\n n = [-5.3] # n should be a non negative integer equal to counts.sum.\n dist = ds.DirichletMultinomial(n, alpha, validate_args=False)\n dist.prob(counts).eval() # Should not raise.\n\n def testSampleUnbiasedNonScalarBatch(self):\n with self.test_session() as sess:\n dist = ds.DirichletMultinomial(\n total_count=5.,\n concentration=1. + 2. * self._rng.rand(4, 3, 2).astype(np.float32))\n n = int(3e3)\n x = dist.sample(n, seed=0)\n sample_mean = math_ops.reduce_mean(x, 0)\n # Cyclically rotate event dims left.\n x_centered = array_ops.transpose(x - sample_mean, [1, 2, 3, 0])\n sample_covariance = math_ops.matmul(\n x_centered, x_centered, adjoint_b=True) / n\n [\n sample_mean_,\n sample_covariance_,\n actual_mean_,\n actual_covariance_,\n ] = sess.run([\n sample_mean,\n sample_covariance,\n dist.mean(),\n dist.covariance(),\n ])\n self.assertAllEqual([4, 3, 2], sample_mean.get_shape())\n self.assertAllClose(actual_mean_, sample_mean_, atol=0., rtol=0.15)\n self.assertAllEqual([4, 3, 2, 2], sample_covariance.get_shape())\n self.assertAllClose(\n actual_covariance_, sample_covariance_, atol=0., rtol=0.20)\n\n def testSampleUnbiasedScalarBatch(self):\n with self.test_session() as sess:\n dist = ds.DirichletMultinomial(\n total_count=5.,\n concentration=1. + 2. 
* self._rng.rand(4).astype(np.float32))\n n = int(5e3)\n x = dist.sample(n, seed=0)\n sample_mean = math_ops.reduce_mean(x, 0)\n x_centered = x - sample_mean # Already transposed to [n, 2].\n sample_covariance = math_ops.matmul(\n x_centered, x_centered, adjoint_a=True) / n\n [\n sample_mean_,\n sample_covariance_,\n actual_mean_,\n actual_covariance_,\n ] = sess.run([\n sample_mean,\n sample_covariance,\n dist.mean(),\n dist.covariance(),\n ])\n self.assertAllEqual([4], sample_mean.get_shape())\n self.assertAllClose(actual_mean_, sample_mean_, atol=0., rtol=0.05)\n self.assertAllEqual([4, 4], sample_covariance.get_shape())\n self.assertAllClose(\n actual_covariance_, sample_covariance_, atol=0., rtol=0.15)\n\n\nif __name__ == \"__main__\":\n test.main()\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for TFGAN summaries.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n\nfrom tensorflow.contrib.gan.python import namedtuples\nfrom tensorflow.contrib.gan.python.eval.python import summaries_impl as summaries\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.summary import summary\n\n\ndef generator_model(inputs):\n return variable_scope.get_variable('dummy_g', initializer=2.0) * inputs\n\n\ndef discriminator_model(inputs, _):\n return variable_scope.get_variable('dummy_d', initializer=2.0) * inputs\n\n\ndef get_gan_model():\n # TODO(joelshor): Find a better way of creating a variable scope.\n with variable_scope.variable_scope('generator') as gen_scope:\n pass\n with variable_scope.variable_scope('discriminator') as dis_scope:\n pass\n return namedtuples.GANModel(\n generator_inputs=array_ops.zeros([4, 32, 32, 3]),\n generated_data=array_ops.zeros([4, 32, 32, 3]),\n generator_variables=[variables.Variable(0), variables.Variable(1)],\n generator_scope=gen_scope,\n generator_fn=generator_model,\n real_data=array_ops.ones([4, 32, 32, 3]),\n discriminator_real_outputs=array_ops.ones([1, 2, 3]),\n discriminator_gen_outputs=array_ops.ones([1, 2, 3]),\n discriminator_variables=[variables.Variable(0)],\n discriminator_scope=dis_scope,\n discriminator_fn=discriminator_model)\n\n\ndef get_cyclegan_model():\n with variable_scope.variable_scope('x2y'):\n model_x2y = get_gan_model()\n with variable_scope.variable_scope('y2x'):\n model_y2x = get_gan_model()\n return namedtuples.CycleGANModel(\n model_x2y=model_x2y,\n model_y2x=model_y2x,\n reconstructed_x=array_ops.zeros([3, 30, 35, 6]),\n reconstructed_y=array_ops.zeros([3, 30, 35, 6]))\n\n\nclass SummariesTest(test.TestCase):\n\n def _test_add_gan_model_image_summaries_impl(self, get_model_fn,\n 
expected_num_summary_ops,\n model_summaries):\n summaries.add_gan_model_image_summaries(get_model_fn(), grid_size=2,\n model_summaries=model_summaries)\n\n self.assertEquals(expected_num_summary_ops,\n len(ops.get_collection(ops.GraphKeys.SUMMARIES)))\n with self.test_session(use_gpu=True):\n variables.global_variables_initializer().run()\n summary.merge_all().eval()\n\n def test_add_gan_model_image_summaries(self):\n self._test_add_gan_model_image_summaries_impl(get_gan_model, 5, True)\n\n def test_add_gan_model_image_summaries_no_model(self):\n self._test_add_gan_model_image_summaries_impl(get_gan_model, 2, False)\n\n def test_add_gan_model_image_summaries_for_cyclegan(self):\n self._test_add_gan_model_image_summaries_impl(get_cyclegan_model, 10,\n True)\n\n def _test_add_gan_model_summaries_impl(self, get_model_fn,\n expected_num_summary_ops):\n summaries.add_gan_model_summaries(get_model_fn())\n\n self.assertEquals(expected_num_summary_ops,\n len(ops.get_collection(ops.GraphKeys.SUMMARIES)))\n with self.test_session(use_gpu=True):\n variables.global_variables_initializer().run()\n summary.merge_all().eval()\n\n def test_add_gan_model_summaries(self):\n self._test_add_gan_model_summaries_impl(get_gan_model, 3)\n\n def test_add_gan_model_summaries_for_cyclegan(self):\n self._test_add_gan_model_summaries_impl(get_cyclegan_model, 6)\n\n def _test_add_regularization_loss_summaries_impl(self, get_model_fn,\n expected_num_summary_ops):\n summaries.add_regularization_loss_summaries(get_model_fn())\n\n self.assertEquals(expected_num_summary_ops,\n len(ops.get_collection(ops.GraphKeys.SUMMARIES)))\n with self.test_session(use_gpu=True):\n summary.merge_all().eval()\n\n def test_add_regularization_loss_summaries(self):\n self._test_add_regularization_loss_summaries_impl(get_gan_model, 2)\n\n def test_add_regularization_loss_summaries_for_cyclegan(self):\n self._test_add_regularization_loss_summaries_impl(get_cyclegan_model, 4)\n\n # TODO(joelshor): Add correctness test.\n def _test_add_image_comparison_summaries_impl(self, get_model_fn,\n expected_num_summary_ops):\n summaries.add_image_comparison_summaries(get_model_fn(), display_diffs=True)\n\n self.assertEquals(expected_num_summary_ops,\n len(ops.get_collection(ops.GraphKeys.SUMMARIES)))\n with self.test_session(use_gpu=True):\n summary.merge_all().eval()\n\n def test_add_image_comparison_summaries(self):\n self._test_add_image_comparison_summaries_impl(get_gan_model, 1)\n\n def test_add_image_comparison_summaries_for_cyclegan(self):\n self._test_add_image_comparison_summaries_impl(get_cyclegan_model, 2)\n\n\nif __name__ == '__main__':\n test.main()\n", "# Copyright 2015 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Locally-connected layers.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.keras._impl.keras import activations\nfrom tensorflow.python.keras._impl.keras import backend as K\nfrom tensorflow.python.keras._impl.keras import constraints\nfrom tensorflow.python.keras._impl.keras import initializers\nfrom tensorflow.python.keras._impl.keras import regularizers\nfrom tensorflow.python.keras._impl.keras.engine import InputSpec\nfrom tensorflow.python.keras._impl.keras.engine import Layer\nfrom tensorflow.python.keras._impl.keras.engine.topology import shape_type_conversion\nfrom tensorflow.python.keras._impl.keras.utils import conv_utils\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n@tf_export('keras.layers.LocallyConnected1D')\nclass LocallyConnected1D(Layer):\n \"\"\"Locally-connected layer for 1D inputs.\n\n The `LocallyConnected1D` layer works similarly to\n the `Conv1D` layer, except that weights are unshared,\n that is, a different set of filters is applied at each different patch\n of the input.\n\n Example:\n ```python\n # apply a unshared weight convolution 1d of length 3 to a sequence with\n # 10 timesteps, with 64 output filters\n model = Sequential()\n model.add(LocallyConnected1D(64, 3, input_shape=(10, 32)))\n # now model.output_shape == (None, 8, 64)\n # add a new conv1d on top\n model.add(LocallyConnected1D(32, 3))\n # now model.output_shape == (None, 6, 32)\n ```\n\n Arguments:\n filters: Integer, the dimensionality of the output space\n (i.e. the number output of filters in the convolution).\n kernel_size: An integer or tuple/list of a single integer,\n specifying the length of the 1D convolution window.\n strides: An integer or tuple/list of a single integer,\n specifying the stride length of the convolution.\n Specifying any stride value != 1 is incompatible with specifying\n any `dilation_rate` value != 1.\n padding: Currently only supports `\"valid\"` (case-insensitive).\n `\"same\"` may be supported in the future.\n activation: Activation function to use.\n If you don't specify anything, no activation is applied\n (ie. 
\"linear\" activation: `a(x) = x`).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix.\n bias_initializer: Initializer for the bias vector.\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix.\n bias_regularizer: Regularizer function applied to the bias vector.\n activity_regularizer: Regularizer function applied to\n the output of the layer (its \"activation\")..\n kernel_constraint: Constraint function applied to the kernel matrix.\n bias_constraint: Constraint function applied to the bias vector.\n\n Input shape:\n 3D tensor with shape: `(batch_size, steps, input_dim)`\n\n Output shape:\n 3D tensor with shape: `(batch_size, new_steps, filters)`\n `steps` value might have changed due to padding or strides.\n \"\"\"\n\n def __init__(self,\n filters,\n kernel_size,\n strides=1,\n padding='valid',\n data_format=None,\n activation=None,\n use_bias=True,\n kernel_initializer='glorot_uniform',\n bias_initializer='zeros',\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n **kwargs):\n super(LocallyConnected1D, self).__init__(**kwargs)\n self.filters = filters\n self.kernel_size = conv_utils.normalize_tuple(kernel_size, 1, 'kernel_size')\n self.strides = conv_utils.normalize_tuple(strides, 1, 'strides')\n self.padding = conv_utils.normalize_padding(padding)\n if self.padding != 'valid':\n raise ValueError('Invalid border mode for LocallyConnected1D '\n '(only \"valid\" is supported): ' + padding)\n self.data_format = conv_utils.normalize_data_format(data_format)\n self.activation = activations.get(activation)\n self.use_bias = use_bias\n self.kernel_initializer = initializers.get(kernel_initializer)\n self.bias_initializer = initializers.get(bias_initializer)\n self.kernel_regularizer = regularizers.get(kernel_regularizer)\n self.bias_regularizer = regularizers.get(bias_regularizer)\n self.activity_regularizer = regularizers.get(activity_regularizer)\n self.kernel_constraint = constraints.get(kernel_constraint)\n self.bias_constraint = constraints.get(bias_constraint)\n self.input_spec = InputSpec(ndim=3)\n\n @shape_type_conversion\n def build(self, input_shape):\n input_dim = input_shape[2]\n if input_dim is None:\n raise ValueError('Axis 2 of input should be fully-defined. 
'\n 'Found shape:', input_shape)\n output_length = conv_utils.conv_output_length(\n input_shape[1], self.kernel_size[0], self.padding, self.strides[0])\n self.kernel_shape = (output_length, self.kernel_size[0] * input_dim,\n self.filters)\n self.kernel = self.add_weight(\n shape=self.kernel_shape,\n initializer=self.kernel_initializer,\n name='kernel',\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint)\n if self.use_bias:\n self.bias = self.add_weight(\n shape=(output_length, self.filters),\n initializer=self.bias_initializer,\n name='bias',\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint)\n else:\n self.bias = None\n self.input_spec = InputSpec(ndim=3, axes={2: input_dim})\n self.built = True\n\n @shape_type_conversion\n def compute_output_shape(self, input_shape):\n length = conv_utils.conv_output_length(input_shape[1], self.kernel_size[0],\n self.padding, self.strides[0])\n return (input_shape[0], length, self.filters)\n\n def call(self, inputs):\n output = K.local_conv1d(inputs, self.kernel, self.kernel_size, self.strides)\n if self.use_bias:\n output = K.bias_add(output, self.bias)\n if self.activation is not None:\n output = self.activation(output)\n return output\n\n def get_config(self):\n config = {\n 'filters':\n self.filters,\n 'kernel_size':\n self.kernel_size,\n 'strides':\n self.strides,\n 'padding':\n self.padding,\n 'activation':\n activations.serialize(self.activation),\n 'use_bias':\n self.use_bias,\n 'kernel_initializer':\n initializers.serialize(self.kernel_initializer),\n 'bias_initializer':\n initializers.serialize(self.bias_initializer),\n 'kernel_regularizer':\n regularizers.serialize(self.kernel_regularizer),\n 'bias_regularizer':\n regularizers.serialize(self.bias_regularizer),\n 'activity_regularizer':\n regularizers.serialize(self.activity_regularizer),\n 'kernel_constraint':\n constraints.serialize(self.kernel_constraint),\n 'bias_constraint':\n constraints.serialize(self.bias_constraint)\n }\n base_config = super(LocallyConnected1D, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\n@tf_export('keras.layers.LocallyConnected2D')\nclass LocallyConnected2D(Layer):\n \"\"\"Locally-connected layer for 2D inputs.\n\n The `LocallyConnected2D` layer works similarly\n to the `Conv2D` layer, except that weights are unshared,\n that is, a different set of filters is applied at each\n different patch of the input.\n\n Examples:\n ```python\n # apply a 3x3 unshared weights convolution with 64 output filters on a\n 32x32 image\n # with `data_format=\"channels_last\"`:\n model = Sequential()\n model.add(LocallyConnected2D(64, (3, 3), input_shape=(32, 32, 3)))\n # now model.output_shape == (None, 30, 30, 64)\n # notice that this layer will consume (30*30)*(3*3*3*64) + (30*30)*64\n parameters\n\n # add a 3x3 unshared weights convolution on top, with 32 output filters:\n model.add(LocallyConnected2D(32, (3, 3)))\n # now model.output_shape == (None, 28, 28, 32)\n ```\n\n Arguments:\n filters: Integer, the dimensionality of the output space\n (i.e. 
the number output of filters in the convolution).\n kernel_size: An integer or tuple/list of 2 integers, specifying the\n width and height of the 2D convolution window.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n strides: An integer or tuple/list of 2 integers,\n specifying the strides of the convolution along the width and height.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n padding: Currently only support `\"valid\"` (case-insensitive).\n `\"same\"` will be supported in future.\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch, height, width, channels)` while `channels_first`\n corresponds to inputs with shape\n `(batch, channels, height, width)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n activation: Activation function to use.\n If you don't specify anything, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix.\n bias_initializer: Initializer for the bias vector.\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix.\n bias_regularizer: Regularizer function applied to the bias vector.\n activity_regularizer: Regularizer function applied to\n the output of the layer (its \"activation\")..\n kernel_constraint: Constraint function applied to the kernel matrix.\n bias_constraint: Constraint function applied to the bias vector.\n\n Input shape:\n 4D tensor with shape:\n `(samples, channels, rows, cols)` if data_format='channels_first'\n or 4D tensor with shape:\n `(samples, rows, cols, channels)` if data_format='channels_last'.\n\n Output shape:\n 4D tensor with shape:\n `(samples, filters, new_rows, new_cols)` if data_format='channels_first'\n or 4D tensor with shape:\n `(samples, new_rows, new_cols, filters)` if data_format='channels_last'.\n `rows` and `cols` values might have changed due to padding.\n \"\"\"\n\n def __init__(self,\n filters,\n kernel_size,\n strides=(1, 1),\n padding='valid',\n data_format=None,\n activation=None,\n use_bias=True,\n kernel_initializer='glorot_uniform',\n bias_initializer='zeros',\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n **kwargs):\n super(LocallyConnected2D, self).__init__(**kwargs)\n self.filters = filters\n self.kernel_size = conv_utils.normalize_tuple(kernel_size, 2, 'kernel_size')\n self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')\n self.padding = conv_utils.normalize_padding(padding)\n if self.padding != 'valid':\n raise ValueError('Invalid border mode for LocallyConnected2D '\n '(only \"valid\" is supported): ' + padding)\n self.data_format = conv_utils.normalize_data_format(data_format)\n self.activation = activations.get(activation)\n self.use_bias = use_bias\n self.kernel_initializer = initializers.get(kernel_initializer)\n self.bias_initializer = initializers.get(bias_initializer)\n self.kernel_regularizer = regularizers.get(kernel_regularizer)\n self.bias_regularizer = regularizers.get(bias_regularizer)\n self.activity_regularizer = regularizers.get(activity_regularizer)\n self.kernel_constraint = 
constraints.get(kernel_constraint)\n self.bias_constraint = constraints.get(bias_constraint)\n self.input_spec = InputSpec(ndim=4)\n\n @shape_type_conversion\n def build(self, input_shape):\n if self.data_format == 'channels_last':\n input_row, input_col = input_shape[1:-1]\n input_filter = input_shape[3]\n else:\n input_row, input_col = input_shape[2:]\n input_filter = input_shape[1]\n if input_row is None or input_col is None:\n raise ValueError('The spatial dimensions of the inputs to '\n ' a LocallyConnected2D layer '\n 'should be fully-defined, but layer received '\n 'the inputs shape ' + str(input_shape))\n output_row = conv_utils.conv_output_length(input_row, self.kernel_size[0],\n self.padding, self.strides[0])\n output_col = conv_utils.conv_output_length(input_col, self.kernel_size[1],\n self.padding, self.strides[1])\n self.output_row = output_row\n self.output_col = output_col\n self.kernel_shape = (\n output_row * output_col,\n self.kernel_size[0] * self.kernel_size[1] * input_filter, self.filters)\n self.kernel = self.add_weight(\n shape=self.kernel_shape,\n initializer=self.kernel_initializer,\n name='kernel',\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint)\n if self.use_bias:\n self.bias = self.add_weight(\n shape=(output_row, output_col, self.filters),\n initializer=self.bias_initializer,\n name='bias',\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint)\n else:\n self.bias = None\n if self.data_format == 'channels_first':\n self.input_spec = InputSpec(ndim=4, axes={1: input_filter})\n else:\n self.input_spec = InputSpec(ndim=4, axes={-1: input_filter})\n self.built = True\n\n @shape_type_conversion\n def compute_output_shape(self, input_shape):\n if self.data_format == 'channels_first':\n rows = input_shape[2]\n cols = input_shape[3]\n elif self.data_format == 'channels_last':\n rows = input_shape[1]\n cols = input_shape[2]\n\n rows = conv_utils.conv_output_length(rows, self.kernel_size[0],\n self.padding, self.strides[0])\n cols = conv_utils.conv_output_length(cols, self.kernel_size[1],\n self.padding, self.strides[1])\n\n if self.data_format == 'channels_first':\n return (input_shape[0], self.filters, rows, cols)\n elif self.data_format == 'channels_last':\n return (input_shape[0], rows, cols, self.filters)\n\n def call(self, inputs):\n output = K.local_conv2d(inputs, self.kernel, self.kernel_size, self.strides,\n (self.output_row, self.output_col),\n self.data_format)\n\n if self.use_bias:\n output = K.bias_add(output, self.bias, data_format=self.data_format)\n\n output = self.activation(output)\n return output\n\n def get_config(self):\n config = {\n 'filters':\n self.filters,\n 'kernel_size':\n self.kernel_size,\n 'strides':\n self.strides,\n 'padding':\n self.padding,\n 'data_format':\n self.data_format,\n 'activation':\n activations.serialize(self.activation),\n 'use_bias':\n self.use_bias,\n 'kernel_initializer':\n initializers.serialize(self.kernel_initializer),\n 'bias_initializer':\n initializers.serialize(self.bias_initializer),\n 'kernel_regularizer':\n regularizers.serialize(self.kernel_regularizer),\n 'bias_regularizer':\n regularizers.serialize(self.bias_regularizer),\n 'activity_regularizer':\n regularizers.serialize(self.activity_regularizer),\n 'kernel_constraint':\n constraints.serialize(self.kernel_constraint),\n 'bias_constraint':\n constraints.serialize(self.bias_constraint)\n }\n base_config = super(LocallyConnected2D, self).get_config()\n return dict(list(base_config.items()) + 
list(config.items()))\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for linear_optimizer.sdca_estimator.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib\nfrom tensorflow.contrib.linear_optimizer.python import sdca_estimator\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.platform import test\n\n\nclass SDCALogisticClassifierTest(test.TestCase):\n\n def testRealValuedFeatures(self):\n \"\"\"Tests SDCALogisticClassifier works with real valued features.\"\"\"\n\n def input_fn():\n return {\n 'example_id': constant_op.constant(['1', '2']),\n 'maintenance_cost': constant_op.constant([500.0, 200.0]),\n 'sq_footage': constant_op.constant([[800.0], [600.0]]),\n 'weights': constant_op.constant([[1.0], [1.0]])\n }, constant_op.constant([[0], [1]])\n\n with self.test_session():\n maintenance_cost = feature_column_lib.real_valued_column(\n 'maintenance_cost')\n sq_footage = feature_column_lib.real_valued_column('sq_footage')\n classifier = sdca_estimator.SDCALogisticClassifier(\n example_id_column='example_id',\n feature_columns=[maintenance_cost, sq_footage],\n weight_column_name='weights')\n classifier.fit(input_fn=input_fn, steps=100)\n loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']\n self.assertLess(loss, 0.05)\n\n def testRealValuedFeatureWithHigherDimension(self):\n \"\"\"Tests SDCALogisticClassifier with high-dimension real valued features.\"\"\"\n\n # input_fn is identical to the one in testRealValuedFeatures where 2\n # 1-dimensional dense features are replaced by a 2-dimensional feature.\n def input_fn():\n return {\n 'example_id':\n constant_op.constant(['1', '2']),\n 'dense_feature':\n constant_op.constant([[500.0, 800.0], [200.0, 600.0]])\n }, constant_op.constant([[0], [1]])\n\n with self.test_session():\n dense_feature = feature_column_lib.real_valued_column(\n 'dense_feature', dimension=2)\n classifier = sdca_estimator.SDCALogisticClassifier(\n example_id_column='example_id', feature_columns=[dense_feature])\n classifier.fit(input_fn=input_fn, steps=100)\n loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']\n self.assertLess(loss, 0.05)\n\n def testBucketizedFeatures(self):\n \"\"\"Tests SDCALogisticClassifier with bucketized features.\"\"\"\n\n def input_fn():\n return {\n 'example_id': constant_op.constant(['1', '2', '3']),\n 'price': constant_op.constant([600.0, 1000.0, 400.0]),\n 'sq_footage': constant_op.constant([[1000.0], [600.0], [700.0]]),\n 'weights': constant_op.constant([[1.0], [1.0], [1.0]])\n }, constant_op.constant([[1], [0], [1]])\n\n with self.test_session():\n 
price_bucket = feature_column_lib.bucketized_column(\n feature_column_lib.real_valued_column('price'),\n boundaries=[500.0, 700.0])\n sq_footage_bucket = feature_column_lib.bucketized_column(\n feature_column_lib.real_valued_column('sq_footage'),\n boundaries=[650.0])\n classifier = sdca_estimator.SDCALogisticClassifier(\n example_id_column='example_id',\n feature_columns=[price_bucket, sq_footage_bucket],\n weight_column_name='weights',\n l2_regularization=1.0)\n classifier.fit(input_fn=input_fn, steps=50)\n metrics = classifier.evaluate(input_fn=input_fn, steps=1)\n self.assertGreater(metrics['accuracy'], 0.9)\n\n def testSparseFeatures(self):\n \"\"\"Tests SDCALogisticClassifier with sparse features.\"\"\"\n\n def input_fn():\n return {\n 'example_id':\n constant_op.constant(['1', '2', '3']),\n 'price':\n constant_op.constant([[0.4], [0.6], [0.3]]),\n 'country':\n sparse_tensor.SparseTensor(\n values=['IT', 'US', 'GB'],\n indices=[[0, 0], [1, 3], [2, 1]],\n dense_shape=[3, 5]),\n 'weights':\n constant_op.constant([[1.0], [1.0], [1.0]])\n }, constant_op.constant([[1], [0], [1]])\n\n with self.test_session():\n price = feature_column_lib.real_valued_column('price')\n country = feature_column_lib.sparse_column_with_hash_bucket(\n 'country', hash_bucket_size=5)\n classifier = sdca_estimator.SDCALogisticClassifier(\n example_id_column='example_id',\n feature_columns=[price, country],\n weight_column_name='weights')\n classifier.fit(input_fn=input_fn, steps=50)\n metrics = classifier.evaluate(input_fn=input_fn, steps=1)\n self.assertGreater(metrics['accuracy'], 0.9)\n\n def testWeightedSparseFeatures(self):\n \"\"\"Tests SDCALogisticClassifier with weighted sparse features.\"\"\"\n\n def input_fn():\n return {\n 'example_id':\n constant_op.constant(['1', '2', '3']),\n 'price':\n sparse_tensor.SparseTensor(\n values=[2., 3., 1.],\n indices=[[0, 0], [1, 0], [2, 0]],\n dense_shape=[3, 5]),\n 'country':\n sparse_tensor.SparseTensor(\n values=['IT', 'US', 'GB'],\n indices=[[0, 0], [1, 0], [2, 0]],\n dense_shape=[3, 5])\n }, constant_op.constant([[1], [0], [1]])\n\n with self.test_session():\n country = feature_column_lib.sparse_column_with_hash_bucket(\n 'country', hash_bucket_size=5)\n country_weighted_by_price = feature_column_lib.weighted_sparse_column(\n country, 'price')\n classifier = sdca_estimator.SDCALogisticClassifier(\n example_id_column='example_id',\n feature_columns=[country_weighted_by_price])\n classifier.fit(input_fn=input_fn, steps=50)\n metrics = classifier.evaluate(input_fn=input_fn, steps=1)\n self.assertGreater(metrics['accuracy'], 0.9)\n\n def testCrossedFeatures(self):\n \"\"\"Tests SDCALogisticClassifier with crossed features.\"\"\"\n\n def input_fn():\n return {\n 'example_id':\n constant_op.constant(['1', '2', '3']),\n 'language':\n sparse_tensor.SparseTensor(\n values=['english', 'italian', 'spanish'],\n indices=[[0, 0], [1, 0], [2, 0]],\n dense_shape=[3, 1]),\n 'country':\n sparse_tensor.SparseTensor(\n values=['US', 'IT', 'MX'],\n indices=[[0, 0], [1, 0], [2, 0]],\n dense_shape=[3, 1])\n }, constant_op.constant([[0], [0], [1]])\n\n with self.test_session():\n language = feature_column_lib.sparse_column_with_hash_bucket(\n 'language', hash_bucket_size=5)\n country = feature_column_lib.sparse_column_with_hash_bucket(\n 'country', hash_bucket_size=5)\n country_language = feature_column_lib.crossed_column(\n [language, country], hash_bucket_size=10)\n classifier = sdca_estimator.SDCALogisticClassifier(\n example_id_column='example_id', 
feature_columns=[country_language])\n classifier.fit(input_fn=input_fn, steps=10)\n metrics = classifier.evaluate(input_fn=input_fn, steps=1)\n self.assertGreater(metrics['accuracy'], 0.9)\n\n def testMixedFeatures(self):\n \"\"\"Tests SDCALogisticClassifier with a mix of features.\"\"\"\n\n def input_fn():\n return {\n 'example_id':\n constant_op.constant(['1', '2', '3']),\n 'price':\n constant_op.constant([[0.6], [0.8], [0.3]]),\n 'sq_footage':\n constant_op.constant([900.0, 700.0, 600.0]),\n 'country':\n sparse_tensor.SparseTensor(\n values=['IT', 'US', 'GB'],\n indices=[[0, 0], [1, 3], [2, 1]],\n dense_shape=[3, 5]),\n 'weights':\n constant_op.constant([[3.0], [1.0], [1.0]])\n }, constant_op.constant([[1], [0], [1]])\n\n with self.test_session():\n price = feature_column_lib.real_valued_column('price')\n sq_footage_bucket = feature_column_lib.bucketized_column(\n feature_column_lib.real_valued_column('sq_footage'),\n boundaries=[650.0, 800.0])\n country = feature_column_lib.sparse_column_with_hash_bucket(\n 'country', hash_bucket_size=5)\n sq_footage_country = feature_column_lib.crossed_column(\n [sq_footage_bucket, country], hash_bucket_size=10)\n classifier = sdca_estimator.SDCALogisticClassifier(\n example_id_column='example_id',\n feature_columns=[\n price, sq_footage_bucket, country, sq_footage_country\n ],\n weight_column_name='weights')\n classifier.fit(input_fn=input_fn, steps=50)\n metrics = classifier.evaluate(input_fn=input_fn, steps=1)\n self.assertGreater(metrics['accuracy'], 0.9)\n\n\nclass SDCALinearRegressorTest(test.TestCase):\n\n def _single_threaded_test_session(self):\n # TODO(andreasst): figure out why SDCALinearRegressor needs a single\n # threaded session to pass in tsan mode but SDCALogisticClassifier does not.\n config = config_pb2.ConfigProto(\n inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)\n return self.test_session(config=config)\n\n def testRealValuedLinearFeatures(self):\n \"\"\"Tests SDCALinearRegressor works with real valued features.\"\"\"\n x = [[1.2, 2.0, -1.5], [-2.0, 3.0, -0.5], [1.0, -0.5, 4.0]]\n weights = [[3.0], [-1.2], [0.5]]\n y = np.dot(x, weights)\n\n def input_fn():\n return {\n 'example_id': constant_op.constant(['1', '2', '3']),\n 'x': constant_op.constant(x),\n 'weights': constant_op.constant([[10.0], [10.0], [10.0]])\n }, constant_op.constant(y)\n\n with self._single_threaded_test_session():\n x_column = feature_column_lib.real_valued_column('x', dimension=3)\n regressor = sdca_estimator.SDCALinearRegressor(\n example_id_column='example_id',\n feature_columns=[x_column],\n weight_column_name='weights')\n regressor.fit(input_fn=input_fn, steps=20)\n loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']\n self.assertLess(loss, 0.01)\n self.assertIn('linear/x/weight', regressor.get_variable_names())\n regressor_weights = regressor.get_variable_value('linear/x/weight')\n self.assertAllClose(\n [w[0] for w in weights], regressor_weights.flatten(), rtol=0.1)\n\n def testMixedFeaturesArbitraryWeights(self):\n \"\"\"Tests SDCALinearRegressor works with a mix of features.\"\"\"\n\n def input_fn():\n return {\n 'example_id':\n constant_op.constant(['1', '2', '3']),\n 'price':\n constant_op.constant([[0.6], [0.8], [0.3]]),\n 'sq_footage':\n constant_op.constant([[900.0], [700.0], [600.0]]),\n 'country':\n sparse_tensor.SparseTensor(\n values=['IT', 'US', 'GB'],\n indices=[[0, 0], [1, 3], [2, 1]],\n dense_shape=[3, 5]),\n 'weights':\n constant_op.constant([[3.0], [5.0], [7.0]])\n }, constant_op.constant([[1.55], [-1.25], 
[-3.0]])\n\n with self._single_threaded_test_session():\n price = feature_column_lib.real_valued_column('price')\n sq_footage_bucket = feature_column_lib.bucketized_column(\n feature_column_lib.real_valued_column('sq_footage'),\n boundaries=[650.0, 800.0])\n country = feature_column_lib.sparse_column_with_hash_bucket(\n 'country', hash_bucket_size=5)\n sq_footage_country = feature_column_lib.crossed_column(\n [sq_footage_bucket, country], hash_bucket_size=10)\n regressor = sdca_estimator.SDCALinearRegressor(\n example_id_column='example_id',\n feature_columns=[\n price, sq_footage_bucket, country, sq_footage_country\n ],\n l2_regularization=1.0,\n weight_column_name='weights')\n regressor.fit(input_fn=input_fn, steps=20)\n loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']\n self.assertLess(loss, 0.05)\n\n def testSdcaOptimizerSparseFeaturesWithL1Reg(self):\n \"\"\"SDCALinearRegressor works with sparse features and L1 regularization.\"\"\"\n\n def input_fn():\n return {\n 'example_id':\n constant_op.constant(['1', '2', '3']),\n 'price':\n constant_op.constant([0.4, 0.6, 0.3]),\n 'country':\n sparse_tensor.SparseTensor(\n values=['IT', 'US', 'GB'],\n indices=[[0, 0], [1, 3], [2, 1]],\n dense_shape=[3, 5]),\n 'weights':\n constant_op.constant([[10.0], [10.0], [10.0]])\n }, constant_op.constant([[1.4], [-0.8], [2.6]])\n\n with self._single_threaded_test_session():\n price = feature_column_lib.real_valued_column('price')\n country = feature_column_lib.sparse_column_with_hash_bucket(\n 'country', hash_bucket_size=5)\n # Regressor with no L1 regularization.\n regressor = sdca_estimator.SDCALinearRegressor(\n example_id_column='example_id',\n feature_columns=[price, country],\n weight_column_name='weights')\n regressor.fit(input_fn=input_fn, steps=20)\n no_l1_reg_loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']\n variable_names = regressor.get_variable_names()\n self.assertIn('linear/price/weight', variable_names)\n self.assertIn('linear/country/weights', variable_names)\n no_l1_reg_weights = {\n 'linear/price/weight':\n regressor.get_variable_value('linear/price/weight'),\n 'linear/country/weights':\n regressor.get_variable_value('linear/country/weights'),\n }\n\n # Regressor with L1 regularization.\n regressor = sdca_estimator.SDCALinearRegressor(\n example_id_column='example_id',\n feature_columns=[price, country],\n l1_regularization=1.0,\n weight_column_name='weights')\n regressor.fit(input_fn=input_fn, steps=20)\n l1_reg_loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']\n l1_reg_weights = {\n 'linear/price/weight':\n regressor.get_variable_value('linear/price/weight'),\n 'linear/country/weights':\n regressor.get_variable_value('linear/country/weights'),\n }\n\n # Unregularized loss is lower when there is no L1 regularization.\n self.assertLess(no_l1_reg_loss, l1_reg_loss)\n self.assertLess(no_l1_reg_loss, 0.05)\n\n # But weights returned by the regressor with L1 regularization have\n # smaller L1 norm.\n l1_reg_weights_norm, no_l1_reg_weights_norm = 0.0, 0.0\n for var_name in sorted(l1_reg_weights):\n l1_reg_weights_norm += sum(\n np.absolute(l1_reg_weights[var_name].flatten()))\n no_l1_reg_weights_norm += sum(\n np.absolute(no_l1_reg_weights[var_name].flatten()))\n print('Var name: %s, value: %s' %\n (var_name, no_l1_reg_weights[var_name].flatten()))\n self.assertLess(l1_reg_weights_norm, no_l1_reg_weights_norm)\n\n def testBiasOnly(self):\n \"\"\"Tests SDCALinearRegressor has a valid bias weight.\"\"\"\n\n def input_fn():\n \"\"\"Testing the bias weight 
when it's the only feature present.\n\n All of the instances in this input only have the bias feature, and a\n 1/4 of the labels are positive. This means that the expected weight for\n the bias should be close to the average prediction, i.e 0.25.\n Returns:\n Training data for the test.\n \"\"\"\n num_examples = 40\n return {\n 'example_id':\n constant_op.constant([str(x + 1) for x in range(num_examples)]),\n # place_holder is an empty column which is always 0 (absent), because\n # LinearClassifier requires at least one column.\n 'place_holder':\n constant_op.constant([[0.0]] * num_examples),\n }, constant_op.constant([[1 if i % 4 is 0 else 0]\n for i in range(num_examples)])\n\n with self._single_threaded_test_session():\n place_holder = feature_column_lib.real_valued_column('place_holder')\n regressor = sdca_estimator.SDCALinearRegressor(\n example_id_column='example_id', feature_columns=[place_holder])\n regressor.fit(input_fn=input_fn, steps=100)\n self.assertNear(\n regressor.get_variable_value('linear/bias_weight')[0], 0.25, err=0.1)\n\n def testBiasAndOtherColumns(self):\n \"\"\"SDCALinearRegressor has valid bias weight with other columns present.\"\"\"\n\n def input_fn():\n \"\"\"Testing the bias weight when there are other features present.\n\n 1/2 of the instances in this input have feature 'a', the rest have\n feature 'b', and we expect the bias to be added to each instance as well.\n 0.4 of all instances that have feature 'a' are positive, and 0.2 of all\n instances that have feature 'b' are positive. The labels in the dataset\n are ordered to appear shuffled since SDCA expects shuffled data, and\n converges faster with this pseudo-random ordering.\n If the bias was centered we would expect the weights to be:\n bias: 0.3\n a: 0.1\n b: -0.1\n Until b/29339026 is resolved, the bias gets regularized with the same\n global value for the other columns, and so the expected weights get\n shifted and are:\n bias: 0.2\n a: 0.2\n b: 0.0\n Returns:\n The test dataset.\n \"\"\"\n num_examples = 200\n half = int(num_examples / 2)\n return {\n 'example_id':\n constant_op.constant([str(x + 1) for x in range(num_examples)]),\n 'a':\n constant_op.constant([[1]] * int(half) + [[0]] * int(half)),\n 'b':\n constant_op.constant([[0]] * int(half) + [[1]] * int(half)),\n }, constant_op.constant(\n [[x]\n for x in [1, 0, 0, 1, 1, 0, 0, 0, 1, 0] * int(half / 10) +\n [0, 1, 0, 0, 0, 0, 0, 0, 1, 0] * int(half / 10)])\n\n with self._single_threaded_test_session():\n regressor = sdca_estimator.SDCALinearRegressor(\n example_id_column='example_id',\n feature_columns=[\n feature_column_lib.real_valued_column('a'),\n feature_column_lib.real_valued_column('b')\n ])\n\n regressor.fit(input_fn=input_fn, steps=200)\n\n variable_names = regressor.get_variable_names()\n self.assertIn('linear/bias_weight', variable_names)\n self.assertIn('linear/a/weight', variable_names)\n self.assertIn('linear/b/weight', variable_names)\n # TODO(b/29339026): Change the expected results to expect a centered bias.\n self.assertNear(\n regressor.get_variable_value('linear/bias_weight')[0], 0.2, err=0.05)\n self.assertNear(\n regressor.get_variable_value('linear/a/weight')[0], 0.2, err=0.05)\n self.assertNear(\n regressor.get_variable_value('linear/b/weight')[0], 0.0, err=0.05)\n\n def testBiasAndOtherColumnsFabricatedCentered(self):\n \"\"\"SDCALinearRegressor has valid bias weight when instances are centered.\"\"\"\n\n def input_fn():\n \"\"\"Testing the bias weight when there are other features present.\n\n 1/2 of the instances in 
this input have feature 'a', the rest have\n feature 'b', and we expect the bias to be added to each instance as well.\n 0.1 of all instances that have feature 'a' have a label of 1, and 0.1 of\n all instances that have feature 'b' have a label of -1.\n We can expect the weights to be:\n bias: 0.0\n a: 0.1\n b: -0.1\n Returns:\n The test dataset.\n \"\"\"\n num_examples = 200\n half = int(num_examples / 2)\n return {\n 'example_id':\n constant_op.constant([str(x + 1) for x in range(num_examples)]),\n 'a':\n constant_op.constant([[1]] * int(half) + [[0]] * int(half)),\n 'b':\n constant_op.constant([[0]] * int(half) + [[1]] * int(half)),\n }, constant_op.constant([[1 if x % 10 == 0 else 0] for x in range(half)] +\n [[-1 if x % 10 == 0 else 0] for x in range(half)])\n\n with self._single_threaded_test_session():\n regressor = sdca_estimator.SDCALinearRegressor(\n example_id_column='example_id',\n feature_columns=[\n feature_column_lib.real_valued_column('a'),\n feature_column_lib.real_valued_column('b')\n ])\n\n regressor.fit(input_fn=input_fn, steps=100)\n\n variable_names = regressor.get_variable_names()\n self.assertIn('linear/bias_weight', variable_names)\n self.assertIn('linear/a/weight', variable_names)\n self.assertIn('linear/b/weight', variable_names)\n self.assertNear(\n regressor.get_variable_value('linear/bias_weight')[0], 0.0, err=0.05)\n self.assertNear(\n regressor.get_variable_value('linear/a/weight')[0], 0.1, err=0.05)\n self.assertNear(\n regressor.get_variable_value('linear/b/weight')[0], -0.1, err=0.05)\n\n\nif __name__ == '__main__':\n test.main()\n", "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for Keras callbacks.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport csv\nimport os\nimport re\nimport shutil\nimport threading\nimport unittest\n\nimport numpy as np\n\nfrom tensorflow.python.keras._impl import keras\nfrom tensorflow.python.keras._impl.keras import testing_utils\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.summary.writer import writer_cache\n\ntry:\n import h5py # pylint:disable=g-import-not-at-top\nexcept ImportError:\n h5py = None\n\ntry:\n import requests # pylint:disable=g-import-not-at-top\nexcept ImportError:\n requests = None\n\n\nTRAIN_SAMPLES = 10\nTEST_SAMPLES = 10\nNUM_CLASSES = 2\nINPUT_DIM = 3\nNUM_HIDDEN = 5\nBATCH_SIZE = 5\n\n\nclass KerasCallbacksTest(test.TestCase):\n\n def test_ModelCheckpoint(self):\n if h5py is None:\n return # Skip test if models cannot be saved.\n\n with self.test_session():\n np.random.seed(1337)\n\n temp_dir = self.get_temp_dir()\n self.addCleanup(shutil.rmtree, temp_dir)\n\n filepath = os.path.join(temp_dir, 'checkpoint.h5')\n (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(\n train_samples=TRAIN_SAMPLES,\n test_samples=TEST_SAMPLES,\n 
input_shape=(INPUT_DIM,),\n num_classes=NUM_CLASSES)\n y_test = keras.utils.to_categorical(y_test)\n y_train = keras.utils.to_categorical(y_train)\n # case 1\n monitor = 'val_loss'\n save_best_only = False\n mode = 'auto'\n\n model = keras.models.Sequential()\n model.add(\n keras.layers.Dense(\n NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))\n model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))\n model.compile(\n loss='categorical_crossentropy',\n optimizer='rmsprop',\n metrics=['accuracy'])\n\n cbks = [\n keras.callbacks.ModelCheckpoint(\n filepath,\n monitor=monitor,\n save_best_only=save_best_only,\n mode=mode)\n ]\n model.fit(\n x_train,\n y_train,\n batch_size=BATCH_SIZE,\n validation_data=(x_test, y_test),\n callbacks=cbks,\n epochs=1,\n verbose=0)\n assert os.path.exists(filepath)\n os.remove(filepath)\n\n # case 2\n mode = 'min'\n cbks = [\n keras.callbacks.ModelCheckpoint(\n filepath,\n monitor=monitor,\n save_best_only=save_best_only,\n mode=mode)\n ]\n model.fit(\n x_train,\n y_train,\n batch_size=BATCH_SIZE,\n validation_data=(x_test, y_test),\n callbacks=cbks,\n epochs=1,\n verbose=0)\n assert os.path.exists(filepath)\n os.remove(filepath)\n\n # case 3\n mode = 'max'\n monitor = 'val_acc'\n cbks = [\n keras.callbacks.ModelCheckpoint(\n filepath,\n monitor=monitor,\n save_best_only=save_best_only,\n mode=mode)\n ]\n model.fit(\n x_train,\n y_train,\n batch_size=BATCH_SIZE,\n validation_data=(x_test, y_test),\n callbacks=cbks,\n epochs=1,\n verbose=0)\n assert os.path.exists(filepath)\n os.remove(filepath)\n\n # case 4\n save_best_only = True\n cbks = [\n keras.callbacks.ModelCheckpoint(\n filepath,\n monitor=monitor,\n save_best_only=save_best_only,\n mode=mode)\n ]\n model.fit(\n x_train,\n y_train,\n batch_size=BATCH_SIZE,\n validation_data=(x_test, y_test),\n callbacks=cbks,\n epochs=1,\n verbose=0)\n assert os.path.exists(filepath)\n os.remove(filepath)\n\n # Case: metric not available.\n cbks = [\n keras.callbacks.ModelCheckpoint(\n filepath,\n monitor='unknown',\n save_best_only=True)\n ]\n model.fit(\n x_train,\n y_train,\n batch_size=BATCH_SIZE,\n validation_data=(x_test, y_test),\n callbacks=cbks,\n epochs=1,\n verbose=0)\n # File won't be written.\n assert not os.path.exists(filepath)\n\n # case 5\n save_best_only = False\n period = 2\n mode = 'auto'\n\n filepath = os.path.join(temp_dir, 'checkpoint.{epoch:02d}.h5')\n cbks = [\n keras.callbacks.ModelCheckpoint(\n filepath,\n monitor=monitor,\n save_best_only=save_best_only,\n mode=mode,\n period=period)\n ]\n model.fit(\n x_train,\n y_train,\n batch_size=BATCH_SIZE,\n validation_data=(x_test, y_test),\n callbacks=cbks,\n epochs=4,\n verbose=1)\n assert os.path.exists(filepath.format(epoch=2))\n assert os.path.exists(filepath.format(epoch=4))\n os.remove(filepath.format(epoch=2))\n os.remove(filepath.format(epoch=4))\n assert not os.path.exists(filepath.format(epoch=1))\n assert not os.path.exists(filepath.format(epoch=3))\n\n # Invalid use: this will raise a warning but not an Exception.\n keras.callbacks.ModelCheckpoint(\n filepath,\n monitor=monitor,\n save_best_only=save_best_only,\n mode='unknown')\n\n def test_EarlyStopping(self):\n with self.test_session():\n np.random.seed(123)\n (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(\n train_samples=TRAIN_SAMPLES,\n test_samples=TEST_SAMPLES,\n input_shape=(INPUT_DIM,),\n num_classes=NUM_CLASSES)\n y_test = keras.utils.to_categorical(y_test)\n y_train = keras.utils.to_categorical(y_train)\n model = keras.models.Sequential()\n 
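The checkpoint cases above cycle `ModelCheckpoint` through different `monitor`, `mode`, `save_best_only`, and `period` settings. As a quick reference, a minimal standalone sketch of the same callback follows; the model size, random data, and checkpoint directory are illustrative placeholders, and h5py must be installed just as the test requires.

```python
import os
import tempfile
import numpy as np
from tensorflow.python.keras._impl import keras

# Tiny stand-in model and data; shapes and sizes are illustrative only.
x = np.random.random((20, 3))
y = keras.utils.to_categorical(np.random.randint(0, 2, size=(20, 1)), 2)

model = keras.models.Sequential()
model.add(keras.layers.Dense(5, input_dim=3, activation='relu'))
model.add(keras.layers.Dense(2, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop',
              metrics=['accuracy'])

# Write weights only when the monitored quantity improves, at most once per epoch.
ckpt_path = os.path.join(tempfile.mkdtemp(), 'weights.{epoch:02d}.h5')
cbks = [keras.callbacks.ModelCheckpoint(
    ckpt_path, monitor='val_loss', save_best_only=True, mode='auto', period=1)]
model.fit(x, y, validation_data=(x, y), callbacks=cbks, epochs=2, verbose=0)
```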
model.add(\n keras.layers.Dense(\n NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))\n model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))\n model.compile(\n loss='categorical_crossentropy',\n optimizer='rmsprop',\n metrics=['accuracy'])\n\n cases = [\n ('max', 'val_acc'),\n ('min', 'val_loss'),\n ('auto', 'val_acc'),\n ('auto', 'loss'),\n ('unknown', 'unknown')\n ]\n for mode, monitor in cases:\n patience = 0\n cbks = [\n keras.callbacks.EarlyStopping(\n patience=patience, monitor=monitor, mode=mode)\n ]\n model.fit(\n x_train,\n y_train,\n batch_size=BATCH_SIZE,\n validation_data=(x_test, y_test),\n callbacks=cbks,\n epochs=5,\n verbose=0)\n\n def test_EarlyStopping_reuse(self):\n with self.test_session():\n np.random.seed(1337)\n patience = 3\n data = np.random.random((100, 1))\n labels = np.where(data > 0.5, 1, 0)\n model = keras.models.Sequential((keras.layers.Dense(\n 1, input_dim=1, activation='relu'), keras.layers.Dense(\n 1, activation='sigmoid'),))\n model.compile(\n optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])\n stopper = keras.callbacks.EarlyStopping(monitor='acc', patience=patience)\n weights = model.get_weights()\n\n hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)\n assert len(hist.epoch) >= patience\n\n # This should allow training to go for at least `patience` epochs\n model.set_weights(weights)\n hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)\n assert len(hist.epoch) >= patience\n\n def test_RemoteMonitor(self):\n if requests is None:\n return\n\n monitor = keras.callbacks.RemoteMonitor()\n # This will raise a warning since the default address in unreachable:\n monitor.on_epoch_end(0, logs={'loss': 0.})\n\n def test_LearningRateScheduler(self):\n with self.test_session():\n np.random.seed(1337)\n (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(\n train_samples=TRAIN_SAMPLES,\n test_samples=TEST_SAMPLES,\n input_shape=(INPUT_DIM,),\n num_classes=NUM_CLASSES)\n y_test = keras.utils.to_categorical(y_test)\n y_train = keras.utils.to_categorical(y_train)\n model = keras.models.Sequential()\n model.add(\n keras.layers.Dense(\n NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))\n model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))\n model.compile(\n loss='categorical_crossentropy',\n optimizer='sgd',\n metrics=['accuracy'])\n\n cbks = [keras.callbacks.LearningRateScheduler(lambda x: 1. / (1. 
+ x))]\n model.fit(\n x_train,\n y_train,\n batch_size=BATCH_SIZE,\n validation_data=(x_test, y_test),\n callbacks=cbks,\n epochs=5,\n verbose=0)\n assert (float(keras.backend.get_value(model.optimizer.lr)) - 0.2\n ) < keras.backend.epsilon()\n\n def test_ReduceLROnPlateau(self):\n with self.test_session():\n np.random.seed(1337)\n (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(\n train_samples=TRAIN_SAMPLES,\n test_samples=TEST_SAMPLES,\n input_shape=(INPUT_DIM,),\n num_classes=NUM_CLASSES)\n y_test = keras.utils.to_categorical(y_test)\n y_train = keras.utils.to_categorical(y_train)\n\n def make_model():\n np.random.seed(1337)\n model = keras.models.Sequential()\n model.add(\n keras.layers.Dense(\n NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))\n model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))\n\n model.compile(\n loss='categorical_crossentropy',\n optimizer=keras.optimizers.SGD(lr=0.1),\n metrics=['accuracy'])\n return model\n\n model = make_model()\n # This should reduce the LR after the first epoch (due to high epsilon).\n cbks = [\n keras.callbacks.ReduceLROnPlateau(\n monitor='val_loss',\n factor=0.1,\n epsilon=10,\n patience=1,\n cooldown=5)\n ]\n model.fit(\n x_train,\n y_train,\n batch_size=BATCH_SIZE,\n validation_data=(x_test, y_test),\n callbacks=cbks,\n epochs=5,\n verbose=0)\n self.assertAllClose(\n float(keras.backend.get_value(model.optimizer.lr)),\n 0.01,\n atol=1e-4)\n\n def test_CSVLogger(self):\n with self.test_session():\n np.random.seed(1337)\n temp_dir = self.get_temp_dir()\n self.addCleanup(shutil.rmtree, temp_dir)\n filepath = os.path.join(temp_dir, 'log.tsv')\n\n sep = '\\t'\n (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(\n train_samples=TRAIN_SAMPLES,\n test_samples=TEST_SAMPLES,\n input_shape=(INPUT_DIM,),\n num_classes=NUM_CLASSES)\n y_test = keras.utils.to_categorical(y_test)\n y_train = keras.utils.to_categorical(y_train)\n\n def make_model():\n np.random.seed(1337)\n model = keras.models.Sequential()\n model.add(\n keras.layers.Dense(\n NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))\n model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))\n\n model.compile(\n loss='categorical_crossentropy',\n optimizer=keras.optimizers.SGD(lr=0.1),\n metrics=['accuracy'])\n return model\n\n # case 1, create new file with defined separator\n model = make_model()\n cbks = [keras.callbacks.CSVLogger(filepath, separator=sep)]\n model.fit(\n x_train,\n y_train,\n batch_size=BATCH_SIZE,\n validation_data=(x_test, y_test),\n callbacks=cbks,\n epochs=1,\n verbose=0)\n\n assert os.path.exists(filepath)\n with open(filepath) as csvfile:\n dialect = csv.Sniffer().sniff(csvfile.read())\n assert dialect.delimiter == sep\n del model\n del cbks\n\n # case 2, append data to existing file, skip header\n model = make_model()\n cbks = [keras.callbacks.CSVLogger(filepath, separator=sep, append=True)]\n model.fit(\n x_train,\n y_train,\n batch_size=BATCH_SIZE,\n validation_data=(x_test, y_test),\n callbacks=cbks,\n epochs=1,\n verbose=0)\n\n # case 3, reuse of CSVLogger object\n model.fit(\n x_train,\n y_train,\n batch_size=BATCH_SIZE,\n validation_data=(x_test, y_test),\n callbacks=cbks,\n epochs=1,\n verbose=0)\n\n with open(filepath) as csvfile:\n output = ' '.join(csvfile.readlines())\n assert len(re.findall('epoch', output)) == 1\n\n os.remove(filepath)\n\n def test_stop_training_csv(self):\n # Test that using the CSVLogger callback with the TerminateOnNaN callback\n # does not result in invalid CSVs.\n 
np.random.seed(1337)\n tmpdir = self.get_temp_dir()\n self.addCleanup(shutil.rmtree, tmpdir)\n\n with self.test_session():\n fp = os.path.join(tmpdir, 'test.csv')\n (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(\n train_samples=TRAIN_SAMPLES,\n test_samples=TEST_SAMPLES,\n input_shape=(INPUT_DIM,),\n num_classes=NUM_CLASSES)\n\n y_test = keras.utils.to_categorical(y_test)\n y_train = keras.utils.to_categorical(y_train)\n cbks = [keras.callbacks.TerminateOnNaN(), keras.callbacks.CSVLogger(fp)]\n model = keras.models.Sequential()\n for _ in range(5):\n model.add(keras.layers.Dense(2, input_dim=INPUT_DIM, activation='relu'))\n model.add(keras.layers.Dense(NUM_CLASSES, activation='linear'))\n model.compile(loss='mean_squared_error',\n optimizer='rmsprop')\n\n def data_generator():\n i = 0\n max_batch_index = len(x_train) // BATCH_SIZE\n tot = 0\n while 1:\n if tot > 3 * len(x_train):\n yield (np.ones([BATCH_SIZE, INPUT_DIM]) * np.nan,\n np.ones([BATCH_SIZE, NUM_CLASSES]) * np.nan)\n else:\n yield (x_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE],\n y_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE])\n i += 1\n tot += 1\n i %= max_batch_index\n\n history = model.fit_generator(data_generator(),\n len(x_train) // BATCH_SIZE,\n validation_data=(x_test, y_test),\n callbacks=cbks,\n epochs=20)\n loss = history.history['loss']\n assert len(loss) > 1\n assert loss[-1] == np.inf or np.isnan(loss[-1])\n\n values = []\n with open(fp) as f:\n for x in csv.reader(f):\n # In windows, due to \\r\\n line ends we may end up reading empty lines\n # after each line. Skip empty lines.\n if x:\n values.append(x)\n assert 'nan' in values[-1], 'The last epoch was not logged.'\n\n def test_TerminateOnNaN(self):\n np.random.seed(1337)\n (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(\n train_samples=TRAIN_SAMPLES,\n test_samples=TEST_SAMPLES,\n input_shape=(INPUT_DIM,),\n num_classes=NUM_CLASSES)\n\n y_test = keras.utils.to_categorical(y_test)\n y_train = keras.utils.to_categorical(y_train)\n cbks = [keras.callbacks.TerminateOnNaN()]\n model = keras.models.Sequential()\n initializer = keras.initializers.Constant(value=1e5)\n for _ in range(5):\n model.add(keras.layers.Dense(2,\n input_dim=INPUT_DIM,\n activation='relu',\n kernel_initializer=initializer))\n model.add(keras.layers.Dense(NUM_CLASSES))\n model.compile(loss='mean_squared_error',\n optimizer='rmsprop')\n\n history = model.fit(x_train, y_train, batch_size=BATCH_SIZE,\n validation_data=(x_test, y_test),\n callbacks=cbks, epochs=20)\n loss = history.history['loss']\n assert len(loss) == 1\n assert loss[0] == np.inf\n\n def test_TensorBoard(self):\n np.random.seed(1337)\n\n temp_dir = self.get_temp_dir()\n self.addCleanup(shutil.rmtree, temp_dir)\n\n (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(\n train_samples=TRAIN_SAMPLES,\n test_samples=TEST_SAMPLES,\n input_shape=(INPUT_DIM,),\n num_classes=NUM_CLASSES)\n y_test = keras.utils.to_categorical(y_test)\n y_train = keras.utils.to_categorical(y_train)\n\n def data_generator(train):\n if train:\n max_batch_index = len(x_train) // BATCH_SIZE\n else:\n max_batch_index = len(x_test) // BATCH_SIZE\n i = 0\n while 1:\n if train:\n yield (x_train[i * BATCH_SIZE:(i + 1) * BATCH_SIZE],\n y_train[i * BATCH_SIZE:(i + 1) * BATCH_SIZE])\n else:\n yield (x_test[i * BATCH_SIZE:(i + 1) * BATCH_SIZE],\n y_test[i * BATCH_SIZE:(i + 1) * BATCH_SIZE])\n i += 1\n i %= max_batch_index\n\n # case: Sequential\n with self.test_session():\n model = keras.models.Sequential()\n 
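The two tests just above pair TerminateOnNaN with CSVLogger so that a diverging loss stops training without leaving a corrupt log, and test_TerminateOnNaN checks that the loss overflows on the very first epoch. A condensed sketch of that pairing, reusing the huge-constant initializer the test uses to force the blow-up (the data and log path are placeholders):

```python
import os
import tempfile
import numpy as np
from tensorflow.python.keras._impl import keras

x = np.random.random((20, 3))
y = np.random.random((20, 2))

# Huge constant weights make the mean-squared-error loss overflow immediately.
model = keras.models.Sequential()
initializer = keras.initializers.Constant(value=1e5)
for _ in range(5):
  model.add(keras.layers.Dense(2, input_dim=3, activation='relu',
                               kernel_initializer=initializer))
model.add(keras.layers.Dense(2))
model.compile(loss='mean_squared_error', optimizer='rmsprop')

log_path = os.path.join(tempfile.mkdtemp(), 'training.csv')
cbks = [keras.callbacks.TerminateOnNaN(), keras.callbacks.CSVLogger(log_path)]

# Training halts as soon as the loss becomes nan/inf; the CSV still closes cleanly.
history = model.fit(x, y, callbacks=cbks, epochs=20, verbose=0)
print('epochs run before termination:', len(history.history['loss']))
```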
model.add(\n keras.layers.Dense(\n NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))\n model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))\n model.compile(\n loss='categorical_crossentropy',\n optimizer='sgd',\n metrics=['accuracy'])\n tsb = keras.callbacks.TensorBoard(\n log_dir=temp_dir, histogram_freq=1, write_images=True,\n write_grads=True, batch_size=5)\n cbks = [tsb]\n\n # fit with validation data\n model.fit(\n x_train,\n y_train,\n batch_size=BATCH_SIZE,\n validation_data=(x_test, y_test),\n callbacks=cbks,\n epochs=3,\n verbose=0)\n\n # fit with validation data and accuracy\n model.fit(\n x_train,\n y_train,\n batch_size=BATCH_SIZE,\n validation_data=(x_test, y_test),\n callbacks=cbks,\n epochs=2,\n verbose=0)\n\n # fit generator with validation data\n model.fit_generator(\n data_generator(True),\n len(x_train),\n epochs=2,\n validation_data=(x_test, y_test),\n callbacks=cbks,\n verbose=0)\n\n # fit generator without validation data\n model.fit_generator(\n data_generator(True),\n len(x_train),\n epochs=2,\n callbacks=cbks,\n verbose=0)\n\n # fit generator with validation data and accuracy\n model.fit_generator(\n data_generator(True),\n len(x_train),\n epochs=2,\n validation_data=(x_test, y_test),\n callbacks=cbks,\n verbose=0)\n\n # fit generator without validation data and accuracy\n model.fit_generator(\n data_generator(True), len(x_train), epochs=2, callbacks=cbks)\n assert os.path.exists(temp_dir)\n\n def test_TensorBoard_histogram_freq_must_have_validation_data(self):\n np.random.seed(1337)\n tmpdir = self.get_temp_dir()\n self.addCleanup(shutil.rmtree, tmpdir)\n\n with self.test_session():\n filepath = os.path.join(tmpdir, 'logs')\n\n (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(\n train_samples=TRAIN_SAMPLES,\n test_samples=TEST_SAMPLES,\n input_shape=(INPUT_DIM,),\n num_classes=NUM_CLASSES)\n y_test = keras.utils.to_categorical(y_test)\n y_train = keras.utils.to_categorical(y_train)\n\n def data_generator(train):\n if train:\n max_batch_index = len(x_train) // BATCH_SIZE\n else:\n max_batch_index = len(x_test) // BATCH_SIZE\n i = 0\n while 1:\n if train:\n # simulate multi-input/output models\n yield (x_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE],\n y_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE])\n else:\n yield (x_test[i * BATCH_SIZE: (i + 1) * BATCH_SIZE],\n y_test[i * BATCH_SIZE: (i + 1) * BATCH_SIZE])\n i += 1\n i %= max_batch_index\n\n inp = keras.Input((INPUT_DIM,))\n hidden = keras.layers.Dense(2, activation='relu')(inp)\n hidden = keras.layers.Dropout(0.1)(hidden)\n output = keras.layers.Dense(NUM_CLASSES, activation='softmax')(hidden)\n model = keras.models.Model(inputs=inp, outputs=output)\n model.compile(loss='categorical_crossentropy',\n optimizer='sgd',\n metrics=['accuracy'])\n\n # we must generate new callbacks for each test, as they aren't stateless\n def callbacks_factory(histogram_freq):\n return [keras.callbacks.TensorBoard(\n log_dir=filepath,\n histogram_freq=histogram_freq,\n write_images=True, write_grads=True,\n batch_size=5)]\n\n # fit w/o validation data should raise ValueError if histogram_freq > 0\n cbs = callbacks_factory(histogram_freq=1)\n with self.assertRaises(ValueError):\n model.fit(\n x_train, y_train, batch_size=BATCH_SIZE, callbacks=cbs, epochs=3)\n\n for cb in cbs:\n cb.on_train_end()\n\n # fit generator without validation data should raise ValueError if\n # histogram_freq > 0\n cbs = callbacks_factory(histogram_freq=1)\n with self.assertRaises(ValueError):\n model.fit_generator(\n 
data_generator(True), len(x_train), epochs=2, callbacks=cbs)\n\n for cb in cbs:\n cb.on_train_end()\n\n # fit generator with validation data generator should raise ValueError if\n # histogram_freq > 0\n cbs = callbacks_factory(histogram_freq=1)\n with self.assertRaises(ValueError):\n model.fit_generator(\n data_generator(True),\n len(x_train),\n epochs=2,\n validation_data=data_generator(False),\n validation_steps=1,\n callbacks=cbs)\n\n for cb in cbs:\n cb.on_train_end()\n\n # Make sure file writer cache is clear to avoid failures during cleanup.\n writer_cache.FileWriterCache.clear()\n\n def test_TensorBoard_multi_input_output(self):\n np.random.seed(1337)\n tmpdir = self.get_temp_dir()\n self.addCleanup(shutil.rmtree, tmpdir)\n\n with self.test_session():\n filepath = os.path.join(tmpdir, 'logs')\n\n (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(\n train_samples=TRAIN_SAMPLES,\n test_samples=TEST_SAMPLES,\n input_shape=(INPUT_DIM,),\n num_classes=NUM_CLASSES)\n y_test = keras.utils.to_categorical(y_test)\n y_train = keras.utils.to_categorical(y_train)\n\n def data_generator(train):\n if train:\n max_batch_index = len(x_train) // BATCH_SIZE\n else:\n max_batch_index = len(x_test) // BATCH_SIZE\n i = 0\n while 1:\n if train:\n # simulate multi-input/output models\n yield ([x_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2,\n [y_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2)\n else:\n yield ([x_test[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2,\n [y_test[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2)\n i += 1\n i %= max_batch_index\n\n inp1 = keras.Input((INPUT_DIM,))\n inp2 = keras.Input((INPUT_DIM,))\n inp = keras.layers.add([inp1, inp2])\n hidden = keras.layers.Dense(2, activation='relu')(inp)\n hidden = keras.layers.Dropout(0.1)(hidden)\n output1 = keras.layers.Dense(NUM_CLASSES, activation='softmax')(hidden)\n output2 = keras.layers.Dense(NUM_CLASSES, activation='softmax')(hidden)\n model = keras.models.Model([inp1, inp2], [output1, output2])\n model.compile(loss='categorical_crossentropy',\n optimizer='sgd',\n metrics=['accuracy'])\n\n # we must generate new callbacks for each test, as they aren't stateless\n def callbacks_factory(histogram_freq):\n return [keras.callbacks.TensorBoard(log_dir=filepath,\n histogram_freq=histogram_freq,\n write_images=True, write_grads=True,\n batch_size=5)]\n\n # fit without validation data\n model.fit([x_train] * 2, [y_train] * 2, batch_size=BATCH_SIZE,\n callbacks=callbacks_factory(histogram_freq=0), epochs=3)\n\n # fit with validation data and accuracy\n model.fit([x_train] * 2, [y_train] * 2, batch_size=BATCH_SIZE,\n validation_data=([x_test] * 2, [y_test] * 2),\n callbacks=callbacks_factory(histogram_freq=1), epochs=2)\n\n # fit generator without validation data\n model.fit_generator(data_generator(True), len(x_train), epochs=2,\n callbacks=callbacks_factory(histogram_freq=0))\n\n # fit generator with validation data and accuracy\n model.fit_generator(data_generator(True), len(x_train), epochs=2,\n validation_data=([x_test] * 2, [y_test] * 2),\n callbacks=callbacks_factory(histogram_freq=1))\n assert os.path.isdir(filepath)\n\n @unittest.skipIf(\n os.name == 'nt',\n 'use_multiprocessing=True does not work on windows properly.')\n def test_LambdaCallback(self):\n with self.test_session():\n np.random.seed(1337)\n (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(\n train_samples=TRAIN_SAMPLES,\n test_samples=TEST_SAMPLES,\n input_shape=(INPUT_DIM,),\n num_classes=NUM_CLASSES)\n y_test = 
keras.utils.to_categorical(y_test)\n y_train = keras.utils.to_categorical(y_train)\n model = keras.models.Sequential()\n model.add(\n keras.layers.Dense(\n NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))\n model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))\n model.compile(\n loss='categorical_crossentropy',\n optimizer='sgd',\n metrics=['accuracy'])\n\n # Start an arbitrary process that should run during model\n # training and be terminated after training has completed.\n e = threading.Event()\n\n def target():\n e.wait()\n\n t = threading.Thread(target=target)\n t.start()\n cleanup_callback = keras.callbacks.LambdaCallback(\n on_train_end=lambda logs: e.set())\n\n cbks = [cleanup_callback]\n model.fit(\n x_train,\n y_train,\n batch_size=BATCH_SIZE,\n validation_data=(x_test, y_test),\n callbacks=cbks,\n epochs=5,\n verbose=0)\n t.join()\n assert not t.is_alive()\n\n def test_TensorBoard_with_ReduceLROnPlateau(self):\n with self.test_session():\n temp_dir = self.get_temp_dir()\n self.addCleanup(shutil.rmtree, temp_dir)\n\n (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(\n train_samples=TRAIN_SAMPLES,\n test_samples=TEST_SAMPLES,\n input_shape=(INPUT_DIM,),\n num_classes=NUM_CLASSES)\n y_test = keras.utils.to_categorical(y_test)\n y_train = keras.utils.to_categorical(y_train)\n\n model = keras.models.Sequential()\n model.add(\n keras.layers.Dense(\n NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))\n model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))\n model.compile(\n loss='binary_crossentropy', optimizer='sgd', metrics=['accuracy'])\n\n cbks = [\n keras.callbacks.ReduceLROnPlateau(\n monitor='val_loss', factor=0.5, patience=4, verbose=1),\n keras.callbacks.TensorBoard(log_dir=temp_dir)\n ]\n\n model.fit(\n x_train,\n y_train,\n batch_size=BATCH_SIZE,\n validation_data=(x_test, y_test),\n callbacks=cbks,\n epochs=2,\n verbose=0)\n\n assert os.path.exists(temp_dir)\n\n\nif __name__ == '__main__':\n test.main()\n", "# Copyright 2016 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for TensorQueueDataset.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.contrib.data.python.kernel_tests import dataset_serialization_test_base\nfrom tensorflow.contrib.training.python.training import tensor_queue_dataset as tqd\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import string_ops\nfrom tensorflow.python.platform import test\n\n\nclass PrependFromQueueAndPaddedBatchDatasetTest(test.TestCase):\n\n def testNoEnqueue(self):\n dataset = dataset_ops.Dataset.from_tensor_slices([0, 1, 2])\n dataset = dataset.apply(\n tqd.prepend_from_queue_and_padded_batch_dataset(batch_size=1))\n self.assertEqual((dtypes.variant, dtypes.int32), dataset.output_types)\n self.assertAllEqual(([None],) * 2,\n [x.as_list() for x in dataset.output_shapes])\n iterator = dataset.make_one_shot_iterator()\n _, value = iterator.get_next()\n self.assertEqual([0], self.evaluate(value))\n self.assertEqual([1], self.evaluate(value))\n self.assertEqual([2], self.evaluate(value))\n with self.assertRaisesOpError(\"End of sequence\"):\n self.evaluate(value)\n\n def testBatchedNoEnqueue(self):\n dataset = dataset_ops.Dataset.from_tensor_slices([0, 1, 2])\n dataset = dataset.apply(\n tqd.prepend_from_queue_and_padded_batch_dataset(batch_size=2))\n iterator = dataset.make_one_shot_iterator()\n _, value = iterator.get_next()\n self.assertAllEqual([0, 1], self.evaluate(value))\n self.assertAllEqual([2], self.evaluate(value))\n with self.assertRaisesOpError(\"End of sequence\"):\n self.evaluate(value)\n\n def testBatchedWithBiggerPaddingNoEnqueue(self):\n dataset = dataset_ops.Dataset.from_tensor_slices([[0], [1], [2]])\n dataset = dataset.apply(\n tqd.prepend_from_queue_and_padded_batch_dataset(\n batch_size=2, padded_shapes=[3]))\n iterator = dataset.make_one_shot_iterator()\n _, value = iterator.get_next()\n self.assertAllEqual([[0, 0, 0], [1, 0, 0]], self.evaluate(value))\n self.assertAllEqual([[2, 0, 0]], self.evaluate(value))\n with self.assertRaisesOpError(\"End of sequence\"):\n self.evaluate(value)\n\n def testBatchedWithBiggerPaddingOneEnqueue(self):\n dataset = dataset_ops.Dataset.from_tensor_slices([[0], [1], [2]])\n dataset = dataset.apply(\n tqd.prepend_from_queue_and_padded_batch_dataset(\n batch_size=1, padded_shapes=[3]))\n iterator = dataset.make_one_shot_iterator()\n queue_handle, value = iterator.get_next()\n enqueue_negative = tqd.enqueue_in_queue_dataset(queue_handle, -value)\n with self.test_session() as sess:\n self.assertAllEqual([[0, 0, 0]], sess.run(value))\n value_1, _ = 
sess.run([value, enqueue_negative])\n self.assertAllEqual([[1, 0, 0]], value_1)\n value_2, _ = sess.run([value, enqueue_negative])\n self.assertAllEqual([[-1, 0, 0]], value_2)\n value_3 = sess.run(value)\n self.assertAllEqual([[1, 0, 0]], value_3)\n value_4, _ = sess.run([value, enqueue_negative])\n self.assertAllEqual([[2, 0, 0]], value_4)\n value_5 = sess.run(value)\n self.assertAllEqual([[-2, 0, 0]], value_5)\n with self.assertRaisesOpError(\"End of sequence\"):\n sess.run(value)\n\n def testOneEnqueue(self):\n dataset = dataset_ops.Dataset.from_tensor_slices([0, 1, 2])\n dataset = dataset.apply(\n tqd.prepend_from_queue_and_padded_batch_dataset(batch_size=1))\n iterator = dataset.make_one_shot_iterator()\n queue_handle, value = iterator.get_next()\n enqueue_negative = tqd.enqueue_in_queue_dataset(queue_handle, -value)\n with self.test_session() as sess:\n self.assertEqual([0], sess.run(value))\n value_1, _ = sess.run([value, enqueue_negative])\n self.assertEqual([1], value_1)\n value_2, _ = sess.run([value, enqueue_negative])\n self.assertEqual([-1], value_2)\n value_3 = sess.run(value)\n self.assertEqual([1], value_3)\n value_4, _ = sess.run([value, enqueue_negative])\n self.assertEqual([2], value_4)\n value_5 = sess.run(value)\n self.assertEqual([-2], value_5)\n with self.assertRaisesOpError(\"End of sequence\"):\n sess.run(value)\n\n def testBatchedOneEnqueue(self):\n dataset = dataset_ops.Dataset.from_tensor_slices([0, 1, 2])\n dataset = dataset.apply(\n tqd.prepend_from_queue_and_padded_batch_dataset(batch_size=2))\n iterator = dataset.make_one_shot_iterator()\n queue_handle, value = iterator.get_next()\n enqueue_negative = tqd.enqueue_in_queue_dataset(queue_handle, -value)\n enqueue_zeroth = tqd.enqueue_in_queue_dataset([queue_handle[0]],\n array_ops.expand_dims(\n value[0], axis=0))\n with self.test_session() as sess:\n value_0, _ = sess.run([value, enqueue_negative])\n self.assertAllEqual([0, 1], value_0)\n value_1, _ = sess.run([value, enqueue_zeroth])\n self.assertAllEqual([0, -1], value_1)\n value_2, _ = sess.run([value, enqueue_negative])\n self.assertAllEqual([0, 2], value_2)\n self.assertAllEqual([0, -2], sess.run(value))\n with self.assertRaisesOpError(\"End of sequence\"):\n sess.run(value)\n\n def testManyEnqueue(self):\n dataset = dataset_ops.Dataset.from_tensor_slices([0, 1])\n dataset = dataset.apply(\n tqd.prepend_from_queue_and_padded_batch_dataset(batch_size=1))\n iterator = dataset.make_one_shot_iterator()\n queue_handle, value = iterator.get_next()\n enqueue_many_more = [\n tqd.enqueue_in_queue_dataset(queue_handle, value + 100 + i)\n for i in range(1000)\n ]\n with self.test_session() as sess:\n value_0, _ = sess.run((value, enqueue_many_more))\n self.assertEqual([0], value_0)\n rest = []\n for _ in range(1000):\n rest.append(sess.run(value))\n self.assertEquals([[100 + i] for i in range(1000)], sorted(rest))\n # Going back to the original input.\n value_1, _ = sess.run((value, enqueue_many_more))\n self.assertEqual(1, value_1)\n rest = []\n for _ in range(1000):\n rest.append(sess.run(value))\n self.assertEquals([[100 + i + 1] for i in range(1000)], sorted(rest))\n with self.assertRaisesOpError(\"End of sequence\"):\n sess.run(value)\n\n def testEnqueueWithPrefetch(self):\n dataset = dataset_ops.Dataset.from_tensor_slices([0])\n dataset = dataset.apply(\n tqd.prepend_from_queue_and_padded_batch_dataset(batch_size=1))\n # Prefetching will request additional values before they are\n # available to the queue.\n dataset = dataset.prefetch(buffer_size=3)\n 
iterator = dataset.make_one_shot_iterator()\n queue_handle, value = iterator.get_next()\n enqueue = tqd.enqueue_in_queue_dataset(queue_handle, value + 1)\n with self.test_session() as sess:\n i = 0\n while i < 4:\n received, _ = sess.run((value, enqueue))\n if received.size > 0:\n self.assertAllEqual([i], received)\n i += 1\n received_last = False\n while True:\n try:\n received = sess.run(value)\n if received.size > 0:\n self.assertAllEqual([4], received)\n received_last = True\n except errors.OutOfRangeError:\n break\n self.assertTrue(received_last)\n\n def testDatasetWithPaddedShapeSmallerThanInputFails(self):\n dataset = dataset_ops.Dataset.from_tensor_slices([[0, 0, 0]]).repeat(None)\n dataset = dataset.apply(\n tqd.prepend_from_queue_and_padded_batch_dataset(\n batch_size=1, padded_shapes=[2]))\n iterator = dataset.make_one_shot_iterator()\n _, value = iterator.get_next()\n with self.test_session() as sess:\n with self.assertRaisesOpError(\n r\"Incompatible input shapes at component 0 between \"\n r\"input dataset this dataset: \\[3\\] vs. \\[2\\]\"):\n sess.run(value)\n\n def testEnqueueWithIncompatibleInputsFailsWithInformativeError(self):\n dataset = dataset_ops.Dataset.from_tensor_slices([0]).repeat(None)\n dataset = dataset.apply(\n tqd.prepend_from_queue_and_padded_batch_dataset(batch_size=1))\n iterator = dataset.make_one_shot_iterator()\n queue_handle, value = iterator.get_next()\n\n enqueue_bad_structure = tqd.enqueue_in_queue_dataset(\n queue_handle, (value, value))\n enqueue_bad_dtype = tqd.enqueue_in_queue_dataset(queue_handle,\n np.array(\n [1.0],\n dtype=np.float32))\n enqueue_bad_shape_no_batch_dim = tqd.enqueue_in_queue_dataset(\n queue_handle, ([1],))\n enqueue_bad_shape = tqd.enqueue_in_queue_dataset(queue_handle,\n np.array(\n [[1]], dtype=np.int32))\n\n with self.test_session() as sess:\n with self.assertRaisesOpError(\n \"mismatched number of tensors. Queue expects 1 tensors but \"\n \"tried to insert 2\"):\n sess.run(enqueue_bad_structure)\n with self.assertRaisesOpError(r\"Expected component 0 to have batched \"\n r\"shape \\[1,...\\], but saw shape: \\[\\]\"):\n sess.run(enqueue_bad_shape_no_batch_dim)\n with self.assertRaisesOpError(\n r\"mismatched shapes at component 0. Attempted to insert tensor \"\n r\"with shape \\[1\\] but queue expected shape: \\[\\]\"):\n sess.run(enqueue_bad_shape)\n with self.assertRaisesOpError(\n r\"mismatched dtypes at component 0. 
Attempted to insert tensor \"\n r\"of type float but queue expected type: int32\"):\n sess.run(enqueue_bad_dtype)\n\n def testEnqueueWithPaddedBatchFailsWithInformativeError(self):\n dataset = dataset_ops.Dataset.from_tensor_slices([0, 1, 2])\n dataset = dataset.apply(\n tqd.prepend_from_queue_and_padded_batch_dataset(batch_size=1))\n with self.assertRaisesRegexp(\n TypeError, r\"Unable to create padding for field of type 'variant'\"):\n dataset.padded_batch(batch_size=10, padded_shapes=[1])\n\n def testOneEnqueueWithPadding(self):\n dataset = dataset_ops.Dataset.from_tensor_slices([0, 2, 4, 6])\n # Make a dataset of variable-length vectors and their lengths.\n dataset = dataset.map(\n lambda c: (c, c * array_ops.ones((c,), dtype=c.dtype)))\n # Emit a queue we can prepend to, and counts/values as padded\n # batch.\n dataset = dataset.apply(\n tqd.prepend_from_queue_and_padded_batch_dataset(batch_size=3))\n\n iterator = dataset.make_one_shot_iterator()\n queue, (count, padded_value) = iterator.get_next()\n\n # Split the padded_value into two pieces: head and rest\n rest_indices = array_ops.squeeze(array_ops.where(count > 2), axis=1)\n bound = math_ops.minimum(2, math_ops.reduce_max(count))\n value_head = padded_value[:, :bound]\n count_rest = array_ops.gather(count - 2, rest_indices)\n value_rest = array_ops.gather(padded_value, rest_indices)[:, bound:]\n queue_rest = array_ops.gather(queue, rest_indices)\n enqueue_rest_op = tqd.enqueue_in_queue_dataset(queue_rest,\n (count_rest, value_rest))\n with ops.control_dependencies([enqueue_rest_op]):\n calc = array_ops.identity(value_head)\n\n with self.test_session() as sess:\n self.assertAllEqual([[0, 0], [2, 2], [4, 4]], sess.run(calc))\n self.assertAllEqual([[4, 4], [6, 6]], sess.run(calc))\n self.assertAllEqual([[6, 6]], sess.run(calc))\n self.assertAllEqual([[6, 6]], sess.run(calc))\n # Get some final batches due to prefetching.\n for _ in range(3):\n try:\n self.assertAllEqual(\n np.empty(shape=(0, 0), dtype=np.int32), sess.run(calc))\n except errors.OutOfRangeError as e:\n self.assertTrue(str(e).startswith(\"End of sequence\"))\n\n def testNonstandardPadding(self):\n dataset = dataset_ops.Dataset.from_tensor_slices([0, 2, 4, 6])\n # Make a dataset of variable-length vectors and their lengths.\n dataset = dataset.map(\n lambda c: (c, c * array_ops.ones((c,), dtype=c.dtype)))\n # Emit a queue we can prepend to, and counts/values as padded\n # batch.\n dataset = dataset.apply(\n tqd.prepend_from_queue_and_padded_batch_dataset(\n batch_size=3, padding_values=(\n 0,\n -1,\n )))\n\n iterator = dataset.make_one_shot_iterator()\n _, (unused_count, padded_value) = iterator.get_next()\n\n with self.test_session() as sess:\n self.assertAllEqual([[-1, -1, -1, -1], [2, 2, -1, -1], [4, 4, 4, 4]],\n sess.run(padded_value))\n self.assertAllEqual([[6] * 6], sess.run(padded_value))\n with self.assertRaisesOpError(\"End of sequence\"):\n sess.run(padded_value)\n\n\n# TODO(ebrevdo): Figure out how to use run_core_tests to test state\n# saving of an iterator that's had some tensors enqueued into its queue.\nclass PrependFromQueueAndPaddedBatchDatasetSerializationTest(\n dataset_serialization_test_base.DatasetSerializationTestBase):\n\n def testPrependFromQueueAndPaddedBatch(self):\n\n def build_dataset(seq_lens):\n return dataset_ops.Dataset.from_tensor_slices(seq_lens).map(\n lambda x: array_ops.fill([x], x)).apply(\n tqd.prepend_from_queue_and_padded_batch_dataset(batch_size=4))\n\n seq_lens1 = np.random.randint(1, 20, size=(32,)).astype(np.int32)\n 
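
# A minimal usage sketch of the queue-dataset pattern exercised by the tests
# above. It assumes the same tf.contrib `tensor_queue_dataset` module and a
# TF 1.x session; the helper name `_enqueue_roundtrip_sketch` is illustrative
# only and not part of the TensorFlow sources.
def _enqueue_roundtrip_sketch():
  import tensorflow as tf
  from tensorflow.contrib.training.python.training import tensor_queue_dataset as tqd

  dataset = tf.data.Dataset.from_tensor_slices([0, 1, 2])
  # Each element now comes paired with a handle to a per-iterator queue.
  dataset = dataset.apply(
      tqd.prepend_from_queue_and_padded_batch_dataset(batch_size=1))
  queue_handle, value = dataset.make_one_shot_iterator().get_next()
  # Values fed back through the queue are dequeued before the remaining input,
  # which is what testOneEnqueue above asserts step by step.
  enqueue = tqd.enqueue_in_queue_dataset(queue_handle, -value)
  with tf.Session() as sess:
    first = sess.run(value)                  # [0]
    second, _ = sess.run([value, enqueue])   # [1], and [-1] is enqueued
    third = sess.run(value)                  # [-1] comes back before [2]
  return first, second, third
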
seq_lens2 = np.random.randint(21, 40, size=(32,)).astype(np.int32)\n self.run_core_tests(lambda: build_dataset(seq_lens1),\n lambda: build_dataset(seq_lens2), 8)\n\n def testPrependFromQueueAndPaddedBatchNonDefaultPadding(self):\n\n def build_dataset(seq_lens):\n\n def fill_tuple(x):\n filled = array_ops.fill([x], x)\n return (filled, string_ops.as_string(filled))\n\n padded_shape = [-1]\n return dataset_ops.Dataset.from_tensor_slices(seq_lens).map(\n fill_tuple).apply(\n tqd.prepend_from_queue_and_padded_batch_dataset(\n batch_size=4,\n padded_shapes=(padded_shape, padded_shape),\n padding_values=(-1, \"<end>\")))\n\n seq_lens1 = np.random.randint(1, 20, size=(32,)).astype(np.int32)\n seq_lens2 = np.random.randint(21, 40, size=(32,)).astype(np.int32)\n self.run_core_tests(lambda: build_dataset(seq_lens1),\n lambda: build_dataset(seq_lens2), 8)\n\n\nif __name__ == \"__main__\":\n test.main()\n", "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Abstractions for the head(s) of a model.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport six\n\nfrom tensorflow.python.estimator import model_fn\nfrom tensorflow.python.estimator.canned import head as head_lib\nfrom tensorflow.python.estimator.canned import metric_keys\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import metrics as metrics_lib\nfrom tensorflow.python.saved_model import signature_constants\nfrom tensorflow.python.summary import summary\n\n\n_DEFAULT_SERVING_KEY = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY\n\n\ndef multi_head(heads, head_weights=None):\n \"\"\"Creates a `_Head` for multi-objective learning.\n\n This class merges the output of multiple `_Head` objects.\n Specifically:\n * For training, sums losses of each head, calls `train_op_fn` with this\n final loss.\n * For eval, merges metrics by adding `head.name` suffix to the keys in eval\n metrics, such as `precision/head1`, `precision/head2`.\n * For prediction, merges predictions and updates keys in prediction dict to a\n 2-tuple, `(head.name, prediction_key)`. 
Merges `export_outputs` such that\n by default the first head is served.\n\n Usage:\n\n ```python\n # In `input_fn` specify labels as a dict keyed by head name:\n def input_fn():\n features = ...\n labels1 = ...\n labels2 = ...\n return features, {'head1': labels1, 'head2': labels2}\n\n # In `model_fn`, specify logits as a dict keyed by head name:\n def model_fn(features, labels, mode):\n # Create simple heads and specify head name.\n head1 = multi_class_head(n_classes=3, name='head1')\n head2 = binary_classification_head(name='head2')\n # Create multi-head from two simple heads.\n head = multi_head([head1, head2])\n # Create logits for each head, and combine them into a dict.\n logits1, logits2 = logit_fn()\n logits = {'head1': logits1, 'head2': logits2}\n # Return the merged EstimatorSpec\n return head.create_estimator_spec(..., logits=logits, ...)\n\n # Create an estimator with this model_fn.\n estimator = tf.estimator.Estimator(model_fn=model_fn)\n estimator.train(input_fn=input_fn, steps=100)\n ```\n\n Also supports `logits` as a `Tensor` of shape\n `[D0, D1, ... DN, logits_dimension]`. It will split the `Tensor` along the\n last dimension and distribute it appropriately among the heads. E.g.:\n\n ```python\n def model_fn(features, labels, mode):\n # Create simple heads and specify head name.\n head1 = multi_class_head(n_classes=3, name='head1')\n head2 = binary_classification_head(name='head2')\n # Create multi-head from two simple heads.\n head = multi_head([head1, head2])\n # Create logits for the multihead.\n logits = logit_fn(logits_dimension=head.logits_dimension)\n # Return the merged EstimatorSpec\n return head.create_estimator_spec(..., logits=logits, ...)\n ```\n\n Args:\n heads: List or tuple of `_Head` instances. All heads must have `name`\n specified. The first head in the list is the default used at serving time.\n head_weights: Optional list of weights, same length as `heads`. Used when\n merging losses to calculate the weighted sum of losses from each head. If\n `None`, all losses are weighted equally.\n\n Returns:\n A instance of `_Head` that merges multiple heads.\n\n Raises:\n ValueError: If `heads` is empty.\n ValueError: If any of the `heads` does not have `name` specified.\n ValueError: If `heads` and `head_weights` have different size.\n \"\"\"\n if head_weights:\n if len(head_weights) != len(heads):\n raise ValueError(\n 'heads and head_weights must have the same size. '\n 'Given len(heads): {}. Given len(head_weights): {}.'.format(\n len(heads), len(head_weights)))\n if not heads:\n raise ValueError('Must specify heads. Given: {}'.format(heads))\n for head in heads:\n if not head.name:\n raise ValueError(\n 'All given heads must have name specified. 
'\n 'Given: {}'.format(head))\n\n return _MultiHead(\n heads=tuple(heads),\n head_weights=tuple(head_weights) if head_weights else tuple())\n\n\ndef _no_op_train_fn(loss):\n del loss\n return control_flow_ops.no_op()\n\n\ndef _merge_losses(losses, head_weights=None):\n \"\"\"Merges the given losses into one tensor.\"\"\"\n losses = tuple(losses)\n with ops.name_scope(\n 'merge_losses', values=losses + (head_weights or tuple())):\n if head_weights:\n weighted_losses = []\n for loss, weight in zip(losses, head_weights):\n weighted_losses.append(math_ops.multiply(loss, weight))\n else:\n weighted_losses = losses\n return math_ops.add_n(weighted_losses)\n\n\ndef _default_export_output(export_outputs, head_name):\n \"\"\"Extracts the default export output from the given export_outputs dict.\"\"\"\n if len(export_outputs) == 1:\n return next(six.itervalues(export_outputs))\n for k, v in six.iteritems(export_outputs):\n if k == _DEFAULT_SERVING_KEY:\n return v\n raise ValueError(\n '{} did not specify default export_outputs. '\n 'Given: {} '\n 'Suggested fix: Use one of the heads in tf.contrib.estimator, or include '\n 'key {} in export_outputs.'.format(\n head_name, export_outputs, _DEFAULT_SERVING_KEY))\n\n\nclass _MultiHead(head_lib._Head): # pylint:disable=protected-access\n \"\"\"`_Head` for multi objective learning.\"\"\"\n\n def __init__(self, heads, head_weights):\n self._logits_dimension = 0\n for head in heads:\n self._logits_dimension += head.logits_dimension\n\n self._heads = heads\n self._head_weights = head_weights\n\n @property\n def name(self):\n return '_'.join([h.name for h in self._heads])\n\n @property\n def logits_dimension(self):\n return self._logits_dimension\n\n def create_loss(self, features, mode, logits, labels):\n \"\"\"See `Head`.\"\"\"\n if isinstance(logits, dict):\n logits_dict = logits\n else:\n logits_dict = self._split_logits(logits)\n training_losses = []\n labels_by_head = {}\n unreduced_losses_by_head = {}\n example_weights_by_head = {}\n for i, head in enumerate(self._heads):\n (training_loss, unreduced_loss,\n weights, processed_labels) = head.create_loss(\n features, mode, logits_dict[head.name], labels[head.name])\n training_losses.append(training_loss)\n labels_by_head[head.name] = processed_labels\n if self._head_weights:\n head_weight = self._head_weights[i]\n unreduced_losses_by_head[head.name] = math_ops.multiply(\n unreduced_loss, head_weight)\n example_weights_by_head[head.name] = math_ops.multiply(\n weights, head_weight)\n else:\n unreduced_losses_by_head[head.name] = unreduced_loss\n example_weights_by_head[head.name] = weights\n\n training_losses = tuple(training_losses)\n with ops.name_scope(\n 'merge_losses',\n values=training_losses + (self._head_weights or tuple())):\n if self._head_weights:\n head_weighted_training_losses = []\n for training_loss, head_weight in zip(\n training_losses, self._head_weights):\n head_weighted_training_losses.append(\n math_ops.multiply(training_loss, head_weight))\n merged_training_loss = math_ops.add_n(head_weighted_training_losses)\n else:\n merged_training_loss = math_ops.add_n(training_losses)\n\n return head_lib.LossSpec(\n training_loss=merged_training_loss,\n unreduced_loss=unreduced_losses_by_head,\n weights=example_weights_by_head,\n processed_labels=labels_by_head)\n\n def create_estimator_spec(\n self, features, mode, logits, labels=None, train_op_fn=None):\n \"\"\"See `_Head`.\"\"\"\n if isinstance(logits, dict):\n logits_dict = logits\n else:\n logits_dict = self._split_logits(logits)\n if labels 
and not isinstance(labels, dict):\n raise ValueError('labels must be a dict. Given: {}'.format(labels))\n\n all_estimator_spec = []\n for head in self._heads:\n head_name = head.name\n all_estimator_spec.append(\n head.create_estimator_spec(\n features=features,\n mode=mode,\n logits=logits_dict[head_name],\n labels=labels[head_name] if labels else None,\n train_op_fn=_no_op_train_fn))\n\n if mode == model_fn.ModeKeys.TRAIN:\n if train_op_fn is None:\n raise ValueError('train_op_fn can not be None in TRAIN mode.')\n spec = self._merge_train(all_estimator_spec, train_op_fn)\n with ops.name_scope(''):\n summary.scalar(metric_keys.MetricKeys.LOSS, spec.loss)\n return spec\n if mode == model_fn.ModeKeys.PREDICT:\n return self._merge_predict(all_estimator_spec)\n if mode == model_fn.ModeKeys.EVAL:\n return self._merge_eval(all_estimator_spec)\n raise ValueError('mode={} unrecognized'.format(mode))\n\n def _split_logits(self, logits):\n \"\"\"Splits logits along the last dimension and returns a dict.\"\"\"\n logits_dict = {}\n with ops.name_scope(None, 'split_logits', values=[logits]):\n logits = ops.convert_to_tensor(logits)\n batch_shape = array_ops.shape(logits)[:-1]\n zeros_like_batch_shape = array_ops.zeros_like(batch_shape)\n minus_ones_like_batch_shape = -1 * array_ops.ones_like(batch_shape)\n begin_idx = 0\n for head in self._heads:\n begin_tensor = array_ops.concat(\n [zeros_like_batch_shape, [begin_idx]], axis=0)\n size_tensor = array_ops.concat(\n [minus_ones_like_batch_shape, [head.logits_dimension]], axis=0)\n logits_dict[head.name] = array_ops.slice(\n logits, begin=begin_tensor, size=size_tensor)\n begin_idx += head.logits_dimension\n return logits_dict\n\n def _merge_train(self, all_estimator_spec, train_op_fn):\n \"\"\"Merges list of `EstimatorSpec` for training.\n\n Args:\n all_estimator_spec: list of `EstimatorSpec` for the individual heads.\n train_op_fn: Function to create train op. 
See `create_estimator_spec`\n documentation for more details.\n\n Returns:\n `EstimatorSpec` that merges all heads for TRAIN.\n \"\"\"\n losses = []\n metrics = {}\n for spec in all_estimator_spec:\n losses.append(spec.loss)\n # Metric keys already contain head.name.\n metrics.update(spec.eval_metric_ops or {})\n loss = _merge_losses(losses, self._head_weights)\n\n return model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.TRAIN,\n loss=loss,\n train_op=train_op_fn(loss),\n eval_metric_ops=metrics)\n\n def _merge_predict(self, all_estimator_spec):\n \"\"\"Merges list of `EstimatorSpec` for prediction.\n\n Args:\n all_estimator_spec: list of `EstimatorSpec` for the individual heads.\n\n Returns:\n `EstimatorSpec` that merges all heads for PREDICT.\n \"\"\"\n predictions = {}\n export_outputs = {\n _DEFAULT_SERVING_KEY: _default_export_output(\n all_estimator_spec[0].export_outputs,\n self._heads[0].name),\n }\n for head, spec in zip(self._heads, all_estimator_spec):\n head_name = head.name\n for k, v in six.iteritems(spec.export_outputs):\n if k == _DEFAULT_SERVING_KEY:\n key = head_name\n else:\n key = '%s/%s' % (k, head_name)\n export_outputs[key] = v\n for k, v in six.iteritems(spec.predictions):\n predictions[(head_name, k)] = v\n\n return model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.PREDICT,\n predictions=predictions,\n export_outputs=export_outputs)\n\n def _merge_eval(self, all_estimator_spec):\n \"\"\"Merges list of `EstimatorSpec` for eval.\n\n Args:\n all_estimator_spec: list of `EstimatorSpec` for the individual heads.\n\n Returns:\n `EstimatorSpec` that merges all heads for EVAL.\n \"\"\"\n predictions = {}\n metrics = {}\n losses = []\n with ops.name_scope('merge_eval'):\n for head, spec in zip(self._heads, all_estimator_spec):\n losses.append(spec.loss)\n head_name = head.name\n # Loss metric is not added by default.\n loss_name = head_lib._summary_key( # pylint:disable=protected-access\n head_name, metric_keys.MetricKeys.LOSS)\n metrics[loss_name] = metrics_lib.mean(spec.loss, name=loss_name)\n # Metric keys already contain head.name.\n metrics.update(spec.eval_metric_ops or {})\n for k, v in six.iteritems(spec.predictions):\n predictions[(head_name, k)] = v\n loss = _merge_losses(losses, self._head_weights)\n\n return model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.EVAL,\n predictions=predictions,\n loss=loss,\n eval_metric_ops=metrics)\n", "# Copyright 2016 The TensorFlow Authors. 
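
# The two pieces of bookkeeping `_MultiHead` performs, restated with plain
# NumPy so the slicing and weighting are easy to follow. Head names, sizes and
# weights below are made-up examples; the helper is illustrative only.
def _multi_head_bookkeeping_sketch():
  import numpy as np

  # A combined logits Tensor for a 3-class head and a binary head has
  # logits_dimension = 3 + 1 = 4; _split_logits slices it along the last axis.
  logits = np.arange(8.0).reshape(2, 4)            # [batch=2, 4]
  head_dims = [('head1', 3), ('head2', 1)]
  logits_dict, begin_idx = {}, 0
  for name, dim in head_dims:
    logits_dict[name] = logits[..., begin_idx:begin_idx + dim]
    begin_idx += dim
  # logits_dict['head1'].shape == (2, 3), logits_dict['head2'].shape == (2, 1)

  # _merge_losses is an (optionally head-weighted) sum of the per-head losses.
  losses = {'head1': 0.7, 'head2': 0.2}
  head_weights = {'head1': 1.0, 'head2': 2.0}
  merged_loss = sum(head_weights[k] * v for k, v in losses.items())  # 1.1
  return logits_dict, merged_loss
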
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"The DirichletMultinomial distribution class.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import check_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops import special_math_ops\nfrom tensorflow.python.ops.distributions import distribution\nfrom tensorflow.python.ops.distributions import util as distribution_util\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n__all__ = [\n \"DirichletMultinomial\",\n]\n\n\n_dirichlet_multinomial_sample_note = \"\"\"For each batch of counts,\n`value = [n_0, ..., n_{K-1}]`, `P[value]` is the probability that after\nsampling `self.total_count` draws from this Dirichlet-Multinomial distribution,\nthe number of draws falling in class `j` is `n_j`. Since this definition is\n[exchangeable](https://en.wikipedia.org/wiki/Exchangeable_random_variables);\ndifferent sequences have the same counts so the probability includes a\ncombinatorial coefficient.\n\nNote: `value` must be a non-negative tensor with dtype `self.dtype`, have no\nfractional components, and such that\n`tf.reduce_sum(value, -1) = self.total_count`. Its shape must be broadcastable\nwith `self.concentration` and `self.total_count`.\"\"\"\n\n\n@tf_export(\"distributions.DirichletMultinomial\")\nclass DirichletMultinomial(distribution.Distribution):\n \"\"\"Dirichlet-Multinomial compound distribution.\n\n The Dirichlet-Multinomial distribution is parameterized by a (batch of)\n length-`K` `concentration` vectors (`K > 1`) and a `total_count` number of\n trials, i.e., the number of trials per draw from the DirichletMultinomial. It\n is defined over a (batch of) length-`K` vector `counts` such that\n `tf.reduce_sum(counts, -1) = total_count`. The Dirichlet-Multinomial is\n identically the Beta-Binomial distribution when `K = 2`.\n\n #### Mathematical Details\n\n The Dirichlet-Multinomial is a distribution over `K`-class counts, i.e., a\n length-`K` vector of non-negative integer `counts = n = [n_0, ..., n_{K-1}]`.\n\n The probability mass function (pmf) is,\n\n ```none\n pmf(n; alpha, N) = Beta(alpha + n) / (prod_j n_j!) 
/ Z\n Z = Beta(alpha) / N!\n ```\n\n where:\n\n * `concentration = alpha = [alpha_0, ..., alpha_{K-1}]`, `alpha_j > 0`,\n * `total_count = N`, `N` a positive integer,\n * `N!` is `N` factorial, and,\n * `Beta(x) = prod_j Gamma(x_j) / Gamma(sum_j x_j)` is the\n [multivariate beta function](\n https://en.wikipedia.org/wiki/Beta_function#Multivariate_beta_function),\n and,\n * `Gamma` is the [gamma function](\n https://en.wikipedia.org/wiki/Gamma_function).\n\n Dirichlet-Multinomial is a [compound distribution](\n https://en.wikipedia.org/wiki/Compound_probability_distribution), i.e., its\n samples are generated as follows.\n\n 1. Choose class probabilities:\n `probs = [p_0,...,p_{K-1}] ~ Dir(concentration)`\n 2. Draw integers:\n `counts = [n_0,...,n_{K-1}] ~ Multinomial(total_count, probs)`\n\n The last `concentration` dimension parametrizes a single Dirichlet-Multinomial\n distribution. When calling distribution functions (e.g., `dist.prob(counts)`),\n `concentration`, `total_count` and `counts` are broadcast to the same shape.\n The last dimension of `counts` corresponds single Dirichlet-Multinomial\n distributions.\n\n Distribution parameters are automatically broadcast in all functions; see\n examples for details.\n\n #### Pitfalls\n\n The number of classes, `K`, must not exceed:\n - the largest integer representable by `self.dtype`, i.e.,\n `2**(mantissa_bits+1)` (IEE754),\n - the maximum `Tensor` index, i.e., `2**31-1`.\n\n In other words,\n\n ```python\n K <= min(2**31-1, {\n tf.float16: 2**11,\n tf.float32: 2**24,\n tf.float64: 2**53 }[param.dtype])\n ```\n\n Note: This condition is validated only when `self.validate_args = True`.\n\n #### Examples\n\n ```python\n alpha = [1., 2., 3.]\n n = 2.\n dist = DirichletMultinomial(n, alpha)\n ```\n\n Creates a 3-class distribution, with the 3rd class is most likely to be\n drawn.\n The distribution functions can be evaluated on counts.\n\n ```python\n # counts same shape as alpha.\n counts = [0., 0., 2.]\n dist.prob(counts) # Shape []\n\n # alpha will be broadcast to [[1., 2., 3.], [1., 2., 3.]] to match counts.\n counts = [[1., 1., 0.], [1., 0., 1.]]\n dist.prob(counts) # Shape [2]\n\n # alpha will be broadcast to shape [5, 7, 3] to match counts.\n counts = [[...]] # Shape [5, 7, 3]\n dist.prob(counts) # Shape [5, 7]\n ```\n\n Creates a 2-batch of 3-class distributions.\n\n ```python\n alpha = [[1., 2., 3.], [4., 5., 6.]] # Shape [2, 3]\n n = [3., 3.]\n dist = DirichletMultinomial(n, alpha)\n\n # counts will be broadcast to [[2., 1., 0.], [2., 1., 0.]] to match alpha.\n counts = [2., 1., 0.]\n dist.prob(counts) # Shape [2]\n ```\n\n \"\"\"\n\n # TODO(b/27419586) Change docstring for dtype of concentration once int\n # allowed.\n def __init__(self,\n total_count,\n concentration,\n validate_args=False,\n allow_nan_stats=True,\n name=\"DirichletMultinomial\"):\n \"\"\"Initialize a batch of DirichletMultinomial distributions.\n\n Args:\n total_count: Non-negative floating point tensor, whose dtype is the same\n as `concentration`. The shape is broadcastable to `[N1,..., Nm]` with\n `m >= 0`. Defines this as a batch of `N1 x ... x Nm` different\n Dirichlet multinomial distributions. Its components should be equal to\n integer values.\n concentration: Positive floating point tensor, whose dtype is the\n same as `n` with shape broadcastable to `[N1,..., Nm, K]` `m >= 0`.\n Defines this as a batch of `N1 x ... x Nm` different `K` class Dirichlet\n multinomial distributions.\n validate_args: Python `bool`, default `False`. 
When `True` distribution\n parameters are checked for validity despite possibly degrading runtime\n performance. When `False` invalid inputs may silently render incorrect\n outputs.\n allow_nan_stats: Python `bool`, default `True`. When `True`, statistics\n (e.g., mean, mode, variance) use the value \"`NaN`\" to indicate the\n result is undefined. When `False`, an exception is raised if one or\n more of the statistic's batch members are undefined.\n name: Python `str` name prefixed to Ops created by this class.\n \"\"\"\n parameters = locals()\n with ops.name_scope(name, values=[total_count, concentration]):\n # Broadcasting works because:\n # * The broadcasting convention is to prepend dimensions of size [1], and\n # we use the last dimension for the distribution, whereas\n # the batch dimensions are the leading dimensions, which forces the\n # distribution dimension to be defined explicitly (i.e. it cannot be\n # created automatically by prepending). This forces enough explicitness.\n # * All calls involving `counts` eventually require a broadcast between\n # `counts` and concentration.\n self._total_count = ops.convert_to_tensor(total_count, name=\"total_count\")\n if validate_args:\n self._total_count = (\n distribution_util.embed_check_nonnegative_integer_form(\n self._total_count))\n self._concentration = self._maybe_assert_valid_concentration(\n ops.convert_to_tensor(concentration,\n name=\"concentration\"),\n validate_args)\n self._total_concentration = math_ops.reduce_sum(self._concentration, -1)\n super(DirichletMultinomial, self).__init__(\n dtype=self._concentration.dtype,\n validate_args=validate_args,\n allow_nan_stats=allow_nan_stats,\n reparameterization_type=distribution.NOT_REPARAMETERIZED,\n parameters=parameters,\n graph_parents=[self._total_count,\n self._concentration],\n name=name)\n\n @property\n def total_count(self):\n \"\"\"Number of trials used to construct a sample.\"\"\"\n return self._total_count\n\n @property\n def concentration(self):\n \"\"\"Concentration parameter; expected prior counts for that coordinate.\"\"\"\n return self._concentration\n\n @property\n def total_concentration(self):\n \"\"\"Sum of last dim of concentration parameter.\"\"\"\n return self._total_concentration\n\n def _batch_shape_tensor(self):\n return array_ops.shape(self.total_concentration)\n\n def _batch_shape(self):\n return self.total_concentration.get_shape()\n\n def _event_shape_tensor(self):\n return array_ops.shape(self.concentration)[-1:]\n\n def _event_shape(self):\n # Event shape depends only on total_concentration, not \"n\".\n return self.concentration.get_shape().with_rank_at_least(1)[-1:]\n\n def _sample_n(self, n, seed=None):\n n_draws = math_ops.cast(self.total_count, dtype=dtypes.int32)\n k = self.event_shape_tensor()[0]\n unnormalized_logits = array_ops.reshape(\n math_ops.log(random_ops.random_gamma(\n shape=[n],\n alpha=self.concentration,\n dtype=self.dtype,\n seed=seed)),\n shape=[-1, k])\n draws = random_ops.multinomial(\n logits=unnormalized_logits,\n num_samples=n_draws,\n seed=distribution_util.gen_new_seed(seed, salt=\"dirichlet_multinomial\"))\n x = math_ops.reduce_sum(array_ops.one_hot(draws, depth=k), -2)\n final_shape = array_ops.concat([[n], self.batch_shape_tensor(), [k]], 0)\n x = array_ops.reshape(x, final_shape)\n return math_ops.cast(x, self.dtype)\n\n @distribution_util.AppendDocstring(_dirichlet_multinomial_sample_note)\n def _log_prob(self, counts):\n counts = self._maybe_assert_valid_sample(counts)\n ordered_prob = (\n 
special_math_ops.lbeta(self.concentration + counts)\n - special_math_ops.lbeta(self.concentration))\n return ordered_prob + distribution_util.log_combinations(\n self.total_count, counts)\n\n @distribution_util.AppendDocstring(_dirichlet_multinomial_sample_note)\n def _prob(self, counts):\n return math_ops.exp(self._log_prob(counts))\n\n def _mean(self):\n return self.total_count * (self.concentration /\n self.total_concentration[..., array_ops.newaxis])\n\n @distribution_util.AppendDocstring(\n \"\"\"The covariance for each batch member is defined as the following:\n\n ```none\n Var(X_j) = n * alpha_j / alpha_0 * (1 - alpha_j / alpha_0) *\n (n + alpha_0) / (1 + alpha_0)\n ```\n\n where `concentration = alpha` and\n `total_concentration = alpha_0 = sum_j alpha_j`.\n\n The covariance between elements in a batch is defined as:\n\n ```none\n Cov(X_i, X_j) = -n * alpha_i * alpha_j / alpha_0 ** 2 *\n (n + alpha_0) / (1 + alpha_0)\n ```\n \"\"\")\n def _covariance(self):\n x = self._variance_scale_term() * self._mean()\n return array_ops.matrix_set_diag(\n -math_ops.matmul(x[..., array_ops.newaxis],\n x[..., array_ops.newaxis, :]), # outer prod\n self._variance())\n\n def _variance(self):\n scale = self._variance_scale_term()\n x = scale * self._mean()\n return x * (self.total_count * scale - x)\n\n def _variance_scale_term(self):\n \"\"\"Helper to `_covariance` and `_variance` which computes a shared scale.\"\"\"\n # We must take care to expand back the last dim whenever we use the\n # total_concentration.\n c0 = self.total_concentration[..., array_ops.newaxis]\n return math_ops.sqrt((1. + c0 / self.total_count) / (1. + c0))\n\n def _maybe_assert_valid_concentration(self, concentration, validate_args):\n \"\"\"Checks the validity of the concentration parameter.\"\"\"\n if not validate_args:\n return concentration\n concentration = distribution_util.embed_check_categorical_event_shape(\n concentration)\n return control_flow_ops.with_dependencies([\n check_ops.assert_positive(\n concentration,\n message=\"Concentration parameter must be positive.\"),\n ], concentration)\n\n def _maybe_assert_valid_sample(self, counts):\n \"\"\"Check counts for proper shape, values, then return tensor version.\"\"\"\n if not self.validate_args:\n return counts\n counts = distribution_util.embed_check_nonnegative_integer_form(counts)\n return control_flow_ops.with_dependencies([\n check_ops.assert_equal(\n self.total_count, math_ops.reduce_sum(counts, -1),\n message=\"counts last-dimension must sum to `self.total_count`\"),\n ], counts)\n", "# Copyright 2017 The TensorFlow Authors. 
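
# A dependency-free worked evaluation of the pmf defined in the class
# docstring, using Beta(x) = prod_j Gamma(x_j) / Gamma(sum_j x_j). With
# concentration = [1., 2., 3.], total_count = 2. and counts = [0., 0., 2.],
# this gives 2/7 (~0.2857) for the first docstring example. The helper name
# is illustrative only and not part of the TensorFlow sources.
def _dirichlet_multinomial_pmf_sketch(counts, concentration, total_count):
  from math import exp, lgamma

  def log_beta(xs):
    xs = list(xs)
    return sum(lgamma(x) for x in xs) - lgamma(sum(xs))

  # log N! - sum_j log n_j!  (the combinatorial coefficient)
  log_combinations = (lgamma(total_count + 1)
                      - sum(lgamma(c + 1) for c in counts))
  log_pmf = (log_combinations
             + log_beta(a + c for a, c in zip(concentration, counts))
             - log_beta(concentration))
  return exp(log_pmf)
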
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for spectral_ops.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.contrib.signal.python.ops import spectral_ops\nfrom tensorflow.contrib.signal.python.ops import window_ops\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gradients_impl\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops import spectral_ops_test_util\nfrom tensorflow.python.platform import test\n\n\nclass SpectralOpsTest(test.TestCase):\n\n @staticmethod\n def _np_hann_periodic_window(length):\n if length == 1:\n return np.ones(1)\n odd = length % 2\n if not odd:\n length += 1\n window = 0.5 - 0.5 * np.cos(2.0 * np.pi * np.arange(length) / (length - 1))\n if not odd:\n window = window[:-1]\n return window\n\n @staticmethod\n def _np_frame(data, window_length, hop_length):\n num_frames = 1 + int(np.floor((len(data) - window_length) // hop_length))\n shape = (num_frames, window_length)\n strides = (data.strides[0] * hop_length, data.strides[0])\n return np.lib.stride_tricks.as_strided(data, shape=shape, strides=strides)\n\n @staticmethod\n def _np_stft(data, fft_length, hop_length, window_length):\n frames = SpectralOpsTest._np_frame(data, window_length, hop_length)\n window = SpectralOpsTest._np_hann_periodic_window(window_length)\n return np.fft.rfft(frames * window, fft_length)\n\n @staticmethod\n def _np_inverse_stft(stft, fft_length, hop_length, window_length):\n frames = np.fft.irfft(stft, fft_length)\n # Pad or truncate frames's inner dimension to window_length.\n frames = frames[..., :window_length]\n frames = np.pad(frames, [[0, 0]] * (frames.ndim - 1) +\n [[0, max(0, window_length - frames.shape[-1])]], \"constant\")\n window = SpectralOpsTest._np_hann_periodic_window(window_length)\n return SpectralOpsTest._np_overlap_add(frames * window, hop_length)\n\n @staticmethod\n def _np_overlap_add(stft, hop_length):\n num_frames, window_length = np.shape(stft)\n # Output length will be one complete window, plus another hop_length's\n # worth of points for each additional window.\n output_length = window_length + (num_frames - 1) * hop_length\n output = np.zeros(output_length)\n for i in range(num_frames):\n output[i * hop_length:i * hop_length + window_length] += stft[i,]\n return output\n\n def _compare(self, signal, frame_length, frame_step, fft_length):\n with spectral_ops_test_util.fft_kernel_label_map(), (\n self.test_session(use_gpu=True)) as sess:\n actual_stft = spectral_ops.stft(\n signal, frame_length, frame_step, fft_length, pad_end=False)\n signal_ph = array_ops.placeholder(dtype=dtypes.as_dtype(signal.dtype))\n actual_stft_from_ph = spectral_ops.stft(\n signal_ph, frame_length, frame_step, 
fft_length, pad_end=False)\n\n actual_inverse_stft = spectral_ops.inverse_stft(\n actual_stft, frame_length, frame_step, fft_length)\n\n actual_stft, actual_stft_from_ph, actual_inverse_stft = sess.run(\n [actual_stft, actual_stft_from_ph, actual_inverse_stft],\n feed_dict={signal_ph: signal})\n\n actual_stft_ph = array_ops.placeholder(dtype=actual_stft.dtype)\n actual_inverse_stft_from_ph = sess.run(\n spectral_ops.inverse_stft(\n actual_stft_ph, frame_length, frame_step, fft_length),\n feed_dict={actual_stft_ph: actual_stft})\n\n # Confirm that there is no difference in output when shape/rank is fully\n # unknown or known.\n self.assertAllClose(actual_stft, actual_stft_from_ph)\n self.assertAllClose(actual_inverse_stft, actual_inverse_stft_from_ph)\n\n expected_stft = SpectralOpsTest._np_stft(\n signal, fft_length, frame_step, frame_length)\n self.assertAllClose(expected_stft, actual_stft, 1e-4, 1e-4)\n\n expected_inverse_stft = SpectralOpsTest._np_inverse_stft(\n expected_stft, fft_length, frame_step, frame_length)\n self.assertAllClose(\n expected_inverse_stft, actual_inverse_stft, 1e-4, 1e-4)\n\n def test_shapes(self):\n with spectral_ops_test_util.fft_kernel_label_map(), (\n self.test_session(use_gpu=True)):\n signal = np.zeros((512,)).astype(np.float32)\n\n # If fft_length is not provided, the smallest enclosing power of 2 of\n # frame_length (8) is used.\n stft = spectral_ops.stft(signal, frame_length=7, frame_step=8,\n pad_end=True)\n self.assertAllEqual([64, 5], stft.shape.as_list())\n self.assertAllEqual([64, 5], stft.eval().shape)\n\n stft = spectral_ops.stft(signal, frame_length=8, frame_step=8,\n pad_end=True)\n self.assertAllEqual([64, 5], stft.shape.as_list())\n self.assertAllEqual([64, 5], stft.eval().shape)\n\n stft = spectral_ops.stft(signal, frame_length=8, frame_step=8,\n fft_length=16, pad_end=True)\n self.assertAllEqual([64, 9], stft.shape.as_list())\n self.assertAllEqual([64, 9], stft.eval().shape)\n\n stft = spectral_ops.stft(signal, frame_length=16, frame_step=8,\n fft_length=8, pad_end=True)\n self.assertAllEqual([64, 5], stft.shape.as_list())\n self.assertAllEqual([64, 5], stft.eval().shape)\n\n stft = np.zeros((32, 9)).astype(np.complex64)\n\n inverse_stft = spectral_ops.inverse_stft(stft, frame_length=8,\n fft_length=16, frame_step=8)\n expected_length = (stft.shape[0] - 1) * 8 + 8\n self.assertAllEqual([None], inverse_stft.shape.as_list())\n self.assertAllEqual([expected_length], inverse_stft.eval().shape)\n\n def test_stft_and_inverse_stft(self):\n \"\"\"Test that spectral_ops.stft/inverse_stft match a NumPy implementation.\"\"\"\n # Tuples of (signal_length, frame_length, frame_step, fft_length).\n test_configs = [\n (512, 64, 32, 64),\n (512, 64, 64, 64),\n (512, 72, 64, 64),\n (512, 64, 25, 64),\n (512, 25, 15, 36),\n (123, 23, 5, 42),\n ]\n\n for signal_length, frame_length, frame_step, fft_length in test_configs:\n signal = np.random.random(signal_length).astype(np.float32)\n self._compare(signal, frame_length, frame_step, fft_length)\n\n def test_stft_round_trip(self):\n # Tuples of (signal_length, frame_length, frame_step, fft_length,\n # threshold, corrected_threshold).\n test_configs = [\n # 87.5% overlap.\n (4096, 256, 32, 256, 1e-5, 1e-6),\n # 75% overlap.\n (4096, 256, 64, 256, 1e-5, 1e-6),\n # Odd frame hop.\n (4096, 128, 25, 128, 1e-3, 1e-6),\n # Odd frame length.\n (4096, 127, 32, 128, 1e-3, 1e-6),\n # 50% overlap.\n (4096, 128, 64, 128, 0.40, 1e-6),\n ]\n\n for (signal_length, frame_length, frame_step, fft_length, threshold,\n 
corrected_threshold) in test_configs:\n # Generate a random white Gaussian signal.\n signal = random_ops.random_normal([signal_length])\n\n with spectral_ops_test_util.fft_kernel_label_map(), (\n self.test_session(use_gpu=True)) as sess:\n stft = spectral_ops.stft(signal, frame_length, frame_step, fft_length,\n pad_end=False)\n inverse_stft = spectral_ops.inverse_stft(stft, frame_length, frame_step,\n fft_length)\n inverse_stft_corrected = spectral_ops.inverse_stft(\n stft, frame_length, frame_step, fft_length,\n window_fn=spectral_ops.inverse_stft_window_fn(frame_step))\n signal, inverse_stft, inverse_stft_corrected = sess.run(\n [signal, inverse_stft, inverse_stft_corrected])\n\n # Truncate signal to the size of inverse stft.\n signal = signal[:inverse_stft.shape[0]]\n\n # Ignore the frame_length samples at either edge.\n signal = signal[frame_length:-frame_length]\n inverse_stft = inverse_stft[frame_length:-frame_length]\n inverse_stft_corrected = inverse_stft_corrected[\n frame_length:-frame_length]\n\n # Check that the inverse and original signal are close up to a scale\n # factor.\n inverse_stft_scaled = inverse_stft / np.mean(np.abs(inverse_stft))\n signal_scaled = signal / np.mean(np.abs(signal))\n self.assertLess(np.std(inverse_stft_scaled - signal_scaled), threshold)\n\n # Check that the inverse with correction and original signal are close.\n self.assertLess(np.std(inverse_stft_corrected - signal),\n corrected_threshold)\n\n def test_inverse_stft_window_fn(self):\n \"\"\"Test that inverse_stft_window_fn has unit gain at each window phase.\"\"\"\n # Tuples of (frame_length, frame_step).\n test_configs = [\n (256, 32),\n (256, 64),\n (128, 25),\n (127, 32),\n (128, 64),\n ]\n\n for (frame_length, frame_step) in test_configs:\n hann_window = window_ops.hann_window(frame_length, dtype=dtypes.float32)\n inverse_window_fn = spectral_ops.inverse_stft_window_fn(frame_step)\n inverse_window = inverse_window_fn(frame_length, dtype=dtypes.float32)\n\n with self.test_session(use_gpu=True) as sess:\n hann_window, inverse_window = sess.run([hann_window, inverse_window])\n\n # Expect unit gain at each phase of the window.\n product_window = hann_window * inverse_window\n for i in range(frame_step):\n self.assertAllClose(1.0, np.sum(product_window[i::frame_step]))\n\n def test_inverse_stft_window_fn_special_case(self):\n \"\"\"Test inverse_stft_window_fn in special overlap = 3/4 case.\"\"\"\n # Cases in which frame_length is an integer multiple of 4 * frame_step are\n # special because they allow exact reproduction of the waveform with a\n # squared Hann window (Hann window in both forward and reverse transforms).\n # In the case where frame_length = 4 * frame_step, that combination\n # produces a constant gain of 1.5, and so the corrected window will be the\n # Hann window / 1.5.\n\n # Tuples of (frame_length, frame_step).\n test_configs = [\n (256, 64),\n (128, 32),\n ]\n\n for (frame_length, frame_step) in test_configs:\n hann_window = window_ops.hann_window(frame_length, dtype=dtypes.float32)\n inverse_window_fn = spectral_ops.inverse_stft_window_fn(frame_step)\n inverse_window = inverse_window_fn(frame_length, dtype=dtypes.float32)\n\n with self.test_session(use_gpu=True) as sess:\n hann_window, inverse_window = sess.run([hann_window, inverse_window])\n\n self.assertAllClose(hann_window, inverse_window * 1.5)\n\n @staticmethod\n def _compute_stft_gradient(signal, frame_length=32, frame_step=16,\n fft_length=32):\n \"\"\"Computes the gradient of the STFT with respect to `signal`.\"\"\"\n 
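
# A quick NumPy check of the "constant gain of 1.5" claim made in
# test_inverse_stft_window_fn_special_case above: with a periodic Hann window
# used for both analysis and synthesis and frame_length = 4 * frame_step, the
# overlap-added squared window is 1.5 at every sample offset, which is why the
# corrected window is Hann / 1.5. The helper is illustrative only and assumes
# frame_length is a multiple of frame_step.
def _hann_overlap_gain_sketch(frame_length=256, frame_step=64):
  import numpy as np
  n = np.arange(frame_length)
  hann = 0.5 - 0.5 * np.cos(2.0 * np.pi * n / frame_length)  # periodic Hann
  gain = np.zeros(frame_step)
  for start in range(0, frame_length, frame_step):
    gain += hann[start:start + frame_step] ** 2
  return gain  # ~[1.5, 1.5, ..., 1.5]
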
stft = spectral_ops.stft(signal, frame_length, frame_step, fft_length)\n magnitude_stft = math_ops.abs(stft)\n loss = math_ops.reduce_sum(magnitude_stft)\n return gradients_impl.gradients([loss], [signal])[0]\n\n def test_gradients(self):\n \"\"\"Test that spectral_ops.stft has a working gradient.\"\"\"\n with spectral_ops_test_util.fft_kernel_label_map(), (\n self.test_session(use_gpu=True)) as sess:\n signal_length = 512\n\n # An all-zero signal has all zero gradients with respect to the sum of the\n # magnitude STFT.\n empty_signal = array_ops.zeros([signal_length], dtype=dtypes.float32)\n empty_signal_gradient = sess.run(\n self._compute_stft_gradient(empty_signal))\n self.assertTrue((empty_signal_gradient == 0.0).all())\n\n # A sinusoid will have non-zero components of its gradient with respect to\n # the sum of the magnitude STFT.\n sinusoid = math_ops.sin(\n 2 * np.pi * math_ops.linspace(0.0, 1.0, signal_length))\n sinusoid_gradient = sess.run(self._compute_stft_gradient(sinusoid))\n self.assertFalse((sinusoid_gradient == 0.0).all())\n\n def test_gradients_numerical(self):\n with spectral_ops_test_util.fft_kernel_label_map(), (\n self.test_session(use_gpu=True)):\n # Tuples of (signal_length, frame_length, frame_step, fft_length,\n # stft_bound, inverse_stft_bound).\n # TODO(rjryan): Investigate why STFT gradient error is so high.\n test_configs = [\n (64, 16, 8, 16),\n (64, 16, 16, 16),\n (64, 16, 7, 16),\n (64, 7, 4, 9),\n (29, 5, 1, 10),\n ]\n\n for (signal_length, frame_length, frame_step, fft_length) in test_configs:\n signal_shape = [signal_length]\n signal = random_ops.random_uniform(signal_shape)\n stft_shape = [max(0, 1 + (signal_length - frame_length) // frame_step),\n fft_length // 2 + 1]\n stft = spectral_ops.stft(signal, frame_length, frame_step, fft_length,\n pad_end=False)\n inverse_stft_shape = [(stft_shape[0] - 1) * frame_step + frame_length]\n inverse_stft = spectral_ops.inverse_stft(stft, frame_length, frame_step,\n fft_length)\n stft_error = test.compute_gradient_error(signal, [signal_length],\n stft, stft_shape)\n inverse_stft_error = test.compute_gradient_error(\n stft, stft_shape, inverse_stft, inverse_stft_shape)\n self.assertLess(stft_error, 2e-3)\n self.assertLess(inverse_stft_error, 5e-4)\n\n\nif __name__ == \"__main__\":\n test.main()\n", "# Copyright 2016 The TensorFlow Authors. 
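
# The shape arithmetic the tests above rely on, written out in plain Python.
# The pad_end=False frame count and the inverse-STFT length come straight from
# test_gradients_numerical and test_shapes; the pad_end=True count
# (ceil(samples / frame_step)) is an assumption inferred from the [64, 5] and
# [64, 9] cases in test_shapes. The helper name is illustrative only.
def _stft_shape_sketch(signal_length, frame_length, frame_step,
                       fft_length=None, pad_end=False):
  if fft_length is None:
    # Default fft_length: smallest enclosing power of 2 of frame_length.
    fft_length = 1
    while fft_length < frame_length:
      fft_length *= 2
  if pad_end:
    num_frames = -(-signal_length // frame_step)  # ceil division
  else:
    num_frames = max(0, 1 + (signal_length - frame_length) // frame_step)
  num_bins = fft_length // 2 + 1
  inverse_length = (num_frames - 1) * frame_step + frame_length
  return (num_frames, num_bins), inverse_length

# For example, _stft_shape_sketch(512, 7, 8, pad_end=True) gives ((64, 5), 511)
# and _stft_shape_sketch(512, 8, 8, fft_length=16, pad_end=True) gives
# ((64, 9), 512), matching the [64, 5] and [64, 9] STFT shapes asserted in
# test_shapes.
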
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Example of DNNClassifier for Iris plant dataset.\n\nThis example uses APIs in Tensorflow 1.4 or above.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport urllib\n\nimport tensorflow as tf\n\n# Data sets\nIRIS_TRAINING = 'iris_training.csv'\nIRIS_TRAINING_URL = 'http://download.tensorflow.org/data/iris_training.csv'\n\nIRIS_TEST = 'iris_test.csv'\nIRIS_TEST_URL = 'http://download.tensorflow.org/data/iris_test.csv'\n\nFEATURE_KEYS = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width']\n\n\ndef maybe_download_iris_data(file_name, download_url):\n \"\"\"Downloads the file and returns the number of data.\"\"\"\n if not os.path.exists(file_name):\n raw = urllib.urlopen(download_url).read()\n with open(file_name, 'w') as f:\n f.write(raw)\n\n # The first line is a comma-separated string. The first one is the number of\n # total data in the file.\n with open(file_name, 'r') as f:\n first_line = f.readline()\n num_elements = first_line.split(',')[0]\n return int(num_elements)\n\n\ndef input_fn(file_name, num_data, batch_size, is_training):\n \"\"\"Creates an input_fn required by Estimator train/evaluate.\"\"\"\n # If the data sets aren't stored locally, download them.\n\n def _parse_csv(rows_string_tensor):\n \"\"\"Takes the string input tensor and returns tuple of (features, labels).\"\"\"\n # Last dim is the label.\n num_features = len(FEATURE_KEYS)\n num_columns = num_features + 1\n columns = tf.decode_csv(rows_string_tensor,\n record_defaults=[[]] * num_columns)\n features = dict(zip(FEATURE_KEYS, columns[:num_features]))\n labels = tf.cast(columns[num_features], tf.int32)\n return features, labels\n\n def _input_fn():\n \"\"\"The input_fn.\"\"\"\n dataset = tf.data.TextLineDataset([file_name])\n # Skip the first line (which does not have data).\n dataset = dataset.skip(1)\n dataset = dataset.map(_parse_csv)\n\n if is_training:\n # For this small dataset, which can fit into memory, to achieve true\n # randomness, the shuffle buffer size is set as the total number of\n # elements in the dataset.\n dataset = dataset.shuffle(num_data)\n dataset = dataset.repeat()\n\n dataset = dataset.batch(batch_size)\n iterator = dataset.make_one_shot_iterator()\n features, labels = iterator.get_next()\n return features, labels\n\n return _input_fn\n\n\ndef main(unused_argv):\n tf.logging.set_verbosity(tf.logging.INFO)\n\n num_training_data = maybe_download_iris_data(\n IRIS_TRAINING, IRIS_TRAINING_URL)\n num_test_data = maybe_download_iris_data(IRIS_TEST, IRIS_TEST_URL)\n\n # Build 3 layer DNN with 10, 20, 10 units respectively.\n feature_columns = [\n tf.feature_column.numeric_column(key, shape=1) for key in FEATURE_KEYS]\n classifier = tf.estimator.DNNClassifier(\n feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3)\n\n # Train.\n train_input_fn = input_fn(IRIS_TRAINING, num_training_data, batch_size=32,\n 
is_training=True)\n classifier.train(input_fn=train_input_fn, steps=400)\n\n # Eval.\n test_input_fn = input_fn(IRIS_TEST, num_test_data, batch_size=32,\n is_training=False)\n scores = classifier.evaluate(input_fn=test_input_fn)\n print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))\n\n\nif __name__ == '__main__':\n tf.app.run()\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Auto-Regressive models for time series data.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.contrib import distributions\n\nfrom tensorflow.contrib.timeseries.python.timeseries import model\nfrom tensorflow.contrib.timeseries.python.timeseries import model_utils\nfrom tensorflow.contrib.timeseries.python.timeseries.feature_keys import PredictionFeatures\nfrom tensorflow.contrib.timeseries.python.timeseries.feature_keys import TrainEvalFeatures\n\nfrom tensorflow.python.estimator import estimator_lib\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import check_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import gen_math_ops\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.ops import tensor_array_ops\nfrom tensorflow.python.ops import variable_scope\n\n\nclass ARModel(model.TimeSeriesModel):\n \"\"\"Auto-regressive model, both linear and non-linear.\n\n Features to the model include time and values of input_window_size timesteps,\n and times for output_window_size timesteps. These are passed through zero or\n more hidden layers, and then fed to a loss function (e.g. squared loss).\n\n Note that this class can also be used to regress against time only by setting\n the input_window_size to zero.\n \"\"\"\n SQUARED_LOSS = \"squared_loss\"\n NORMAL_LIKELIHOOD_LOSS = \"normal_likelihood_loss\"\n\n def __init__(self,\n periodicities,\n input_window_size,\n output_window_size,\n num_features,\n num_time_buckets=10,\n loss=NORMAL_LIKELIHOOD_LOSS,\n hidden_layer_sizes=None):\n \"\"\"Constructs an auto-regressive model.\n\n Args:\n periodicities: periodicities of the input data, in the same units as the\n time feature. Note this can be a single value or a list of values for\n multiple periodicities.\n input_window_size: Number of past time steps of data to look at when doing\n the regression.\n output_window_size: Number of future time steps to predict. 
Note that\n setting it to > 1 empiricaly seems to give a better fit.\n num_features: number of input features per time step.\n num_time_buckets: Number of buckets into which to divide (time %\n periodicity) for generating time based features.\n loss: Loss function to use for training. Currently supported values are\n SQUARED_LOSS and NORMAL_LIKELIHOOD_LOSS. Note that for\n NORMAL_LIKELIHOOD_LOSS, we train the covariance term as well. For\n SQUARED_LOSS, the evaluation loss is reported based on un-scaled\n observations and predictions, while the training loss is computed on\n normalized data (if input statistics are available).\n hidden_layer_sizes: list of sizes of hidden layers.\n \"\"\"\n self.input_window_size = input_window_size\n self.output_window_size = output_window_size\n if hidden_layer_sizes is None:\n hidden_layer_sizes = []\n self.hidden_layer_sizes = hidden_layer_sizes\n self.window_size = self.input_window_size + self.output_window_size\n self.loss = loss\n super(ARModel, self).__init__(\n num_features=num_features)\n assert num_time_buckets > 0\n self._buckets = int(num_time_buckets)\n if periodicities is None or not periodicities:\n periodicities = []\n elif (not isinstance(periodicities, list) and\n not isinstance(periodicities, tuple)):\n periodicities = [periodicities]\n self._periods = [int(p) for p in periodicities]\n for p in self._periods:\n assert p > 0\n assert len(self._periods) or self.input_window_size\n assert output_window_size > 0\n\n def get_start_state(self):\n # State which matches the format we'll return later. Typically this will not\n # be used by the model directly, but the shapes and dtypes should match so\n # that the serving input_receiver_fn gets placeholder shapes correct.\n return (array_ops.zeros([self.input_window_size], dtype=dtypes.int64),\n array_ops.zeros(\n [self.input_window_size, self.num_features], dtype=self.dtype))\n\n # TODO(allenl,agarwal): Support sampling for AR.\n def random_model_parameters(self, seed=None):\n pass\n\n def generate(self, number_of_series, series_length,\n model_parameters=None, seed=None):\n pass\n\n def _predicted_covariance_op(self, activations, num_values):\n activation, activation_size = activations[-1]\n if self.loss == ARModel.NORMAL_LIKELIHOOD_LOSS:\n log_sigma_square = model_utils.fully_connected(\n activation,\n activation_size,\n self.output_window_size * num_values,\n name=\"log_sigma_square\",\n activation=None)\n predicted_covariance = gen_math_ops.exp(log_sigma_square)\n predicted_covariance = array_ops.reshape(\n predicted_covariance, [-1, self.output_window_size, num_values])\n else:\n shape = array_ops.stack([\n array_ops.shape(activation)[0],\n constant_op.constant(self.output_window_size),\n constant_op.constant(num_values)\n ])\n predicted_covariance = array_ops.ones(shape=shape, dtype=activation.dtype)\n return predicted_covariance\n\n def _predicted_mean_op(self, activations):\n activation, activation_size = activations[-1]\n predicted_mean = model_utils.fully_connected(\n activation,\n activation_size,\n self.output_window_size * self.num_features,\n name=\"predicted_mean\",\n activation=None)\n return array_ops.reshape(predicted_mean,\n [-1, self.output_window_size, self.num_features])\n\n def _create_hidden_stack(self, activation, activation_size):\n activations = []\n for layer_number, layer_size in enumerate(self.hidden_layer_sizes):\n # TODO(agarwal): Migrate to fully_connected in tf slim\n activation = model_utils.fully_connected(\n activation, activation_size, layer_size,\n 
name=\"layer_{}\".format(layer_number))\n activation_size = layer_size\n activations.append((activation, activation_size))\n return activations\n\n def prediction_ops(self, times, values):\n \"\"\"Compute model predictions given input data.\n\n Args:\n times: A [batch size, self.window_size] integer Tensor, the first\n self.input_window_size times in each part of the batch indicating\n input features, and the last self.output_window_size times indicating\n prediction times.\n values: A [batch size, self.input_window_size, self.num_features] Tensor\n with input features.\n Returns:\n Tuple (predicted_mean, predicted_covariance), where each element is a\n Tensor with shape [batch size, self.output_window_size,\n self.num_features].\n \"\"\"\n times.get_shape().assert_is_compatible_with([None, self.window_size])\n activations = []\n if self.input_window_size:\n values.get_shape().assert_is_compatible_with(\n [None, self.input_window_size, self.num_features])\n # Create input features.\n if self._periods:\n _, time_features = self._compute_time_features(times)\n activation_size = self.window_size * self._buckets * len(self._periods)\n activation = array_ops.reshape(time_features, [-1, activation_size])\n else:\n activation_size = 0\n activation = None\n\n if self.input_window_size:\n inp = array_ops.slice(values, [0, 0, 0], [-1, self.input_window_size, -1])\n inp_size = self.input_window_size * self.num_features\n inp = array_ops.reshape(inp, [-1, inp_size])\n if activation is not None:\n activation = array_ops.concat([inp, activation], 1)\n else:\n activation = inp\n activation_size += inp_size\n assert activation_size\n activations.append((activation, activation_size))\n # Create hidden layers.\n activations += self._create_hidden_stack(activation, activation_size)\n # Create mean and convariance ops.\n predicted_mean = self._predicted_mean_op(activations)\n predicted_covariance = self._predicted_covariance_op(activations,\n self.num_features)\n return {\"activations\": activations,\n \"mean\": predicted_mean,\n \"covariance\": predicted_covariance}\n\n def loss_op(self, targets, prediction_ops):\n \"\"\"Create loss_op.\"\"\"\n prediction = prediction_ops[\"mean\"]\n if self.loss == ARModel.NORMAL_LIKELIHOOD_LOSS:\n covariance = prediction_ops[\"covariance\"]\n sigma = math_ops.sqrt(gen_math_ops.maximum(covariance, 1e-5))\n normal = distributions.Normal(loc=targets, scale=sigma)\n loss_op = -math_ops.reduce_sum(normal.log_prob(prediction))\n else:\n assert self.loss == ARModel.SQUARED_LOSS, self.loss\n loss_op = math_ops.reduce_sum(math_ops.square(prediction - targets))\n loss_op /= math_ops.cast(\n math_ops.reduce_prod(array_ops.shape(targets)), loss_op.dtype)\n return loss_op\n\n # TODO(allenl, agarwal): Consider better ways of warm-starting predictions.\n def predict(self, features):\n \"\"\"Computes predictions multiple steps into the future.\n\n Args:\n features: A dictionary with the following key/value pairs:\n PredictionFeatures.TIMES: A [batch size, predict window size]\n integer Tensor of times, after the window of data indicated by\n `STATE_TUPLE`, to make predictions for.\n PredictionFeatures.STATE_TUPLE: A tuple of (times, values), times with\n shape [batch size, self.input_window_size], values with shape [batch\n size, self.input_window_size, self.num_features] representing a\n segment of the time series before `TIMES`. This data is used\n to start of the autoregressive computation. 
This should have data for\n at least self.input_window_size timesteps.\n Returns:\n A dictionary with keys, \"mean\", \"covariance\". The\n values are Tensors of shape [batch_size, predict window size,\n num_features] and correspond to the values passed in `TIMES`.\n \"\"\"\n predict_times = math_ops.cast(\n ops.convert_to_tensor(features[PredictionFeatures.TIMES]), dtypes.int32)\n batch_size = array_ops.shape(predict_times)[0]\n num_predict_values = array_ops.shape(predict_times)[1]\n prediction_iterations = ((num_predict_values + self.output_window_size - 1)\n // self.output_window_size)\n # Pad predict_times so as to have exact multiple of self.output_window_size\n # values per example.\n padding_size = (prediction_iterations * self.output_window_size -\n num_predict_values)\n padding = array_ops.zeros([batch_size, padding_size], predict_times.dtype)\n predict_times = control_flow_ops.cond(\n padding_size > 0, lambda: array_ops.concat([predict_times, padding], 1),\n lambda: predict_times)\n state = features[PredictionFeatures.STATE_TUPLE]\n (state_times, state_values) = state\n state_times = math_ops.cast(\n ops.convert_to_tensor(state_times), dtypes.int32)\n state_values = ops.convert_to_tensor(state_values, dtype=self.dtype)\n\n initial_input_times = predict_times[:, :self.output_window_size]\n if self.input_window_size > 0:\n initial_input_times = array_ops.concat(\n [state_times[:, -self.input_window_size:], initial_input_times], 1)\n values_size = array_ops.shape(state_values)[1]\n times_size = array_ops.shape(state_times)[1]\n with ops.control_dependencies([\n check_ops.assert_greater_equal(values_size, self.input_window_size),\n check_ops.assert_equal(values_size, times_size)\n ]):\n initial_input_values = state_values[:, -self.input_window_size:, :]\n else:\n initial_input_values = 0\n\n # Iterate over the predict_times, predicting self.output_window_size values\n # in each iteration.\n def _while_condition(iteration_number, *unused_args):\n return math_ops.less(iteration_number, prediction_iterations)\n\n def _while_body(iteration_number, input_times, input_values,\n mean_ta, covariance_ta):\n \"\"\"Predict self.output_window_size values.\"\"\"\n prediction_ops = self.prediction_ops(input_times, input_values)\n predicted_mean = prediction_ops[\"mean\"]\n predicted_covariance = prediction_ops[\"covariance\"]\n offset = self.output_window_size * gen_math_ops.minimum(\n iteration_number + 1, prediction_iterations - 1)\n if self.input_window_size > 0:\n if self.output_window_size < self.input_window_size:\n new_input_values = array_ops.concat(\n [input_values[:, self.output_window_size:, :], predicted_mean], 1)\n new_input_times = array_ops.concat([\n input_times[:, self.output_window_size:],\n predict_times[:, offset:offset + self.output_window_size]\n ], 1)\n else:\n new_input_values = predicted_mean[:, -self.input_window_size:, :]\n new_input_times = predict_times[\n :,\n offset - self.input_window_size:offset + self.output_window_size]\n else:\n new_input_values = input_values\n new_input_times = predict_times[:,\n offset:offset + self.output_window_size]\n new_input_times.set_shape(initial_input_times.get_shape())\n new_mean_ta = mean_ta.write(iteration_number, predicted_mean)\n if isinstance(covariance_ta, tensor_array_ops.TensorArray):\n new_covariance_ta = covariance_ta.write(iteration_number,\n predicted_covariance)\n else:\n new_covariance_ta = covariance_ta\n return (iteration_number + 1,\n new_input_times,\n new_input_values,\n new_mean_ta,\n new_covariance_ta)\n\n # 
Note that control_flow_ops.while_loop doesn't seem happy with None. Hence\n # using 0 for cases where we don't want to predict covariance.\n covariance_ta_init = (tensor_array_ops.TensorArray(\n dtype=self.dtype, size=prediction_iterations)\n if self.loss != ARModel.SQUARED_LOSS else 0.)\n mean_ta_init = tensor_array_ops.TensorArray(\n dtype=self.dtype, size=prediction_iterations)\n _, _, _, mean_ta, covariance_ta = control_flow_ops.while_loop(\n _while_condition, _while_body, [\n 0, initial_input_times, initial_input_values, mean_ta_init,\n covariance_ta_init\n ])\n\n def _parse_ta(values_ta):\n \"\"\"Helper function to parse the returned TensorArrays.\"\"\"\n\n if not isinstance(values_ta, tensor_array_ops.TensorArray):\n return None\n predictions_length = prediction_iterations * self.output_window_size\n # Shape [prediction_iterations, batch_size, self.output_window_size,\n # self.num_features]\n values_packed = values_ta.stack()\n # Transpose to move batch dimension outside.\n output_values = array_ops.reshape(\n array_ops.transpose(values_packed, [1, 0, 2, 3]),\n array_ops.stack([batch_size, predictions_length, -1]))\n # Clip to desired size\n return output_values[:, :num_predict_values, :]\n\n predicted_mean = _parse_ta(mean_ta)\n predicted_covariance = _parse_ta(covariance_ta)\n if predicted_covariance is None:\n predicted_covariance = array_ops.ones_like(predicted_mean)\n\n # Transform and scale the mean and covariance appropriately.\n predicted_mean = self._scale_back_data(predicted_mean)\n predicted_covariance = self._scale_back_variance(predicted_covariance)\n\n return {\"mean\": predicted_mean,\n \"covariance\": predicted_covariance}\n\n def _process_window(self, features, mode):\n \"\"\"Compute model outputs on a single window of data.\"\"\"\n # TODO(agarwal): Use exogenous features\n times = math_ops.cast(features[TrainEvalFeatures.TIMES], dtypes.int64)\n values = math_ops.cast(features[TrainEvalFeatures.VALUES], dtype=self.dtype)\n original_values = values\n\n # Extra shape checking for the window size (above that in\n # `head.create_estimator_spec`).\n expected_times_shape = [None, self.window_size]\n if not times.get_shape().is_compatible_with(expected_times_shape):\n raise ValueError(\n (\"ARModel with input_window_size={input_window_size} \"\n \"and output_window_size={output_window_size} expects \"\n \"feature '{times_feature}' to have shape (batch_size, \"\n \"{window_size}) (for any batch_size), but got shape {times_shape}. 
\"\n \"If you are using RandomWindowInputFn, set \"\n \"window_size={window_size} or adjust the input_window_size and \"\n \"output_window_size arguments to ARModel.\").format(\n input_window_size=self.input_window_size,\n output_window_size=self.output_window_size,\n times_feature=TrainEvalFeatures.TIMES,\n window_size=self.window_size,\n times_shape=times.get_shape()))\n values = self._scale_data(values)\n if self.input_window_size > 0:\n input_values = values[:, :self.input_window_size, :]\n else:\n input_values = None\n prediction_ops = self.prediction_ops(times, input_values)\n prediction = prediction_ops[\"mean\"]\n covariance = prediction_ops[\"covariance\"]\n targets = array_ops.slice(values, [0, self.input_window_size, 0],\n [-1, -1, -1])\n targets.get_shape().assert_is_compatible_with(prediction.get_shape())\n if (mode == estimator_lib.ModeKeys.EVAL\n and self.loss == ARModel.SQUARED_LOSS):\n # Report an evaluation loss which matches the expected\n # (observed - predicted) ** 2.\n # Note that this affects only evaluation; the training loss is unaffected.\n loss = self.loss_op(\n self._scale_back_data(targets),\n {\"mean\": self._scale_back_data(prediction_ops[\"mean\"])})\n else:\n loss = self.loss_op(targets, prediction_ops)\n\n # Scale back the prediction.\n prediction = self._scale_back_data(prediction)\n covariance = self._scale_back_variance(covariance)\n\n return model.ModelOutputs(\n loss=loss,\n end_state=(times[:, -self.input_window_size:],\n values[:, -self.input_window_size:, :]),\n predictions={\"mean\": prediction, \"covariance\": covariance,\n \"observed\": original_values[:, -self.output_window_size:]},\n prediction_times=times[:, -self.output_window_size:])\n\n def get_batch_loss(self, features, mode, state):\n \"\"\"Computes predictions and a loss.\n\n Args:\n features: A dictionary (such as is produced by a chunker) with the\n following key/value pairs (shapes are given as required for training):\n TrainEvalFeatures.TIMES: A [batch size, self.window_size] integer\n Tensor with times for each observation. To train on longer\n sequences, the data should first be chunked.\n TrainEvalFeatures.VALUES: A [batch size, self.window_size,\n self.num_features] Tensor with values for each observation.\n When evaluating, `TIMES` and `VALUES` must have a window size of at\n least self.window_size, but it may be longer, in which case the last\n window_size - self.input_window_size times (or fewer if this is not\n divisible by self.output_window_size) will be evaluated on with\n non-overlapping output windows (and will have associated\n predictions). 
This is primarily to support qualitative\n evaluation/plotting, and is not a recommended way to compute evaluation\n losses (since there is no overlap in the output windows, which for\n window-based models is an undesirable bias).\n mode: The tf.estimator.ModeKeys mode to use (TRAIN or EVAL).\n state: Unused\n Returns:\n A model.ModelOutputs object.\n Raises:\n ValueError: If `mode` is not TRAIN or EVAL, or if static shape information\n is incorrect.\n \"\"\"\n features = {feature_name: ops.convert_to_tensor(feature_value)\n for feature_name, feature_value in features.items()}\n if mode == estimator_lib.ModeKeys.TRAIN:\n # For training, we require the window size to be self.window_size as\n # iterating sequentially on larger windows could introduce a bias.\n return self._process_window(features, mode=mode)\n elif mode == estimator_lib.ModeKeys.EVAL:\n # For evaluation, we allow the user to pass in a larger window, in which\n # case we try to cover as much of the window as possible without\n # overlap. Quantitative evaluation is more efficient/correct with fixed\n # windows matching self.window_size (as with training), but this looping\n # allows easy plotting of \"in-sample\" predictions.\n times = features[TrainEvalFeatures.TIMES]\n times.get_shape().assert_has_rank(2)\n static_window_size = times.get_shape()[1].value\n if (static_window_size is not None\n and static_window_size < self.window_size):\n raise ValueError(\n (\"ARModel requires a window of at least input_window_size + \"\n \"output_window_size to evaluate on (input_window_size={}, \"\n \"output_window_size={}, and got shape {} for feature '{}' (batch \"\n \"size, window size)).\").format(\n self.input_window_size, self.output_window_size,\n times.get_shape(), TrainEvalFeatures.TIMES))\n num_iterations = ((array_ops.shape(times)[1] - self.input_window_size)\n // self.output_window_size)\n output_size = num_iterations * self.output_window_size\n # Rather than dealing with overlapping windows of output, discard a bit at\n # the beginning if output windows don't cover evenly.\n crop_length = output_size + self.input_window_size\n features = {feature_name: feature_value[:, -crop_length:]\n for feature_name, feature_value in features.items()}\n # Note that, unlike the ARModel's predict() while_loop and the\n # SequentialTimeSeriesModel while_loop, each iteration here can run in\n # parallel, since we are not feeding predictions or state from previous\n # iterations.\n def _while_condition(iteration_number, loss_ta, mean_ta, covariance_ta):\n del loss_ta, mean_ta, covariance_ta # unused\n return iteration_number < num_iterations\n\n def _while_body(iteration_number, loss_ta, mean_ta, covariance_ta):\n \"\"\"Perform a processing step on a single window of data.\"\"\"\n base_offset = iteration_number * self.output_window_size\n model_outputs = self._process_window(\n features={\n feature_name:\n feature_value[:, base_offset:base_offset + self.window_size]\n for feature_name, feature_value in features.items()},\n mode=mode)\n # This code needs to be updated if new predictions are added in\n # self._process_window\n assert len(model_outputs.predictions) == 3\n assert \"mean\" in model_outputs.predictions\n assert \"covariance\" in model_outputs.predictions\n assert \"observed\" in model_outputs.predictions\n return (iteration_number + 1,\n loss_ta.write(\n iteration_number, model_outputs.loss),\n mean_ta.write(\n iteration_number, model_outputs.predictions[\"mean\"]),\n covariance_ta.write(\n iteration_number, 
model_outputs.predictions[\"covariance\"]))\n _, loss_ta, mean_ta, covariance_ta = control_flow_ops.while_loop(\n _while_condition, _while_body,\n [0,\n tensor_array_ops.TensorArray(dtype=self.dtype, size=num_iterations),\n tensor_array_ops.TensorArray(dtype=self.dtype, size=num_iterations),\n tensor_array_ops.TensorArray(dtype=self.dtype, size=num_iterations)])\n values = math_ops.cast(features[TrainEvalFeatures.VALUES],\n dtype=self.dtype)\n batch_size = array_ops.shape(times)[0]\n prediction_shape = [batch_size, self.output_window_size * num_iterations,\n self.num_features]\n previous_state_times, previous_state_values = state\n # Make sure returned state always has windows of self.input_window_size,\n # even if we were passed fewer than self.input_window_size points this\n # time.\n if self.input_window_size > 0:\n new_state_times = array_ops.concat(\n [previous_state_times,\n math_ops.cast(times, dtype=dtypes.int64)],\n axis=1)[:, -self.input_window_size:]\n new_state_times.set_shape((None, self.input_window_size))\n new_state_values = array_ops.concat(\n [previous_state_values,\n self._scale_data(values)], axis=1)[:, -self.input_window_size:, :]\n new_state_values.set_shape((None, self.input_window_size,\n self.num_features))\n else:\n # There is no state to keep, and the strided slices above do not handle\n # input_window_size=0.\n new_state_times = previous_state_times\n new_state_values = previous_state_values\n return model.ModelOutputs(\n loss=math_ops.reduce_mean(loss_ta.stack(), axis=0),\n end_state=(new_state_times, new_state_values),\n predictions={\n \"mean\": array_ops.reshape(\n array_ops.transpose(mean_ta.stack(), [1, 0, 2, 3]),\n prediction_shape),\n \"covariance\": array_ops.reshape(\n array_ops.transpose(covariance_ta.stack(), [1, 0, 2, 3]),\n prediction_shape),\n \"observed\": values[:, -output_size:]},\n prediction_times=times[:, -output_size:])\n else:\n raise ValueError(\n \"Unknown mode '{}' passed to get_batch_loss.\".format(mode))\n\n def _compute_time_features(self, time):\n \"\"\"Compute some features on the time value.\"\"\"\n batch_size = array_ops.shape(time)[0]\n num_periods = len(self._periods)\n # Reshape to 3D.\n periods = constant_op.constant(\n self._periods, shape=[1, 1, num_periods, 1], dtype=time.dtype)\n time = array_ops.reshape(time, [batch_size, -1, 1, 1])\n window_offset = time / self._periods\n # Cast to appropriate type and scale to [0, 1) range\n mod = (math_ops.cast(time % periods, self.dtype) * self._buckets /\n math_ops.cast(periods, self.dtype))\n # Bucketize based on some fixed width intervals. For a value t and interval\n # [a, b), we return (t - a) if a <= t < b, else 0.\n intervals = array_ops.reshape(\n math_ops.range(self._buckets, dtype=self.dtype),\n [1, 1, 1, self._buckets])\n mod = nn_ops.relu(mod - intervals)\n mod = array_ops.where(mod < 1.0, mod, array_ops.zeros_like(mod))\n return window_offset, mod\n\n\nclass AnomalyMixtureARModel(ARModel):\n \"\"\"Model data as a mixture of normal and anomaly distributions.\n\n Note that this model works by changing the loss function to reduce the penalty\n when predicting an anomalous target. However the predictions are still based\n on anomalous input features, and this may affect the quality of fit. 
One\n possible solution is to downweight/filter anomalous inputs, but that requires\n more sequential processing instead of completely random windows.\n \"\"\"\n\n GAUSSIAN_ANOMALY = \"gaussian\"\n CAUCHY_ANOMALY = \"cauchy\"\n\n def __init__(self,\n periodicities,\n anomaly_prior_probability,\n input_window_size,\n output_window_size,\n num_features,\n anomaly_distribution=GAUSSIAN_ANOMALY,\n num_time_buckets=10,\n hidden_layer_sizes=None):\n assert (anomaly_prior_probability < 1.0 and\n anomaly_prior_probability > 0.0)\n self._anomaly_prior_probability = anomaly_prior_probability\n assert anomaly_distribution in [\n AnomalyMixtureARModel.GAUSSIAN_ANOMALY,\n AnomalyMixtureARModel.CAUCHY_ANOMALY]\n self._anomaly_distribution = anomaly_distribution\n super(AnomalyMixtureARModel, self).__init__(\n periodicities=periodicities,\n num_features=num_features,\n num_time_buckets=num_time_buckets,\n input_window_size=input_window_size,\n output_window_size=output_window_size,\n loss=ARModel.NORMAL_LIKELIHOOD_LOSS,\n hidden_layer_sizes=hidden_layer_sizes)\n\n def _create_anomaly_ops(self, times, values, prediction_ops_dict):\n anomaly_log_param = variable_scope.get_variable(\n \"anomaly_log_param\",\n shape=[],\n dtype=self.dtype,\n initializer=init_ops.zeros_initializer())\n # Anomaly param is the variance for Gaussian and scale for Cauchy\n # distribution.\n prediction_ops_dict[\"anomaly_params\"] = gen_math_ops.exp(anomaly_log_param)\n\n def prediction_ops(self, times, values):\n prediction_ops_dict = super(AnomalyMixtureARModel, self).prediction_ops(\n times, values)\n self._create_anomaly_ops(times, values, prediction_ops_dict)\n return prediction_ops_dict\n\n def _anomaly_log_prob(self, targets, prediction_ops):\n prediction = prediction_ops[\"mean\"]\n if self._anomaly_distribution == AnomalyMixtureARModel.GAUSSIAN_ANOMALY:\n anomaly_variance = prediction_ops[\"anomaly_params\"]\n anomaly_sigma = math_ops.sqrt(\n gen_math_ops.maximum(anomaly_variance, 1e-5))\n normal = distributions.Normal(loc=targets, scale=anomaly_sigma)\n log_prob = normal.log_prob(prediction)\n else:\n assert self._anomaly_distribution == AnomalyMixtureARModel.CAUCHY_ANOMALY\n anomaly_scale = prediction_ops[\"anomaly_params\"]\n cauchy = distributions.StudentT(\n df=array_ops.ones([], dtype=anomaly_scale.dtype),\n loc=targets,\n scale=anomaly_scale)\n log_prob = cauchy.log_prob(prediction)\n return log_prob\n\n def loss_op(self, targets, prediction_ops):\n \"\"\"Create loss_op.\"\"\"\n prediction = prediction_ops[\"mean\"]\n covariance = prediction_ops[\"covariance\"]\n # Normal data log probability.\n sigma = math_ops.sqrt(gen_math_ops.maximum(covariance, 1e-5))\n normal1 = distributions.Normal(loc=targets, scale=sigma)\n log_prob1 = normal1.log_prob(prediction)\n log_prob1 += math_ops.log(1 - self._anomaly_prior_probability)\n # Anomaly log probability.\n log_prob2 = self._anomaly_log_prob(targets, prediction_ops)\n log_prob2 += math_ops.log(self._anomaly_prior_probability)\n # We need to compute log(exp(log_prob1) + exp(log_prob2). For numerical\n # stability, we rewrite the expression as below.\n p1 = gen_math_ops.minimum(log_prob1, log_prob2)\n p2 = gen_math_ops.maximum(log_prob1, log_prob2)\n mixed_log_prob = p2 + math_ops.log(1 + gen_math_ops.exp(p1 - p2))\n loss_op = -math_ops.reduce_sum(mixed_log_prob)\n loss_op /= math_ops.cast(\n math_ops.reduce_prod(array_ops.shape(targets)), self.dtype)\n return loss_op\n", "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Python wrappers for Datasets and Iterators.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.contrib.data.python.ops import batching\nfrom tensorflow.contrib.data.python.ops import enumerate_ops\nfrom tensorflow.contrib.data.python.ops import error_ops\nfrom tensorflow.contrib.data.python.ops import grouping\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.data.util import nest\nfrom tensorflow.python.ops import gen_dataset_ops\nfrom tensorflow.python.ops import gen_io_ops\nfrom tensorflow.python.util import deprecation\n\n\nclass Dataset(dataset_ops.Dataset):\n \"\"\"Represents a potentially large set of elements.\n\n A `Dataset` can be used to represent an input pipeline as a\n collection of elements (nested structures of tensors) and a \"logical\n plan\" of transformations that act on those elements.\n \"\"\"\n\n def __init__(self, dataset):\n super(Dataset, self).__init__()\n self._dataset = dataset\n\n @deprecation.deprecated(None, \"Use `ds._as_variant_tensor()`.\")\n def make_dataset_resource(self):\n return self._as_variant_tensor()\n\n def _as_variant_tensor(self):\n return self._dataset._as_variant_tensor() # pylint: disable=protected-access\n\n @property\n def output_classes(self):\n return self._dataset.output_classes\n\n @property\n def output_shapes(self):\n return self._dataset.output_shapes\n\n @property\n def output_types(self):\n return self._dataset.output_types\n\n @staticmethod\n @deprecation.deprecated(None, \"Use `tf.data.Dataset.from_tensors()`.\")\n def from_tensors(tensors):\n \"\"\"Creates a `Dataset` with a single element, comprising the given tensors.\n\n Args:\n tensors: A nested structure of tensors.\n\n Returns:\n A `Dataset`.\n \"\"\"\n return Dataset(dataset_ops.TensorDataset(tensors))\n\n @staticmethod\n @deprecation.deprecated(None, \"Use `tf.data.Dataset.from_tensor_slices()`.\")\n def from_tensor_slices(tensors):\n \"\"\"Creates a `Dataset` whose elements are slices of the given tensors.\n\n Args:\n tensors: A nested structure of tensors, each having the same size in the\n 0th dimension.\n\n Returns:\n A `Dataset`.\n \"\"\"\n return Dataset(dataset_ops.TensorSliceDataset(tensors))\n\n @staticmethod\n @deprecation.deprecated(None,\n \"Use `tf.data.Dataset.from_sparse_tensor_slices()`.\")\n def from_sparse_tensor_slices(sparse_tensor):\n \"\"\"Splits each rank-N `tf.SparseTensor` in this dataset row-wise.\n\n Args:\n sparse_tensor: A `tf.SparseTensor`.\n\n Returns:\n A `Dataset` of rank-(N-1) sparse tensors.\n \"\"\"\n return Dataset(dataset_ops.SparseTensorSliceDataset(sparse_tensor))\n\n @staticmethod\n @deprecation.deprecated(None, \"Use `tf.data.Dataset.from_generator()`.\")\n def from_generator(generator, output_types, output_shapes=None):\n \"\"\"Creates a `Dataset` whose elements 
are generated by `generator`.\n\n The `generator` argument must be a callable object that returns\n an object that support the `iter()` protocol (e.g. a generator function).\n The elements generated by `generator` must be compatible with the given\n `output_types` and (optional) `output_shapes` arguments.\n\n For example:\n\n ```python\n import itertools\n\n def gen():\n for i in itertools.count(1):\n yield (i, [1] * i)\n\n ds = Dataset.from_generator(\n gen, (tf.int64, tf.int64), (tf.TensorShape([]), tf.TensorShape([None])))\n value = ds.make_one_shot_iterator().get_next()\n\n sess.run(value) # (1, array([1]))\n sess.run(value) # (2, array([1, 1]))\n ```\n\n Args:\n generator: A callable object that takes no arguments and returns an\n object that supports the `iter()` protocol.\n output_types: A nested structure of `tf.DType` objects corresponding to\n each component of an element yielded by `generator`.\n output_shapes: (Optional.) A nested structure of `tf.TensorShape`\n objects corresponding to each component of an element yielded by\n `generator`.\n\n Returns:\n A `Dataset`.\n \"\"\"\n return Dataset(dataset_ops.Dataset.from_generator(\n generator, output_types, output_shapes))\n\n @staticmethod\n @deprecation.deprecated(None, \"Use `tf.data.Dataset.range()`.\")\n def range(*args):\n \"\"\"Creates a `Dataset` of a step-separated range of values.\n\n For example:\n\n ```python\n Dataset.range(5) == [0, 1, 2, 3, 4]\n Dataset.range(2, 5) == [2, 3, 4]\n Dataset.range(1, 5, 2) == [1, 3]\n Dataset.range(1, 5, -2) == []\n Dataset.range(5, 1) == []\n Dataset.range(5, 1, -2) == [5, 3]\n ```\n\n Args:\n *args: follow same semantics as python's xrange.\n len(args) == 1 -> start = 0, stop = args[0], step = 1\n len(args) == 2 -> start = args[0], stop = args[1], step = 1\n len(args) == 3 -> start = args[0], stop = args[1, stop = args[2]\n\n Returns:\n A `RangeDataset`.\n\n Raises:\n ValueError: if len(args) == 0.\n \"\"\"\n return Dataset(dataset_ops.RangeDataset(*args))\n\n @staticmethod\n @deprecation.deprecated(None, \"Use `tf.data.Dataset.zip()`.\")\n def zip(datasets):\n \"\"\"Creates a `Dataset` by zipping together the given datasets.\n\n This method has similar semantics to the built-in `zip()` function\n in Python, with the main difference being that the `datasets`\n argument can be an arbitrary nested structure of `Dataset` objects.\n For example:\n\n ```python\n # NOTE: The following examples use `{ ... }` to represent the\n # contents of a dataset.\n a = { 1, 2, 3 }\n b = { 4, 5, 6 }\n c = { (7, 8), (9, 10), (11, 12) }\n d = { 13, 14 }\n\n # The nested structure of the `datasets` argument determines the\n # structure of elements in the resulting dataset.\n Dataset.zip((a, b)) == { (1, 4), (2, 5), (3, 6) }\n Dataset.zip((b, a)) == { (4, 1), (5, 2), (6, 3) }\n\n # The `datasets` argument may contain an arbitrary number of\n # datasets.\n Dataset.zip((a, b, c)) == { (1, 4, (7, 8)),\n (2, 5, (9, 10)),\n (3, 6, (11, 12)) }\n\n # The number of elements in the resulting dataset is the same as\n # the size of the smallest dataset in `datasets`.\n Dataset.zip((a, d)) == { (1, 13), (2, 14) }\n ```\n\n Args:\n datasets: A nested structure of datasets.\n\n Returns:\n A `Dataset`.\n \"\"\"\n return Dataset(dataset_ops.ZipDataset(datasets))\n\n def concatenate(self, dataset):\n \"\"\"Creates a `Dataset` by concatenating given dataset with this dataset.\n\n ```python\n # NOTE: The following examples use `{ ... 
}` to represent the\n # contents of a dataset.\n a = { 1, 2, 3 }\n b = { 4, 5, 6, 7 }\n\n # Input dataset and dataset to be concatenated should have same\n # nested structures and output types.\n # c = { (8, 9), (10, 11), (12, 13) }\n # d = { 14.0, 15.0, 16.0 }\n # a.concatenate(c) and a.concatenate(d) would result in error.\n\n a.concatenate(b) == { 1, 2, 3, 4, 5, 6, 7 }\n ```\n\n Args:\n dataset: `Dataset` to be concatenated.\n\n Returns:\n A `Dataset`.\n \"\"\"\n return Dataset(dataset_ops.ConcatenateDataset(self._dataset, dataset))\n\n def prefetch(self, buffer_size):\n \"\"\"Creates a `Dataset` that prefetches elements from this dataset.\n\n Args:\n buffer_size: A `tf.int64` scalar `tf.Tensor`, representing the\n maximum number elements that will be buffered when prefetching.\n\n Returns:\n A `Dataset`.\n \"\"\"\n return Dataset(dataset_ops.PrefetchDataset(self._dataset, buffer_size))\n\n @staticmethod\n @deprecation.deprecated(None, \"Use `tf.data.Dataset.list_files()`.\")\n def list_files(file_pattern):\n \"\"\"A dataset of all files matching a pattern.\n\n Example:\n If we had the following files on our filesystem:\n - /path/to/dir/a.txt\n - /path/to/dir/b.py\n - /path/to/dir/c.py\n If we pass \"/path/to/dir/*.py\" as the directory, the dataset would\n produce:\n - /path/to/dir/b.py\n - /path/to/dir/c.py\n\n Args:\n file_pattern: A string or scalar string `tf.Tensor`, representing\n the filename pattern that will be matched.\n\n Returns:\n A `Dataset` of strings corresponding to file names.\n \"\"\"\n return Dataset.from_tensor_slices(gen_io_ops.matching_files(file_pattern))\n\n def repeat(self, count=None):\n \"\"\"Repeats this dataset `count` times.\n\n Args:\n count: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the\n number of times the elements of this dataset should be repeated. The\n default behavior (if `count` is `None` or `-1`) is for the elements to\n be repeated indefinitely.\n\n Returns:\n A `Dataset`.\n \"\"\"\n return Dataset(dataset_ops.RepeatDataset(self._dataset, count))\n\n @deprecation.deprecated(\n None, \"Use `ds.apply(tf.contrib.data.enumerate_dataset())`.\")\n def enumerate(self, start=0):\n \"\"\"Deprecated: Use `Dataset.apply(tf.contrib.data.enumerate_dataset(..)`.\"\"\"\n\n return self.apply(enumerate_ops.enumerate_dataset(start))\n\n def shuffle(self, buffer_size, seed=None):\n \"\"\"Randomly shuffles the elements of this dataset.\n\n Args:\n buffer_size: A `tf.int64` scalar `tf.Tensor`, representing the\n number of elements from this dataset from which the new\n dataset will sample.\n seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the\n random seed that will be used to create the distribution. 
See\n @{tf.set_random_seed} for behavior.\n\n Returns:\n A `Dataset`.\n \"\"\"\n return Dataset(dataset_ops.ShuffleDataset(self._dataset, buffer_size, seed))\n\n def cache(self, filename=\"\"):\n \"\"\"Caches the elements in this dataset.\n\n Args:\n filename: A `tf.string` scalar `tf.Tensor`, representing the name of a\n directory on the filesystem to use for caching tensors in this Dataset.\n If a filename is not provided, the dataset will be cached in memory.\n\n Returns:\n A `Dataset`.\n \"\"\"\n return Dataset(dataset_ops.CacheDataset(self._dataset, filename))\n\n def take(self, count):\n \"\"\"Creates a `Dataset` with at most `count` elements from this dataset.\n\n Args:\n count: A `tf.int64` scalar `tf.Tensor`, representing the number of\n elements of this dataset that should be taken to form the new dataset.\n If `count` is -1, or if `count` is greater than the size of this\n dataset, the new dataset will contain all elements of this dataset.\n\n Returns:\n A `Dataset`.\n \"\"\"\n return Dataset(dataset_ops.TakeDataset(self._dataset, count))\n\n def skip(self, count):\n \"\"\"Creates a `Dataset` that skips `count` elements from this dataset.\n\n Args:\n count: A `tf.int64` scalar `tf.Tensor`, representing the number\n of elements of this dataset that should be skipped to form the\n new dataset. If `count` is greater than the size of this\n dataset, the new dataset will contain no elements. If `count`\n is -1, skips the entire dataset.\n\n Returns:\n A `Dataset`.\n \"\"\"\n return Dataset(dataset_ops.SkipDataset(self._dataset, count))\n\n def shard(self, num_shards, index):\n \"\"\"Creates a `Dataset` that includes only 1/`num_shards` of this dataset.\n\n This dataset operator is very useful when running distributed training, as\n it allows each worker to read a unique subset.\n\n When reading a single input file, you can skip elements as follows:\n\n ```python\n d = tf.data.TFRecordDataset(FLAGS.input_file)\n d = d.shard(FLAGS.num_workers, FLAGS.worker_index)\n d = d.repeat(FLAGS.num_epochs)\n d = d.shuffle(FLAGS.shuffle_buffer_size)\n d = d.map(parser_fn, num_parallel_calls=FLAGS.num_map_threads)\n ```\n\n Important caveats:\n\n - Be sure to shard before you use any randomizing operator (such as\n shuffle).\n - Generally it is best if the shard operator is used early in the dataset\n pipeline. For example, when reading from a set of TFRecord files, shard\n before converting the dataset to input samples. This avoids reading every\n file on every worker. The following is an example of an efficient\n sharding strategy within a complete pipeline:\n\n ```python\n d = tf.data.Dataset.list_files(FLAGS.pattern)\n d = d.shard(FLAGS.num_workers, FLAGS.worker_index)\n d = d.repeat(FLAGS.num_epochs)\n d = d.shuffle(FLAGS.shuffle_buffer_size)\n d = d.interleave(tf.data.TFRecordDataset,\n cycle_length=FLAGS.num_readers, block_length=1)\n d = d.map(parser_fn, num_parallel_calls=FLAGS.num_map_threads)\n ```\n\n Args:\n num_shards: A `tf.int64` scalar `tf.Tensor`, representing the number of\n shards operating in parallel.\n index: A `tf.int64` scalar `tf.Tensor`, representing the worker index.\n\n Returns:\n A `Dataset`.\n\n Raises:\n ValueError: if `num_shards` or `index` are illegal values. Note: error\n checking is done on a best-effort basis, and aren't guaranteed to be\n caught upon dataset creation. (e.g. 
providing in a placeholder tensor\n bypasses the early checking, and will instead result in an error during\n a session.run call.)\n \"\"\"\n return Dataset(self._dataset.shard(num_shards, index))\n\n @deprecation.deprecated(\n None, \"Use `ds.apply(tf.contrib.data.ignore_errors())`.\")\n def ignore_errors(self):\n \"\"\"Deprecated: Use `Dataset.apply(tf.contrib.data.ignore_errors())`.\"\"\"\n\n return self.apply(error_ops.ignore_errors())\n\n def batch(self, batch_size):\n \"\"\"Combines consecutive elements of this dataset into batches.\n\n Args:\n batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of\n consecutive elements of this dataset to combine in a single batch.\n\n Returns:\n A `Dataset`.\n \"\"\"\n return Dataset(dataset_ops.BatchDataset(self._dataset, batch_size))\n\n def padded_batch(self, batch_size, padded_shapes, padding_values=None):\n \"\"\"Combines consecutive elements of this dataset into padded batches.\n\n Like `Dataset.dense_to_sparse_batch()`, this method combines\n multiple consecutive elements of this dataset, which might have\n different shapes, into a single element. The tensors in the\n resulting element have an additional outer dimension, and are\n padded to the respective shape in `padded_shapes`.\n\n Args:\n batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of\n consecutive elements of this dataset to combine in a single batch.\n padded_shapes: A nested structure of `tf.TensorShape` or\n `tf.int64` vector tensor-like objects representing the shape\n to which the respective component of each input element should\n be padded prior to batching. Any unknown dimensions\n (e.g. `tf.Dimension(None)` in a `tf.TensorShape` or `-1` in a\n tensor-like object) will be padded to the maximum size of that\n dimension in each batch.\n padding_values: (Optional.) A nested structure of scalar-shaped\n `tf.Tensor`, representing the padding values to use for the\n respective components. Defaults are `0` for numeric types and\n the empty string for string types.\n\n Returns:\n A `Dataset`.\n \"\"\"\n return Dataset(\n dataset_ops.PaddedBatchDataset(self._dataset, batch_size, padded_shapes,\n padding_values))\n\n @deprecation.deprecated(\n None, \"Use `ds.apply(tf.contrib.data.dense_to_sparse_batch())`.\")\n def dense_to_sparse_batch(self, batch_size, row_shape):\n \"\"\"Use: `Dataset.apply(tf.contrib.data.dense_to_sparse_batch(...))`.\"\"\"\n\n return self.apply(batching.dense_to_sparse_batch(batch_size, row_shape))\n\n @deprecation.deprecated(\n None, \"Use `ds.apply(tf.contrib.data.group_by_window())`.\")\n def group_by_window(self, key_func, reduce_func, window_size):\n \"\"\"Deprecated: Use `Dataset.apply(tf.contrib.data.group_by_window(...))`.\"\"\"\n\n return self.apply(\n grouping.group_by_window(key_func, reduce_func, window_size))\n\n @deprecation.deprecated_args(\n None,\n \"Replace `num_threads=T` with `num_parallel_calls=T`. Replace \"\n \"`output_buffer_size=N` with `ds.prefetch(N)` on the returned dataset.\",\n \"num_threads\", \"output_buffer_size\")\n def map(self,\n map_func,\n num_threads=None,\n output_buffer_size=None,\n num_parallel_calls=None):\n \"\"\"Maps `map_func` across this dataset.\n\n Args:\n map_func: A function mapping a nested structure of tensors (having\n shapes and types defined by `self.output_shapes` and\n `self.output_types`) to another nested structure of tensors.\n num_threads: (Optional.) Deprecated, use `num_parallel_calls` instead.\n output_buffer_size: (Optional.) 
A `tf.int64` scalar `tf.Tensor`,\n representing the maximum number of processed elements that will be\n buffered.\n num_parallel_calls: (Optional.) A `tf.int32` scalar `tf.Tensor`,\n representing the number elements to process in parallel. If not\n specified, elements will be processed sequentially.\n\n Returns:\n A `Dataset`.\n \"\"\"\n if num_threads is None and num_parallel_calls is None:\n ret = Dataset(dataset_ops.MapDataset(self._dataset, map_func))\n else:\n if num_threads is None:\n ret = Dataset(\n dataset_ops.ParallelMapDataset(self._dataset, map_func,\n num_parallel_calls))\n else:\n ret = Dataset(\n dataset_ops.ParallelMapDataset(self._dataset, map_func,\n num_threads))\n if output_buffer_size is not None:\n ret = ret.prefetch(output_buffer_size)\n return ret\n\n def flat_map(self, map_func):\n \"\"\"Maps `map_func` across this dataset and flattens the result.\n\n Args:\n map_func: A function mapping a nested structure of tensors (having shapes\n and types defined by `self.output_shapes` and `self.output_types`) to a\n `Dataset`.\n\n Returns:\n A `Dataset`.\n \"\"\"\n return Dataset(dataset_ops.FlatMapDataset(self._dataset, map_func))\n\n def interleave(self, map_func, cycle_length, block_length=1):\n \"\"\"Maps `map_func` across this dataset, and interleaves the results.\n\n For example, you can use `Dataset.interleave()` to process many input files\n concurrently:\n\n ```python\n # Preprocess 4 files concurrently, and interleave blocks of 16 records from\n # each file.\n filenames = [\"/var/data/file1.txt\", \"/var/data/file2.txt\", ...]\n dataset = (Dataset.from_tensor_slices(filenames)\n .interleave(lambda x:\n TextLineDataset(x).map(parse_fn, num_parallel_calls=1),\n cycle_length=4, block_length=16))\n ```\n\n The `cycle_length` and `block_length` arguments control the order in which\n elements are produced. `cycle_length` controls the number of input elements\n that are processed concurrently. If you set `cycle_length` to 1, this\n transformation will handle one input element at a time, and will produce\n identical results = to @{tf.data.Dataset.flat_map}. In general,\n this transformation will apply `map_func` to `cycle_length` input elements,\n open iterators on the returned `Dataset` objects, and cycle through them\n producing `block_length` consecutive elements from each iterator, and\n consuming the next input element each time it reaches the end of an\n iterator.\n\n For example:\n\n ```python\n # NOTE: The following examples use `{ ... }` to represent the\n # contents of a dataset.\n a = { 1, 2, 3, 4, 5 }\n\n # NOTE: New lines indicate \"block\" boundaries.\n a.interleave(lambda x: Dataset.from_tensors(x).repeat(6),\n cycle_length=2, block_length=4) == {\n 1, 1, 1, 1,\n 2, 2, 2, 2,\n 1, 1,\n 2, 2,\n 3, 3, 3, 3,\n 4, 4, 4, 4,\n 3, 3,\n 4, 4,\n 5, 5, 5, 5,\n 5, 5,\n }\n ```\n\n NOTE: The order of elements yielded by this transformation is\n deterministic, as long as `map_func` is a pure function. 
If\n `map_func` contains any stateful operations, the order in which\n that state is accessed is undefined.\n\n Args:\n map_func: A function mapping a nested structure of tensors (having shapes\n and types defined by `self.output_shapes` and `self.output_types`) to a\n `Dataset`.\n cycle_length: The number of elements from this dataset that will be\n processed concurrently.\n block_length: The number of consecutive elements to produce from each\n input element before cycling to another input element.\n\n Returns:\n A `Dataset`.\n \"\"\"\n return Dataset(\n dataset_ops.InterleaveDataset(self._dataset, map_func, cycle_length,\n block_length))\n\n @deprecation.deprecated(None, \"Use `ds.apply(tf.contrib.data.unbatch())`.\")\n def unbatch(self):\n \"\"\"Deprecated: Use `Dataset.apply(tf.contrib.data.unbatch()`.\"\"\"\n\n return self.apply(batching.unbatch())\n\n def filter(self, predicate):\n \"\"\"Filters this dataset according to `predicate`.\n\n Args:\n predicate: A function mapping a nested structure of tensors (having shapes\n and types defined by `self.output_shapes` and `self.output_types`) to a\n scalar `tf.bool` tensor.\n\n Returns:\n A `Dataset`.\n \"\"\"\n return Dataset(dataset_ops.FilterDataset(self._dataset, predicate))\n\n def apply(self, transformation_func):\n \"\"\"Apply a transformation function to this dataset.\n\n `apply` enables chaining of custom `Dataset` transformations, which are\n represented as functions that take one `Dataset` argument and return a\n transformed `Dataset`.\n\n For example:\n\n ```\n dataset = (dataset.map(lambda x: x ** 2)\n .(group_by_window(key_func, reduce_func, window_size))\n .map(lambda x: x ** 3))\n ```\n\n Args:\n transformation_func: A function that takes one `Dataset` argument and\n returns a `Dataset`.\n\n Returns:\n The `Dataset` returned by applying `transformation_func` to this dataset.\n \"\"\"\n dataset = transformation_func(self)\n if not isinstance(dataset, dataset_ops.Dataset):\n raise TypeError(\"`transformation_func` must return a Dataset.\")\n return Dataset(dataset)\n\n\ndef get_single_element(dataset):\n \"\"\"Returns the single element in `dataset` as a nested structure of tensors.\n\n This function enables you to use a @{tf.data.Dataset} in a stateless\n \"tensor-in tensor-out\" expression, without creating a @{tf.data.Iterator}.\n This can be useful when your preprocessing transformations are expressed\n as a `Dataset`, and you want to use the transformation at serving time.\n For example:\n\n ```python\n input_batch = tf.placeholder(tf.string, shape=[BATCH_SIZE])\n\n def preprocessing_fn(input_str):\n # ...\n return image, label\n\n dataset = (tf.data.Dataset.from_tensor_slices(input_batch)\n .map(preprocessing_fn, num_parallel_calls=BATCH_SIZE)\n .batch(BATCH_SIZE))\n\n image_batch, label_batch = tf.contrib.data.get_single_element(dataset)\n ```\n\n Args:\n dataset: A @{tf.data.Dataset} object containing a single element.\n\n Returns:\n A nested structure of @{tf.Tensor} objects, corresponding to the single\n element of `dataset`.\n\n Raises:\n TypeError: if `dataset` is not a `tf.data.Dataset` object.\n InvalidArgumentError (at runtime): if `dataset` does not contain exactly\n one element.\n \"\"\"\n if not isinstance(dataset, dataset_ops.Dataset):\n raise TypeError(\"`dataset` must be a `tf.data.Dataset` object.\")\n return nest.pack_sequence_as(\n dataset.output_types,\n gen_dataset_ops.dataset_to_single_element(\n dataset._as_variant_tensor(), # pylint: disable=protected-access\n 
output_types=nest.flatten(dataset.output_types),\n output_shapes=nest.flatten(dataset.output_shapes)))\n", "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Bijector unit-test utilities.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops.distributions import uniform as uniform_lib\n\n\ndef assert_finite(array):\n if not np.isfinite(array).all():\n raise AssertionError(\"array was not all finite. %s\" % array[:15])\n\n\ndef assert_strictly_increasing(array):\n np.testing.assert_array_less(0., np.diff(array))\n\n\ndef assert_strictly_decreasing(array):\n np.testing.assert_array_less(np.diff(array), 0.)\n\n\ndef assert_strictly_monotonic(array):\n if array[0] < array[-1]:\n assert_strictly_increasing(array)\n else:\n assert_strictly_decreasing(array)\n\n\ndef assert_scalar_congruency(bijector,\n lower_x,\n upper_x,\n n=int(10e3),\n rtol=0.01,\n sess=None):\n \"\"\"Assert `bijector`'s forward/inverse/inverse_log_det_jacobian are congruent.\n\n We draw samples `X ~ U(lower_x, upper_x)`, then feed these through the\n `bijector` in order to check that:\n\n 1. the forward is strictly monotonic.\n 2. the forward/inverse methods are inverses of each other.\n 3. the jacobian is the correct change of measure.\n\n This can only be used for a Bijector mapping open subsets of the real line\n to themselves. This is due to the fact that this test compares the `prob`\n before/after transformation with the Lebesgue measure on the line.\n\n Args:\n bijector: Instance of Bijector\n lower_x: Python scalar.\n upper_x: Python scalar. Must have `lower_x < upper_x`, and both must be in\n the domain of the `bijector`. The `bijector` should probably not produce\n huge variation in values in the interval `(lower_x, upper_x)`, or else\n the variance based check of the Jacobian will require small `rtol` or\n huge `n`.\n n: Number of samples to draw for the checks.\n rtol: Positive number. Used for the Jacobian check.\n sess: `tf.Session`. 
Defaults to the default session.\n\n Raises:\n AssertionError: If tests fail.\n \"\"\"\n\n # Checks and defaults.\n assert bijector.event_ndims.eval() == 0\n if sess is None:\n sess = ops.get_default_session()\n\n # Should be monotonic over this interval\n ten_x_pts = np.linspace(lower_x, upper_x, num=10).astype(np.float32)\n if bijector.dtype is not None:\n ten_x_pts = ten_x_pts.astype(bijector.dtype.as_numpy_dtype)\n forward_on_10_pts = bijector.forward(ten_x_pts)\n\n # Set the lower/upper limits in the range of the bijector.\n lower_y, upper_y = sess.run(\n [bijector.forward(lower_x), bijector.forward(upper_x)])\n if upper_y < lower_y: # If bijector.forward is a decreasing function.\n lower_y, upper_y = upper_y, lower_y\n\n # Uniform samples from the domain, range.\n uniform_x_samps = uniform_lib.Uniform(\n low=lower_x, high=upper_x).sample(n, seed=0)\n uniform_y_samps = uniform_lib.Uniform(\n low=lower_y, high=upper_y).sample(n, seed=1)\n\n # These compositions should be the identity.\n inverse_forward_x = bijector.inverse(bijector.forward(uniform_x_samps))\n forward_inverse_y = bijector.forward(bijector.inverse(uniform_y_samps))\n\n # For a < b, and transformation y = y(x),\n # (b - a) = \\int_a^b dx = \\int_{y(a)}^{y(b)} |dx/dy| dy\n # \"change_measure_dy_dx\" below is a Monte Carlo approximation to the right\n # hand side, which should then be close to the left, which is (b - a).\n dy_dx = math_ops.exp(bijector.inverse_log_det_jacobian(uniform_y_samps))\n # E[|dx/dy|] under Uniform[lower_y, upper_y]\n # = \\int_{y(a)}^{y(b)} |dx/dy| dP(u), where dP(u) is the uniform measure\n expectation_of_dy_dx_under_uniform = math_ops.reduce_mean(dy_dx)\n # dy = dP(u) * (upper_y - lower_y)\n change_measure_dy_dx = (\n (upper_y - lower_y) * expectation_of_dy_dx_under_uniform)\n\n # We'll also check that dy_dx = 1 / dx_dy.\n dx_dy = math_ops.exp(\n bijector.forward_log_det_jacobian(bijector.inverse(uniform_y_samps)))\n\n [\n forward_on_10_pts_v,\n dy_dx_v,\n dx_dy_v,\n change_measure_dy_dx_v,\n uniform_x_samps_v,\n uniform_y_samps_v,\n inverse_forward_x_v,\n forward_inverse_y_v,\n ] = sess.run([\n forward_on_10_pts,\n dy_dx,\n dx_dy,\n change_measure_dy_dx,\n uniform_x_samps,\n uniform_y_samps,\n inverse_forward_x,\n forward_inverse_y,\n ])\n\n assert_strictly_monotonic(forward_on_10_pts_v)\n # Composition of forward/inverse should be the identity.\n np.testing.assert_allclose(\n inverse_forward_x_v, uniform_x_samps_v, atol=1e-5, rtol=1e-3)\n np.testing.assert_allclose(\n forward_inverse_y_v, uniform_y_samps_v, atol=1e-5, rtol=1e-3)\n # Change of measure should be correct.\n np.testing.assert_allclose(\n upper_x - lower_x, change_measure_dy_dx_v, atol=0, rtol=rtol)\n # Inverse Jacobian should be equivalent to the reciprocal of the forward\n # Jacobian.\n np.testing.assert_allclose(\n dy_dx_v, np.divide(1., dx_dy_v), atol=1e-5, rtol=1e-3)\n\n\ndef assert_bijective_and_finite(bijector, x, y, atol=0, rtol=1e-5, sess=None):\n \"\"\"Assert that forward/inverse (along with jacobians) are inverses and finite.\n\n It is recommended to use x and y values that are very very close to the edge\n of the Bijector's domain.\n\n Args:\n bijector: A Bijector instance.\n x: np.array of values in the domain of bijector.forward.\n y: np.array of values in the domain of bijector.inverse.\n atol: Absolute tolerance.\n rtol: Relative tolerance.\n sess: TensorFlow session. 
Defaults to the default session.\n\n Raises:\n AssertionError: If tests fail.\n \"\"\"\n sess = sess or ops.get_default_session()\n\n # These are the incoming points, but people often create a crazy range of\n # values for which these end up being bad, especially in 16bit.\n assert_finite(x)\n assert_finite(y)\n\n f_x = bijector.forward(x)\n g_y = bijector.inverse(y)\n\n [\n x_from_x,\n y_from_y,\n ildj_f_x,\n fldj_x,\n ildj_y,\n fldj_g_y,\n f_x_v,\n g_y_v,\n ] = sess.run([\n bijector.inverse(f_x),\n bijector.forward(g_y),\n bijector.inverse_log_det_jacobian(f_x),\n bijector.forward_log_det_jacobian(x),\n bijector.inverse_log_det_jacobian(y),\n bijector.forward_log_det_jacobian(g_y),\n f_x,\n g_y,\n ])\n\n assert_finite(x_from_x)\n assert_finite(y_from_y)\n assert_finite(ildj_f_x)\n assert_finite(fldj_x)\n assert_finite(ildj_y)\n assert_finite(fldj_g_y)\n assert_finite(f_x_v)\n assert_finite(g_y_v)\n\n np.testing.assert_allclose(x_from_x, x, atol=atol, rtol=rtol)\n np.testing.assert_allclose(y_from_y, y, atol=atol, rtol=rtol)\n np.testing.assert_allclose(-ildj_f_x, fldj_x, atol=atol, rtol=rtol)\n np.testing.assert_allclose(-ildj_y, fldj_g_y, atol=atol, rtol=rtol)\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for anno module.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport ast\n\nfrom tensorflow.contrib.py2tf.pyct import anno\nfrom tensorflow.python.platform import test\n\n\nclass AnnoTest(test.TestCase):\n\n def test_basic(self):\n node = ast.Name()\n\n self.assertFalse(anno.hasanno(node, 'foo'))\n with self.assertRaises(AttributeError):\n anno.getanno(node, 'foo')\n\n anno.setanno(node, 'foo', 3)\n self.assertTrue(anno.hasanno(node, 'foo'))\n self.assertEqual(3, anno.getanno(node, 'foo'))\n\n anno.delanno(node, 'foo')\n self.assertFalse(anno.hasanno(node, 'foo'))\n with self.assertRaises(AttributeError):\n anno.getanno(node, 'foo')\n\n\nif __name__ == '__main__':\n test.main()\n", "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for PowerSign.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\nimport numpy as np\n\nfrom tensorflow.contrib.opt.python.training import powersign\nfrom tensorflow.contrib.opt.python.training import sign_decay\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\n\n\ndef py_linear_decay_fn(decay_steps):\n def linear_decay(step):\n step = min(step, decay_steps)\n return float(decay_steps - step) / decay_steps\n return linear_decay\n\n\ndef powersign_update_numpy(params,\n g_t,\n m,\n lr,\n base=math.e,\n beta=0.9,\n py_sign_decay_fn=None,\n t=None):\n m_t = beta * m + (1 - beta) * g_t\n if py_sign_decay_fn is None:\n sign_decayed = 1.0\n else:\n sign_decayed = py_sign_decay_fn(t-1)\n multiplier = base ** (sign_decayed * np.sign(g_t) * np.sign(m_t))\n params_t = params - lr * multiplier * g_t\n return params_t, m_t\n\n\nclass PowerSignTest(test.TestCase):\n\n def _testDense(self,\n use_resource=False,\n learning_rate=0.1,\n sign_decay_fn=None,\n py_sign_decay_fn=None,\n base=math.e,\n beta=0.9):\n for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:\n with self.test_session(use_gpu=True):\n # Initialize variables for numpy implementation.\n m0, m1 = 0.0, 0.0\n var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)\n grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)\n var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)\n grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)\n\n if use_resource:\n var0 = resource_variable_ops.ResourceVariable(var0_np)\n var1 = resource_variable_ops.ResourceVariable(var1_np)\n global_step = resource_variable_ops.ResourceVariable(\n 0, trainable=False)\n else:\n var0 = variables.Variable(var0_np)\n var1 = variables.Variable(var1_np)\n global_step = variables.Variable(\n 0, trainable=False)\n grads0 = constant_op.constant(grads0_np)\n grads1 = constant_op.constant(grads1_np)\n\n opt = powersign.PowerSignOptimizer(\n learning_rate=learning_rate,\n base=base,\n beta=beta,\n sign_decay_fn=sign_decay_fn,\n )\n update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]),\n global_step=global_step)\n neg_update = opt.apply_gradients(zip([-grads0, -grads1], [var0, var1]),\n global_step=global_step)\n\n if context.in_graph_mode():\n self.evaluate(variables.global_variables_initializer())\n # Fetch params to validate initial values\n self.assertAllClose([1.0, 2.0], self.evaluate(var0))\n self.assertAllClose([3.0, 4.0], self.evaluate(var1))\n\n # Run 7 steps of powersign\n # first 4 steps with positive 
gradient\n # last 3 steps with negative gradient (sign(gm) should be -1)\n for t in range(1, 8):\n if t < 5:\n if context.in_graph_mode():\n self.evaluate(update)\n elif t > 1:\n opt.apply_gradients(zip([grads0, grads1], [var0, var1]),\n global_step=global_step)\n else:\n if context.in_graph_mode():\n self.evaluate(neg_update)\n elif t > 1:\n opt.apply_gradients(zip([-grads0, -grads1], [var0, var1]),\n global_step=global_step)\n\n var0_np, m0 = powersign_update_numpy(\n var0_np,\n grads0_np if t < 5 else -grads0_np,\n m0,\n learning_rate,\n base=base,\n beta=beta,\n py_sign_decay_fn=py_sign_decay_fn,\n t=t,\n )\n var1_np, m1 = powersign_update_numpy(\n var1_np,\n grads1_np if t < 5 else -grads1_np,\n m1,\n learning_rate,\n base=base,\n beta=beta,\n py_sign_decay_fn=py_sign_decay_fn,\n t=t,\n )\n\n # Validate updated params\n self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))\n self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))\n\n def testDense(self):\n decay_steps = 10\n sign_decay_fn = sign_decay.get_linear_decay_fn(decay_steps)\n py_sign_decay_fn = py_linear_decay_fn(decay_steps)\n self._testDense(use_resource=False)\n self._testDense(use_resource=False,\n learning_rate=0.1,\n base=10.0,\n beta=0.8)\n self._testDense(use_resource=False,\n sign_decay_fn=sign_decay_fn,\n py_sign_decay_fn=py_sign_decay_fn)\n\n self._testDense(use_resource=True)\n self._testDense(use_resource=True, learning_rate=0.1, base=10.0, beta=0.8)\n self._testDense(use_resource=True,\n sign_decay_fn=sign_decay_fn,\n py_sign_decay_fn=py_sign_decay_fn)\n\n def _testSparse(self,\n use_resource=False,\n learning_rate=0.1,\n sign_decay_fn=None,\n py_sign_decay_fn=None,\n base=math.e,\n beta=0.9):\n with self.test_session(use_gpu=True):\n for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:\n # Initialize variables for numpy implementation.\n m0, m1 = 0.0, 0.0\n var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)\n grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)\n var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)\n grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)\n\n if use_resource:\n var0 = resource_variable_ops.ResourceVariable(var0_np)\n var1 = resource_variable_ops.ResourceVariable(var1_np)\n global_step = resource_variable_ops.ResourceVariable(\n 0, trainable=False)\n else:\n var0 = variables.Variable(var0_np)\n var1 = variables.Variable(var1_np)\n global_step = variables.Variable(\n 0, trainable=False)\n grads0_np_indices = np.array([0, 1], dtype=np.int32)\n grads0 = ops.IndexedSlices(\n constant_op.constant(grads0_np),\n constant_op.constant(grads0_np_indices), constant_op.constant([2]))\n grads1_np_indices = np.array([0, 1], dtype=np.int32)\n grads1 = ops.IndexedSlices(\n constant_op.constant(grads1_np),\n constant_op.constant(grads1_np_indices), constant_op.constant([2]))\n opt = powersign.PowerSignOptimizer(\n learning_rate=learning_rate,\n base=base,\n beta=beta,\n sign_decay_fn=sign_decay_fn,\n )\n update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]),\n global_step=global_step)\n neg_update = opt.apply_gradients(zip([-grads0, -grads1], [var0, var1]),\n global_step=global_step)\n variables.global_variables_initializer().run()\n\n # Fetch params to validate initial values\n self.assertAllClose([1.0, 2.0], var0.eval())\n self.assertAllClose([3.0, 4.0], var1.eval())\n\n # Run 3 steps of powersign\n # first 4 steps with positive gradient\n # last 3 steps with negative gradient (sign(gm) should be -1)\n for t in range(1, 8):\n 
if t < 5:\n update.run()\n else:\n neg_update.run()\n\n var0_np, m0 = powersign_update_numpy(\n var0_np,\n grads0_np if t < 5 else -grads0_np,\n m0,\n learning_rate,\n base=base,\n beta=beta,\n py_sign_decay_fn=py_sign_decay_fn,\n t=t,\n )\n var1_np, m1 = powersign_update_numpy(\n var1_np,\n grads1_np if t < 5 else -grads1_np,\n m1,\n learning_rate,\n base=base,\n beta=beta,\n py_sign_decay_fn=py_sign_decay_fn,\n t=t,\n )\n\n # Validate updated params\n self.assertAllCloseAccordingToType(var0_np, var0.eval())\n self.assertAllCloseAccordingToType(var1_np, var1.eval())\n\n def testSparse(self):\n decay_steps = 10\n sign_decay_fn = sign_decay.get_linear_decay_fn(decay_steps)\n py_sign_decay_fn = py_linear_decay_fn(decay_steps)\n self._testSparse(use_resource=False)\n self._testSparse(use_resource=False,\n learning_rate=0.01,\n base=2.0,\n beta=0.8)\n self._testSparse(use_resource=False,\n sign_decay_fn=sign_decay_fn,\n py_sign_decay_fn=py_sign_decay_fn)\n\n\nif __name__ == '__main__':\n test.main()\n", "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Functional tests for shape inference helper classes.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.core.framework import tensor_shape_pb2\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.platform import googletest\n\n\nclass DimensionTest(test_util.TensorFlowTestCase):\n\n def testDimension(self):\n dim = tensor_shape.Dimension(12)\n self.assertEqual(12, dim.value)\n self.assertEqual(12, int(dim))\n self.assertEqual(dim, tensor_shape.Dimension(12))\n self.assertEqual(tensor_shape.Dimension(15),\n dim + tensor_shape.Dimension(3))\n self.assertEqual(tensor_shape.Dimension(15), dim + 3)\n self.assertEqual(tensor_shape.Dimension(24),\n dim * tensor_shape.Dimension(2))\n self.assertEqual(tensor_shape.Dimension(24), dim * 2)\n self.assertEqual(\n tensor_shape.Dimension(6), dim // tensor_shape.Dimension(2))\n self.assertEqual(tensor_shape.Dimension(6), dim // 2)\n self.assertEqual(tensor_shape.Dimension(12),\n dim.merge_with(tensor_shape.Dimension(12)))\n self.assertEqual(tensor_shape.Dimension(12), dim.merge_with(12))\n self.assertLess(tensor_shape.Dimension(12), tensor_shape.Dimension(13))\n self.assertGreater(tensor_shape.Dimension(13), tensor_shape.Dimension(12))\n self.assertLessEqual(tensor_shape.Dimension(12), tensor_shape.Dimension(12))\n self.assertLessEqual(tensor_shape.Dimension(12), tensor_shape.Dimension(13))\n self.assertGreater(tensor_shape.Dimension(13), tensor_shape.Dimension(12))\n self.assertGreaterEqual(tensor_shape.Dimension(12),\n tensor_shape.Dimension(12))\n self.assertGreaterEqual(tensor_shape.Dimension(13),\n tensor_shape.Dimension(12))\n self.assertNotEqual(dim, (12,))\n with self.assertRaises(ValueError):\n 
dim.merge_with(tensor_shape.Dimension(13))\n\n def testUnknownDimension(self):\n dim = tensor_shape.Dimension(None)\n self.assertIs(None, dim.value)\n self.assertEqual(dim.value, tensor_shape.Dimension(None).value)\n self.assertEqual(tensor_shape.Dimension(None).value,\n (dim + tensor_shape.Dimension(None)).value)\n self.assertEqual(tensor_shape.Dimension(None).value,\n (dim * tensor_shape.Dimension(None)).value)\n self.assertEqual(\n tensor_shape.Dimension(None).value,\n (dim // tensor_shape.Dimension(None)).value)\n self.assertEqual(tensor_shape.Dimension(None).value,\n dim.merge_with(tensor_shape.Dimension(None)).value)\n self.assertIs(None,\n tensor_shape.Dimension(None) < tensor_shape.Dimension(None))\n self.assertIs(None,\n tensor_shape.Dimension(None) <= tensor_shape.Dimension(None))\n self.assertIs(None,\n tensor_shape.Dimension(None) > tensor_shape.Dimension(None))\n self.assertIs(None,\n tensor_shape.Dimension(None) >= tensor_shape.Dimension(None))\n\n def testKnownAndUnknownDimensions(self):\n known = tensor_shape.Dimension(12)\n unknown = tensor_shape.Dimension(None)\n self.assertEqual(\n tensor_shape.Dimension(None).value, (known + unknown).value)\n self.assertEqual(\n tensor_shape.Dimension(None).value, (unknown + known).value)\n self.assertEqual(\n tensor_shape.Dimension(None).value, (known * unknown).value)\n self.assertEqual(\n tensor_shape.Dimension(None).value, (unknown * known).value)\n self.assertEqual(\n tensor_shape.Dimension(None).value, (known // unknown).value)\n self.assertEqual(\n tensor_shape.Dimension(None).value, (unknown // known).value)\n self.assertEqual(\n tensor_shape.Dimension(12), known.merge_with(unknown))\n self.assertEqual(\n tensor_shape.Dimension(12), unknown.merge_with(known))\n self.assertIs(None,\n tensor_shape.Dimension(12) < tensor_shape.Dimension(None))\n self.assertIs(None,\n tensor_shape.Dimension(12) <= tensor_shape.Dimension(None))\n self.assertIs(None,\n tensor_shape.Dimension(12) > tensor_shape.Dimension(None))\n self.assertIs(None,\n tensor_shape.Dimension(12) >= tensor_shape.Dimension(None))\n self.assertIs(None,\n tensor_shape.Dimension(None) < tensor_shape.Dimension(12))\n self.assertIs(None,\n tensor_shape.Dimension(None) <= tensor_shape.Dimension(12))\n self.assertIs(None,\n tensor_shape.Dimension(None) > tensor_shape.Dimension(12))\n self.assertIs(None,\n tensor_shape.Dimension(None) >= tensor_shape.Dimension(12))\n\n def testAsDimension(self):\n self.assertEqual(tensor_shape.Dimension(12),\n tensor_shape.as_dimension(tensor_shape.Dimension(12)))\n self.assertEqual(tensor_shape.Dimension(12), tensor_shape.as_dimension(12))\n self.assertEqual(\n tensor_shape.Dimension(None).value,\n tensor_shape.as_dimension(tensor_shape.Dimension(None)).value)\n self.assertEqual(tensor_shape.Dimension(None).value,\n tensor_shape.as_dimension(None).value)\n\n def testEquality(self):\n self.assertTrue(tensor_shape.Dimension(12) == tensor_shape.Dimension(12))\n self.assertFalse(tensor_shape.Dimension(12) == tensor_shape.Dimension(13))\n self.assertIs(None,\n tensor_shape.Dimension(12) == tensor_shape.Dimension(None))\n self.assertIs(None,\n tensor_shape.Dimension(None) == tensor_shape.Dimension(12))\n self.assertIs(None,\n tensor_shape.Dimension(None) == tensor_shape.Dimension(None))\n self.assertTrue(tensor_shape.Dimension(12) == \"12\")\n self.assertTrue(tensor_shape.Dimension(12) == 24.0 / 2)\n\n # None indicates ambiguous comparison, but comparison vs the wrong type\n # is unambigously False.\n self.assertIsNotNone(tensor_shape.Dimension(12) == 
\"_\")\n self.assertIsNotNone(tensor_shape.Dimension(None) == 12.99)\n self.assertFalse(tensor_shape.Dimension(12) == \"_\")\n self.assertFalse(tensor_shape.Dimension(None) == 12.99)\n\n self.assertIs(None, tensor_shape.Dimension(None) == \"13\")\n self.assertIs(None, tensor_shape.Dimension(None) == None) # pylint: disable=g-equals-none\n self.assertFalse(tensor_shape.Dimension(12) == 12.99)\n\n def testInequality(self):\n self.assertTrue(tensor_shape.Dimension(12) != tensor_shape.Dimension(13))\n self.assertFalse(tensor_shape.Dimension(12) != tensor_shape.Dimension(12))\n self.assertIs(None,\n tensor_shape.Dimension(12) != tensor_shape.Dimension(None))\n self.assertIs(None,\n tensor_shape.Dimension(None) != tensor_shape.Dimension(12))\n self.assertIs(None,\n tensor_shape.Dimension(None) != tensor_shape.Dimension(None))\n\n # None indicates ambiguous comparison, but comparison vs the wrong type\n # is unambigously False.\n self.assertIsNotNone(tensor_shape.Dimension(12) != \"_\")\n self.assertIsNotNone(tensor_shape.Dimension(None) != 12.99)\n self.assertTrue(tensor_shape.Dimension(12) != \"_\")\n self.assertTrue(tensor_shape.Dimension(None) != 12.99)\n\n self.assertIs(None, tensor_shape.Dimension(None) != \"13\")\n self.assertIs(None, tensor_shape.Dimension(None) != None) # pylint: disable=g-equals-none\n self.assertTrue(tensor_shape.Dimension(12) != 12.99)\n\n def testRepr(self):\n self.assertEqual(repr(tensor_shape.Dimension(7)), \"Dimension(7)\")\n self.assertEqual(repr(tensor_shape.Dimension(None)), \"Dimension(None)\")\n\n def testStr(self):\n self.assertEqual(str(tensor_shape.Dimension(7)), \"7\")\n self.assertEqual(str(tensor_shape.Dimension(None)), \"?\")\n\n\nclass ShapeTest(test_util.TensorFlowTestCase):\n\n def testUnknownShape(self):\n s = tensor_shape.TensorShape(None)\n with self.assertRaises(ValueError):\n s.assert_is_fully_defined()\n self.assertIs(None, s.ndims)\n with self.assertRaises(ValueError):\n len(s)\n self.assertFalse(s)\n self.assertIs(None, s.dims)\n with self.assertRaises(ValueError):\n for _ in tensor_shape.TensorShape(None):\n pass\n\n def testFullyDefinedShape(self):\n s = tensor_shape.TensorShape([tensor_shape.Dimension(\n 3), tensor_shape.Dimension(4), tensor_shape.Dimension(7)])\n s.assert_is_fully_defined()\n self.assertEqual(3, s.ndims)\n self.assertEqual(3, len(s))\n self.assertTrue(s)\n s.assert_has_rank(3)\n self.assertEqual([tensor_shape.Dimension(3),\n tensor_shape.Dimension(4),\n tensor_shape.Dimension(7)], s.dims)\n self.assertEqual(tensor_shape.Dimension(3), s[0])\n self.assertEqual(tensor_shape.Dimension(4), s[1])\n self.assertEqual(tensor_shape.Dimension(7), s[2])\n self.assertEqual([3, 4, 7], s.as_list())\n s.assert_is_compatible_with([3, 4, 7])\n s.assert_same_rank([6, 3, 7])\n for d1, d2 in zip(s, [3, 4, 7]):\n assert d1.value == d2\n\n def testPartiallyDefinedShape(self):\n s = tensor_shape.TensorShape([tensor_shape.Dimension(\n 3), tensor_shape.Dimension(None), tensor_shape.Dimension(7)])\n with self.assertRaises(ValueError):\n s.assert_is_fully_defined()\n self.assertEqual(3, s.ndims)\n self.assertEqual(3, len(s))\n self.assertTrue(s)\n s.assert_has_rank(3)\n self.assertEqual(tensor_shape.Dimension(3), s[0])\n self.assertEqual(tensor_shape.Dimension(None).value, s[1].value)\n self.assertEqual(tensor_shape.Dimension(7), s[2])\n s.assert_same_rank([6, 3, 7])\n for d1, d2 in zip(s, [3, None, 7]):\n assert d1.value == d2\n\n def testMergeFullShapes(self):\n self.assertEqual([3, 4, 7],\n tensor_shape.TensorShape([3, 4, 7]).merge_with(\n 
tensor_shape.TensorShape([3, 4, 7])).as_list())\n with self.assertRaises(ValueError):\n tensor_shape.TensorShape([3, 4, 7]).merge_with(\n tensor_shape.TensorShape([6, 3, 7]))\n\n def testMergePartialShapes(self):\n s1 = tensor_shape.TensorShape([tensor_shape.Dimension(\n 3), tensor_shape.Dimension(None), tensor_shape.Dimension(7)])\n s2 = tensor_shape.TensorShape([tensor_shape.Dimension(\n None), tensor_shape.Dimension(4), tensor_shape.Dimension(7)])\n self.assertEqual([3, 4, 7], s1.merge_with(s2).as_list())\n\n def testMergeFullAndUnknownShape(self):\n self.assertEqual([3, 4, 7],\n tensor_shape.TensorShape([3, 4, 7]).merge_with(\n tensor_shape.TensorShape(None)).as_list())\n\n def testSlice(self):\n known = tensor_shape.TensorShape([0, 1, 2, 3, 4])\n self.assertEqual(tensor_shape.Dimension(2), known[2])\n tensor_shape.TensorShape([1, 2, 3]).assert_is_compatible_with(known[1:4])\n\n unknown = tensor_shape.TensorShape(None)\n self.assertEqual(tensor_shape.Dimension(None).value, unknown[2].value)\n tensor_shape.TensorShape(\n [None, None, None]).assert_is_compatible_with(unknown[1:4])\n\n def testConcatenate(self):\n tensor_shape.TensorShape([1, 2, 3, 4]).assert_is_compatible_with(\n tensor_shape.TensorShape([1, 2]).concatenate(\n tensor_shape.TensorShape([3, 4])))\n tensor_shape.TensorShape([1, 2, 3, 4]).assert_is_compatible_with(\n tensor_shape.TensorShape([1, 2]).concatenate(\n tensor_shape.TensorShape(None)))\n tensor_shape.TensorShape([1, 2, 3, 4]).assert_is_compatible_with(\n tensor_shape.TensorShape(None).concatenate(\n tensor_shape.TensorShape([3, 4])))\n tensor_shape.TensorShape([1, 2, 3, 4]).assert_is_compatible_with(\n tensor_shape.TensorShape(None).concatenate(\n tensor_shape.TensorShape(None)))\n tensor_shape.TensorShape([1, 2, 3]).assert_is_compatible_with(\n tensor_shape.TensorShape([1, 2]).concatenate(\n tensor_shape.Dimension(3)))\n\n def _testMostSpecificCompatibleShapeHelper(self, x, y, expected):\n mcs = tensor_shape.TensorShape(x).most_specific_compatible_shape(\n tensor_shape.TensorShape(y))\n mcs_dims = mcs.dims\n if expected is None or mcs_dims is None:\n self.assertIs(expected, mcs_dims)\n else:\n self.assertEqual(expected, mcs.as_list())\n\n def testMostSpecificCompatibleShape(self):\n self._testMostSpecificCompatibleShapeHelper([1, 2], None, None)\n self._testMostSpecificCompatibleShapeHelper(None, [1, 2], None)\n self._testMostSpecificCompatibleShapeHelper([1, 2], [1, 2, 3, 4], None)\n self._testMostSpecificCompatibleShapeHelper([1, 2, 3, 4], [1, 2], None)\n self._testMostSpecificCompatibleShapeHelper([1, 2], [1, 2], [1, 2])\n self._testMostSpecificCompatibleShapeHelper([None, 2, 3], [1, 1, 3],\n [None, None, 3])\n self._testMostSpecificCompatibleShapeHelper([1, 1, 3], [None, 2, 3],\n [None, None, 3])\n\n def testHelpers(self):\n tensor_shape.TensorShape([]).assert_is_compatible_with(\n tensor_shape.scalar())\n tensor_shape.TensorShape([37]).assert_is_compatible_with(\n tensor_shape.vector(37))\n tensor_shape.TensorShape(\n [94, 43]).assert_is_compatible_with(tensor_shape.matrix(94, 43))\n\n def testTruedivFails(self):\n unknown = tensor_shape.Dimension(None)\n self.assertEqual((unknown // unknown).value, None)\n with self.assertRaisesRegexp(TypeError, r\"unsupported operand type\"):\n unknown / unknown # pylint: disable=pointless-statement\n\n def testConvertFromProto(self):\n def make_tensor_shape_proto(shape):\n return tensor_shape_pb2.TensorShapeProto(\n dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=x) for x in shape])\n proto = make_tensor_shape_proto([])\n 
self.assertEqual(tensor_shape.TensorShape([]),\n tensor_shape.TensorShape(proto))\n self.assertEqual(tensor_shape.TensorShape([]),\n tensor_shape.as_shape(proto))\n\n proto = make_tensor_shape_proto([1, 37, 42])\n self.assertEqual(tensor_shape.TensorShape([1, 37, 42]),\n tensor_shape.TensorShape(proto))\n self.assertEqual(tensor_shape.TensorShape([1, 37, 42]),\n tensor_shape.as_shape(proto))\n\n partial_proto_shape = tensor_shape.as_shape(\n make_tensor_shape_proto([-1, 37, 42]))\n partial_shape = tensor_shape.TensorShape([None, 37, 42])\n self.assertNotEqual(partial_proto_shape, partial_shape)\n self.assertEqual(partial_proto_shape[0].value, None)\n self.assertEqual(partial_proto_shape[1].value, 37)\n self.assertEqual(partial_proto_shape[2].value, 42)\n self.assertTrue(partial_shape.is_compatible_with(partial_proto_shape))\n\n def testStr(self):\n self.assertEqual(\"<unknown>\", str(tensor_shape.unknown_shape()))\n self.assertEqual(\"(?,)\", str(tensor_shape.unknown_shape(ndims=1)))\n self.assertEqual(\"(?, ?)\", str(tensor_shape.unknown_shape(ndims=2)))\n self.assertEqual(\"(?, ?, ?)\", str(tensor_shape.unknown_shape(ndims=3)))\n\n self.assertEqual(\"()\", str(tensor_shape.scalar()))\n self.assertEqual(\"(7,)\", str(tensor_shape.vector(7)))\n self.assertEqual(\"(3, 8)\", str(tensor_shape.matrix(3, 8)))\n self.assertEqual(\"(4, 5, 2)\", str(tensor_shape.TensorShape([4, 5, 2])))\n\n self.assertEqual(\"(32, ?, 1, 9)\",\n str(tensor_shape.TensorShape([32, None, 1, 9])))\n\n def testAsProto(self):\n self.assertTrue(tensor_shape.unknown_shape().as_proto().unknown_rank)\n self.assertFalse(\n tensor_shape.unknown_shape(ndims=3).as_proto().unknown_rank)\n self.assertFalse(\n tensor_shape.TensorShape([1, 2, 3]).as_proto().unknown_rank)\n self.assertFalse(\n tensor_shape.TensorShape([1, None, 3]).as_proto().unknown_rank)\n\n def testEquality(self):\n s1 = tensor_shape.TensorShape([tensor_shape.Dimension(\n 3), tensor_shape.Dimension(4), tensor_shape.Dimension(7)])\n s2 = tensor_shape.TensorShape([tensor_shape.Dimension(\n 3), tensor_shape.Dimension(4), tensor_shape.Dimension(7)])\n s3 = tensor_shape.TensorShape([tensor_shape.Dimension(3),\n tensor_shape.Dimension(4), None])\n\n self.assertTrue(s1 == s2)\n self.assertFalse(s1 != s2)\n self.assertFalse(s1 == \"a string\")\n self.assertTrue(s1 != \"a string\")\n self.assertNotEqual(s1, \"347\", \"Should not equal an ambiguous string.\")\n self.assertEqual(s1, [\"3\", \"4\", \"7\"])\n\n # Test with an unknown shape in s3\n self.assertTrue(s1 != s3)\n self.assertFalse(s3 == \"a string\")\n self.assertTrue(s3 != \"a string\")\n\n # eq and neq are not symmetric for unknown shapes.\n unk0 = tensor_shape.unknown_shape()\n self.assertFalse(unk0 == s1)\n self.assertFalse(s1 == unk0)\n with self.assertRaises(ValueError):\n unk0 != s1 # pylint: disable=pointless-statement\n with self.assertRaises(ValueError):\n s1 != unk0 # pylint: disable=pointless-statement\n unk1 = tensor_shape.unknown_shape()\n self.assertTrue(unk0 == unk1)\n self.assertTrue(unk1 == unk0)\n with self.assertRaises(ValueError):\n unk0 != unk1 # pylint: disable=pointless-statement\n with self.assertRaises(ValueError):\n unk1 != unk0 # pylint: disable=pointless-statement\n\n def testAsList(self):\n with self.assertRaisesRegexp(ValueError,\n \"not defined on an unknown TensorShape\"):\n tensor_shape.unknown_shape().as_list()\n self.assertAllEqual([None, None], tensor_shape.unknown_shape(2).as_list())\n self.assertAllEqual([2, None, 4], tensor_shape.TensorShape(\n (2, None, 4)).as_list())\n\nif 
__name__ == \"__main__\":\n googletest.main()\n", "# pylint: disable=g-bad-file-header\n# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for tensorflow.python.client.graph_util.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.core.framework import attr_value_pb2\nfrom tensorflow.core.framework import graph_pb2\nfrom tensorflow.core.framework import node_def_pb2\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import importer\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gen_nn_ops\nfrom tensorflow.python.ops import image_ops\nfrom tensorflow.python.ops import math_ops # pylint: disable=unused-import\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.tools import optimize_for_inference_lib\n\n\n@test_util.with_c_api\nclass OptimizeForInferenceTest(test.TestCase):\n\n def create_node_def(self, op, name, inputs):\n new_node = node_def_pb2.NodeDef()\n new_node.op = op\n new_node.name = name\n for input_name in inputs:\n new_node.input.extend([input_name])\n return new_node\n\n def create_constant_node_def(self, name, value, dtype, shape=None):\n node = self.create_node_def(\"Const\", name, [])\n self.set_attr_dtype(node, \"dtype\", dtype)\n self.set_attr_tensor(node, \"value\", value, dtype, shape)\n return node\n\n def set_attr_dtype(self, node, key, value):\n node.attr[key].CopyFrom(\n attr_value_pb2.AttrValue(type=value.as_datatype_enum))\n\n def set_attr_tensor(self, node, key, value, dtype, shape=None):\n node.attr[key].CopyFrom(\n attr_value_pb2.AttrValue(tensor=tensor_util.make_tensor_proto(\n value, dtype=dtype, shape=shape)))\n\n def testOptimizeForInference(self):\n self.maxDiff = 1000\n unused_constant_name = \"unused_constant\"\n unconnected_add_name = \"unconnected_add\"\n a_constant_name = \"a_constant\"\n b_constant_name = \"b_constant\"\n a_check_name = \"a_check\"\n b_check_name = \"b_check\"\n a_identity_name = \"a_identity\"\n b_identity_name = \"b_identity\"\n add_name = \"add\"\n unused_output_add_name = \"unused_output_add\"\n graph_def = graph_pb2.GraphDef()\n unused_constant = self.create_constant_node_def(\n unused_constant_name, value=0, dtype=dtypes.float32, shape=[])\n graph_def.node.extend([unused_constant])\n unconnected_add_node = self.create_node_def(\n \"Add\", unconnected_add_name,\n [unused_constant_name, unused_constant_name])\n self.set_attr_dtype(unconnected_add_node, \"T\", dtypes.float32)\n graph_def.node.extend([unconnected_add_node])\n a_constant = 
self.create_constant_node_def(\n a_constant_name, value=1, dtype=dtypes.float32, shape=[])\n graph_def.node.extend([a_constant])\n a_check_node = self.create_node_def(\"CheckNumerics\", a_check_name,\n [a_constant_name])\n graph_def.node.extend([a_check_node])\n a_identity_node = self.create_node_def(\n \"Identity\", a_identity_name, [a_constant_name, \"^\" + a_check_name])\n graph_def.node.extend([a_identity_node])\n b_constant = self.create_constant_node_def(\n b_constant_name, value=1, dtype=dtypes.float32, shape=[])\n graph_def.node.extend([b_constant])\n b_check_node = self.create_node_def(\"CheckNumerics\", b_check_name,\n [b_constant_name])\n graph_def.node.extend([b_check_node])\n b_identity_node = self.create_node_def(\n \"Identity\", b_identity_name, [b_constant_name, \"^\" + b_check_name])\n graph_def.node.extend([b_identity_node])\n add_node = self.create_node_def(\"Add\", add_name,\n [a_identity_name, b_identity_name])\n self.set_attr_dtype(add_node, \"T\", dtypes.float32)\n graph_def.node.extend([add_node])\n unused_output_add_node = self.create_node_def(\"Add\", unused_output_add_name,\n [add_name, b_constant_name])\n self.set_attr_dtype(unused_output_add_node, \"T\", dtypes.float32)\n graph_def.node.extend([unused_output_add_node])\n\n expected_output = graph_pb2.GraphDef()\n a_constant = self.create_constant_node_def(\n a_constant_name, value=1, dtype=dtypes.float32, shape=[])\n expected_output.node.extend([a_constant])\n b_constant = self.create_constant_node_def(\n b_constant_name, value=1, dtype=dtypes.float32, shape=[])\n expected_output.node.extend([b_constant])\n add_node = self.create_node_def(\"Add\", add_name,\n [a_constant_name, b_constant_name])\n self.set_attr_dtype(add_node, \"T\", dtypes.float32)\n expected_output.node.extend([add_node])\n\n output = optimize_for_inference_lib.optimize_for_inference(\n graph_def, [], [add_name], dtypes.float32.as_datatype_enum)\n self.assertProtoEquals(expected_output, output)\n\n def testFoldBatchNorms(self):\n with self.test_session() as sess:\n inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]\n input_op = constant_op.constant(\n np.array(inputs), shape=[1, 1, 6, 2], dtype=dtypes.float32)\n weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4]\n weights_op = constant_op.constant(\n np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32)\n conv_op = nn_ops.conv2d(\n input_op, weights_op, [1, 1, 1, 1], padding=\"SAME\", name=\"conv_op\")\n mean_op = constant_op.constant(\n np.array([10, 20]), shape=[2], dtype=dtypes.float32)\n variance_op = constant_op.constant(\n np.array([0.25, 0.5]), shape=[2], dtype=dtypes.float32)\n beta_op = constant_op.constant(\n np.array([0.1, 0.6]), shape=[2], dtype=dtypes.float32)\n gamma_op = constant_op.constant(\n np.array([1.0, 2.0]), shape=[2], dtype=dtypes.float32)\n test_util.set_producer_version(ops.get_default_graph(), 8)\n gen_nn_ops._batch_norm_with_global_normalization(\n conv_op,\n mean_op,\n variance_op,\n beta_op,\n gamma_op,\n 0.00001,\n False,\n name=\"output\")\n original_graph_def = sess.graph_def\n original_result = sess.run([\"output:0\"])\n optimized_graph_def = optimize_for_inference_lib.fold_batch_norms(\n original_graph_def)\n\n with self.test_session() as sess:\n _ = importer.import_graph_def(\n optimized_graph_def, input_map={}, name=\"optimized\")\n optimized_result = sess.run([\"optimized/output:0\"])\n\n self.assertAllClose(original_result, optimized_result)\n\n for node in optimized_graph_def.node:\n self.assertNotEqual(\"BatchNormWithGlobalNormalization\", node.op)\n\n 
def testFoldFusedBatchNorms(self):\n for data_format, use_gpu in [(\"NHWC\", False), (\"NCHW\", True)]:\n with self.test_session(use_gpu=use_gpu) as sess:\n inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]\n input_op = constant_op.constant(\n np.array(inputs),\n shape=[1, 1, 6, 2] if data_format == \"NHWC\" else [1, 2, 1, 6],\n dtype=dtypes.float32)\n weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4]\n weights_op = constant_op.constant(\n np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32)\n conv_op = nn_ops.conv2d(\n input_op,\n weights_op, [1, 1, 1, 1],\n padding=\"SAME\",\n data_format=data_format,\n name=\"conv_op\")\n mean_op = constant_op.constant(\n np.array([10, 20]), shape=[2], dtype=dtypes.float32)\n variance_op = constant_op.constant(\n np.array([0.25, 0.5]), shape=[2], dtype=dtypes.float32)\n beta_op = constant_op.constant(\n np.array([0.1, 0.6]), shape=[2], dtype=dtypes.float32)\n gamma_op = constant_op.constant(\n np.array([1.0, 2.0]), shape=[2], dtype=dtypes.float32)\n ops.get_default_graph().graph_def_versions.producer = 9\n gen_nn_ops._fused_batch_norm(\n conv_op,\n gamma_op,\n beta_op,\n mean_op,\n variance_op,\n 0.00001,\n is_training=False,\n data_format=data_format,\n name=\"output\")\n original_graph_def = sess.graph_def\n original_result = sess.run([\"output:0\"])\n optimized_graph_def = optimize_for_inference_lib.fold_batch_norms(\n original_graph_def)\n\n with self.test_session(use_gpu=use_gpu) as sess:\n _ = importer.import_graph_def(\n optimized_graph_def, input_map={}, name=\"optimized\")\n optimized_result = sess.run([\"optimized/output:0\"])\n\n self.assertAllClose(\n original_result, optimized_result, rtol=1e-04, atol=1e-06)\n\n for node in optimized_graph_def.node:\n self.assertNotEqual(\"FusedBatchNorm\", node.op)\n\n def testFuseResizePadAndConv(self):\n with self.test_session() as sess:\n inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]\n input_op = constant_op.constant(\n np.array(inputs), shape=[1, 2, 3, 2], dtype=dtypes.float32)\n resize_op = image_ops.resize_bilinear(\n input_op, [12, 4], align_corners=False)\n pad_op = array_ops.pad(resize_op, [[0, 0], [1, 1], [2, 2], [0, 0]],\n mode=\"REFLECT\")\n weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4]\n weights_op = constant_op.constant(\n np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32)\n nn_ops.conv2d(\n pad_op, weights_op, [1, 1, 1, 1], padding=\"VALID\", name=\"output\")\n original_graph_def = sess.graph_def\n original_result = sess.run([\"output:0\"])\n optimized_graph_def = optimize_for_inference_lib.fuse_resize_and_conv(\n original_graph_def, [\"output\"])\n\n with self.test_session() as sess:\n _ = importer.import_graph_def(\n optimized_graph_def, input_map={}, name=\"optimized\")\n optimized_result = sess.run([\"optimized/output:0\"])\n\n self.assertAllClose(original_result, optimized_result)\n\n for node in optimized_graph_def.node:\n self.assertNotEqual(\"Conv2D\", node.op)\n self.assertNotEqual(\"MirrorPad\", node.op)\n self.assertNotEqual(\"ResizeBilinear\", node.op)\n\n def testFuseResizeAndConv(self):\n with self.test_session() as sess:\n inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]\n input_op = constant_op.constant(\n np.array(inputs), shape=[1, 2, 3, 2], dtype=dtypes.float32)\n resize_op = image_ops.resize_bilinear(\n input_op, [12, 4], align_corners=False)\n weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4]\n weights_op = constant_op.constant(\n np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32)\n nn_ops.conv2d(\n resize_op, weights_op, [1, 1, 1, 1], 
padding=\"VALID\", name=\"output\")\n original_graph_def = sess.graph_def\n original_result = sess.run([\"output:0\"])\n optimized_graph_def = optimize_for_inference_lib.fuse_resize_and_conv(\n original_graph_def, [\"output\"])\n\n with self.test_session() as sess:\n _ = importer.import_graph_def(\n optimized_graph_def, input_map={}, name=\"optimized\")\n optimized_result = sess.run([\"optimized/output:0\"])\n\n self.assertAllClose(original_result, optimized_result)\n\n for node in optimized_graph_def.node:\n self.assertNotEqual(\"Conv2D\", node.op)\n self.assertNotEqual(\"MirrorPad\", node.op)\n\n\n def testFusePadAndConv(self):\n with self.test_session() as sess:\n inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]\n input_op = constant_op.constant(\n np.array(inputs), shape=[1, 2, 3, 2], dtype=dtypes.float32)\n pad_op = array_ops.pad(input_op, [[0, 0], [1, 1], [2, 2], [0, 0]],\n mode=\"REFLECT\")\n weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4]\n weights_op = constant_op.constant(\n np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32)\n nn_ops.conv2d(\n pad_op, weights_op, [1, 1, 1, 1], padding=\"VALID\", name=\"output\")\n original_graph_def = sess.graph_def\n original_result = sess.run([\"output:0\"])\n optimized_graph_def = optimize_for_inference_lib.fuse_resize_and_conv(\n original_graph_def, [\"output\"])\n\n with self.test_session() as sess:\n _ = importer.import_graph_def(\n optimized_graph_def, input_map={}, name=\"optimized\")\n optimized_result = sess.run([\"optimized/output:0\"])\n\n self.assertAllClose(original_result, optimized_result)\n\n for node in optimized_graph_def.node:\n self.assertNotEqual(\"Conv2D\", node.op)\n self.assertNotEqual(\"ResizeBilinear\", node.op)\n\n\nif __name__ == \"__main__\":\n test.main()\n", "# Copyright 2018 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# pylint: disable=line-too-long\n# pylint: disable=invalid-name\n# pylint: disable=unused-import\n\"\"\"NASNet-A models for Keras.\n\nNASNet refers to Neural Architecture Search Network, a family of models\nthat were designed automatically by learning the model architectures\ndirectly on the dataset of interest.\n\nHere we consider NASNet-A, the highest performance model that was found\nfor the CIFAR-10 dataset, and then extended to ImageNet 2012 dataset,\nobtaining state of the art performance on CIFAR-10 and ImageNet 2012.\nOnly the NASNet-A models, and their respective weights, which are suited\nfor ImageNet 2012 are provided.\n\nThe below table describes the performance on ImageNet 2012:\n--------------------------------------------------------------------------------\n Architecture | Top-1 Acc | Top-5 Acc | Multiply-Adds | Params (M)\n--------------------------------------------------------------------------------\n| NASNet-A (4 @ 1056) | 74.0 % | 91.6 % | 564 M | 5.3 |\n| NASNet-A (6 @ 4032) | 82.7 % | 96.2 % | 23.8 B | 88.9 |\n--------------------------------------------------------------------------------\n\nReferences:\n - [Learning Transferable Architectures for Scalable Image Recognition]\n (https://arxiv.org/abs/1707.07012)\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nfrom tensorflow.python.keras._impl.keras import backend as K\nfrom tensorflow.python.keras._impl.keras.applications.imagenet_utils import _obtain_input_shape\nfrom tensorflow.python.keras._impl.keras.applications.imagenet_utils import decode_predictions\nfrom tensorflow.python.keras._impl.keras.applications.inception_v3 import preprocess_input\nfrom tensorflow.python.keras._impl.keras.engine.topology import get_source_inputs\nfrom tensorflow.python.keras._impl.keras.layers import Activation\nfrom tensorflow.python.keras._impl.keras.layers import add\nfrom tensorflow.python.keras._impl.keras.layers import AveragePooling2D\nfrom tensorflow.python.keras._impl.keras.layers import BatchNormalization\nfrom tensorflow.python.keras._impl.keras.layers import concatenate\nfrom tensorflow.python.keras._impl.keras.layers import Conv2D\nfrom tensorflow.python.keras._impl.keras.layers import Cropping2D\nfrom tensorflow.python.keras._impl.keras.layers import Dense\nfrom tensorflow.python.keras._impl.keras.layers import GlobalAveragePooling2D\nfrom tensorflow.python.keras._impl.keras.layers import GlobalMaxPooling2D\nfrom tensorflow.python.keras._impl.keras.layers import Input\nfrom tensorflow.python.keras._impl.keras.layers import MaxPooling2D\nfrom tensorflow.python.keras._impl.keras.layers import SeparableConv2D\nfrom tensorflow.python.keras._impl.keras.layers import ZeroPadding2D\nfrom tensorflow.python.keras._impl.keras.models import Model\nfrom 
tensorflow.python.keras._impl.keras.utils.data_utils import get_file\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.util.tf_export import tf_export\n\n\nNASNET_MOBILE_WEIGHT_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.8/NASNet-mobile.h5'\nNASNET_MOBILE_WEIGHT_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.8/NASNet-mobile-no-top.h5'\nNASNET_LARGE_WEIGHT_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.8/NASNet-large.h5'\nNASNET_LARGE_WEIGHT_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.8/NASNet-large-no-top.h5'\n\n\ndef NASNet(input_shape=None,\n penultimate_filters=4032,\n num_blocks=6,\n stem_block_filters=96,\n skip_reduction=True,\n filter_multiplier=2,\n include_top=True,\n weights=None,\n input_tensor=None,\n pooling=None,\n classes=1000,\n default_size=None):\n \"\"\"Instantiates a NASNet model.\n\n Note that only TensorFlow is supported for now,\n therefore it only works with the data format\n `image_data_format='channels_last'` in your Keras config\n at `~/.keras/keras.json`.\n\n Arguments:\n input_shape: Optional shape tuple, only to be specified\n if `include_top` is False (otherwise the input shape\n has to be `(331, 331, 3)` for NASNetLarge or\n `(224, 224, 3)` for NASNetMobile\n It should have exactly 3 inputs channels,\n and width and height should be no smaller than 32.\n E.g. `(224, 224, 3)` would be one valid value.\n penultimate_filters: Number of filters in the penultimate layer.\n NASNet models use the notation `NASNet (N @ P)`, where:\n - N is the number of blocks\n - P is the number of penultimate filters\n num_blocks: Number of repeated blocks of the NASNet model.\n NASNet models use the notation `NASNet (N @ P)`, where:\n - N is the number of blocks\n - P is the number of penultimate filters\n stem_block_filters: Number of filters in the initial stem block\n skip_reduction: Whether to skip the reduction step at the tail\n end of the network. Set to `False` for CIFAR models.\n filter_multiplier: Controls the width of the network.\n - If `filter_multiplier` < 1.0, proportionally decreases the number\n of filters in each layer.\n - If `filter_multiplier` > 1.0, proportionally increases the number\n of filters in each layer.\n - If `filter_multiplier` = 1, default number of filters from the\n paper are used at each layer.\n include_top: Whether to include the fully-connected\n layer at the top of the network.\n weights: `None` (random initialization) or\n `imagenet` (ImageNet weights)\n input_tensor: Optional Keras tensor (i.e. 
output of\n `layers.Input()`)\n to use as image input for the model.\n pooling: Optional pooling mode for feature extraction\n when `include_top` is `False`.\n - `None` means that the output of the model\n will be the 4D tensor output of the\n last convolutional layer.\n - `avg` means that global average pooling\n will be applied to the output of the\n last convolutional layer, and thus\n the output of the model will be a\n 2D tensor.\n - `max` means that global max pooling will\n be applied.\n classes: Optional number of classes to classify images\n into, only to be specified if `include_top` is True, and\n if no `weights` argument is specified.\n default_size: Specifies the default image size of the model\n\n Returns:\n A Keras model instance.\n\n Raises:\n ValueError: In case of invalid argument for `weights`,\n invalid input shape or invalid `penultimate_filters` value.\n RuntimeError: If attempting to run this model with a\n backend that does not support separable convolutions.\n \"\"\"\n if K.backend() != 'tensorflow':\n raise RuntimeError('Only Tensorflow backend is currently supported, '\n 'as other backends do not support '\n 'separable convolution.')\n\n if not (weights in {'imagenet', None} or os.path.exists(weights)):\n raise ValueError('The `weights` argument should be either '\n '`None` (random initialization), `imagenet` '\n '(pre-training on ImageNet), '\n 'or the path to the weights file to be loaded.')\n\n if weights == 'imagenet' and include_top and classes != 1000:\n raise ValueError('If using `weights` as ImageNet with `include_top` '\n 'as true, `classes` should be 1000')\n\n if default_size is None:\n default_size = 331\n\n # Determine proper input shape and default size.\n input_shape = _obtain_input_shape(\n input_shape,\n default_size=default_size,\n min_size=32,\n data_format=K.image_data_format(),\n require_flatten=include_top or weights,\n weights=weights)\n\n if K.image_data_format() != 'channels_last':\n logging.warning('The NASNet family of models is only available '\n 'for the input data format \"channels_last\" '\n '(width, height, channels). '\n 'However your settings specify the default '\n 'data format \"channels_first\" (channels, width, height).'\n ' You should set `image_data_format=\"channels_last\"` '\n 'in your Keras config located at ~/.keras/keras.json. '\n 'The model being returned right now will expect inputs '\n 'to follow the \"channels_last\" data format.')\n K.set_image_data_format('channels_last')\n old_data_format = 'channels_first'\n else:\n old_data_format = None\n\n if input_tensor is None:\n img_input = Input(shape=input_shape)\n else:\n if not K.is_keras_tensor(input_tensor):\n img_input = Input(tensor=input_tensor, shape=input_shape)\n else:\n img_input = input_tensor\n\n if penultimate_filters % 24 != 0:\n raise ValueError(\n 'For NASNet-A models, the value of `penultimate_filters` '\n 'needs to be divisible by 24. 
Current value: %d' % penultimate_filters)\n\n channel_dim = 1 if K.image_data_format() == 'channels_first' else -1\n filters = penultimate_filters // 24\n\n if not skip_reduction:\n x = Conv2D(\n stem_block_filters, (3, 3),\n strides=(2, 2),\n padding='valid',\n use_bias=False,\n name='stem_conv1',\n kernel_initializer='he_normal')(\n img_input)\n else:\n x = Conv2D(\n stem_block_filters, (3, 3),\n strides=(1, 1),\n padding='same',\n use_bias=False,\n name='stem_conv1',\n kernel_initializer='he_normal')(\n img_input)\n\n x = BatchNormalization(\n axis=channel_dim, momentum=0.9997, epsilon=1e-3, name='stem_bn1')(\n x)\n\n p = None\n if not skip_reduction: # imagenet / mobile mode\n x, p = _reduction_a_cell(\n x, p, filters // (filter_multiplier**2), block_id='stem_1')\n x, p = _reduction_a_cell(\n x, p, filters // filter_multiplier, block_id='stem_2')\n\n for i in range(num_blocks):\n x, p = _normal_a_cell(x, p, filters, block_id='%d' % (i))\n\n x, p0 = _reduction_a_cell(\n x, p, filters * filter_multiplier, block_id='reduce_%d' % (num_blocks))\n\n p = p0 if not skip_reduction else p\n\n for i in range(num_blocks):\n x, p = _normal_a_cell(\n x, p, filters * filter_multiplier, block_id='%d' % (num_blocks + i + 1))\n\n x, p0 = _reduction_a_cell(\n x,\n p,\n filters * filter_multiplier**2,\n block_id='reduce_%d' % (2 * num_blocks))\n\n p = p0 if not skip_reduction else p\n\n for i in range(num_blocks):\n x, p = _normal_a_cell(\n x,\n p,\n filters * filter_multiplier**2,\n block_id='%d' % (2 * num_blocks + i + 1))\n\n x = Activation('relu')(x)\n\n if include_top:\n x = GlobalAveragePooling2D()(x)\n x = Dense(classes, activation='softmax', name='predictions')(x)\n else:\n if pooling == 'avg':\n x = GlobalAveragePooling2D()(x)\n elif pooling == 'max':\n x = GlobalMaxPooling2D()(x)\n\n # Ensure that the model takes into account\n # any potential predecessors of `input_tensor`.\n if input_tensor is not None:\n inputs = get_source_inputs(input_tensor)\n else:\n inputs = img_input\n\n model = Model(inputs, x, name='NASNet')\n\n # load weights\n if weights == 'imagenet':\n if default_size == 224: # mobile version\n if include_top:\n weight_path = NASNET_MOBILE_WEIGHT_PATH\n model_name = 'nasnet_mobile.h5'\n else:\n weight_path = NASNET_MOBILE_WEIGHT_PATH_NO_TOP\n model_name = 'nasnet_mobile_no_top.h5'\n\n weights_file = get_file(model_name, weight_path, cache_subdir='models')\n model.load_weights(weights_file)\n\n elif default_size == 331: # large version\n if include_top:\n weight_path = NASNET_LARGE_WEIGHT_PATH\n model_name = 'nasnet_large.h5'\n else:\n weight_path = NASNET_LARGE_WEIGHT_PATH_NO_TOP\n model_name = 'nasnet_large_no_top.h5'\n\n weights_file = get_file(model_name, weight_path, cache_subdir='models')\n model.load_weights(weights_file)\n else:\n raise ValueError('ImageNet weights can only be loaded with NASNetLarge'\n ' or NASNetMobile')\n elif weights is not None:\n model.load_weights(weights)\n\n if old_data_format:\n K.set_image_data_format(old_data_format)\n\n return model\n\n\n@tf_export('keras.applications.NASNetLarge',\n 'keras.applications.nasnet.NASNetLarge')\ndef NASNetLarge(input_shape=None,\n include_top=True,\n weights='imagenet',\n input_tensor=None,\n pooling=None,\n classes=1000):\n \"\"\"Instantiates a NASNet model in ImageNet mode.\n\n Note that only TensorFlow is supported for now,\n therefore it only works with the data format\n `image_data_format='channels_last'` in your Keras config\n at `~/.keras/keras.json`.\n\n Arguments:\n input_shape: Optional shape tuple, only 
to be specified\n if `include_top` is False (otherwise the input shape\n has to be `(331, 331, 3)` for NASNetLarge.\n It should have exactly 3 inputs channels,\n and width and height should be no smaller than 32.\n E.g. `(224, 224, 3)` would be one valid value.\n include_top: Whether to include the fully-connected\n layer at the top of the network.\n weights: `None` (random initialization) or\n `imagenet` (ImageNet weights)\n input_tensor: Optional Keras tensor (i.e. output of\n `layers.Input()`)\n to use as image input for the model.\n pooling: Optional pooling mode for feature extraction\n when `include_top` is `False`.\n - `None` means that the output of the model\n will be the 4D tensor output of the\n last convolutional layer.\n - `avg` means that global average pooling\n will be applied to the output of the\n last convolutional layer, and thus\n the output of the model will be a\n 2D tensor.\n - `max` means that global max pooling will\n be applied.\n classes: Optional number of classes to classify images\n into, only to be specified if `include_top` is True, and\n if no `weights` argument is specified.\n\n Returns:\n A Keras model instance.\n\n Raises:\n ValueError: in case of invalid argument for `weights`,\n or invalid input shape.\n RuntimeError: If attempting to run this model with a\n backend that does not support separable convolutions.\n \"\"\"\n return NASNet(\n input_shape,\n penultimate_filters=4032,\n num_blocks=6,\n stem_block_filters=96,\n skip_reduction=False,\n filter_multiplier=2,\n include_top=include_top,\n weights=weights,\n input_tensor=input_tensor,\n pooling=pooling,\n classes=classes,\n default_size=331)\n\n\n@tf_export('keras.applications.NASNetMobile',\n 'keras.applications.nasnet.NASNetMobile')\ndef NASNetMobile(input_shape=None,\n include_top=True,\n weights='imagenet',\n input_tensor=None,\n pooling=None,\n classes=1000):\n \"\"\"Instantiates a Mobile NASNet model in ImageNet mode.\n\n Note that only TensorFlow is supported for now,\n therefore it only works with the data format\n `image_data_format='channels_last'` in your Keras config\n at `~/.keras/keras.json`.\n\n Arguments:\n input_shape: Optional shape tuple, only to be specified\n if `include_top` is False (otherwise the input shape\n has to be `(224, 224, 3)` for NASNetMobile\n It should have exactly 3 inputs channels,\n and width and height should be no smaller than 32.\n E.g. `(224, 224, 3)` would be one valid value.\n include_top: Whether to include the fully-connected\n layer at the top of the network.\n weights: `None` (random initialization) or\n `imagenet` (ImageNet weights)\n input_tensor: Optional Keras tensor (i.e. 
output of\n `layers.Input()`)\n to use as image input for the model.\n pooling: Optional pooling mode for feature extraction\n when `include_top` is `False`.\n - `None` means that the output of the model\n will be the 4D tensor output of the\n last convolutional layer.\n - `avg` means that global average pooling\n will be applied to the output of the\n last convolutional layer, and thus\n the output of the model will be a\n 2D tensor.\n - `max` means that global max pooling will\n be applied.\n classes: Optional number of classes to classify images\n into, only to be specified if `include_top` is True, and\n if no `weights` argument is specified.\n\n Returns:\n A Keras model instance.\n\n Raises:\n ValueError: In case of invalid argument for `weights`,\n or invalid input shape.\n RuntimeError: If attempting to run this model with a\n backend that does not support separable convolutions.\n \"\"\"\n return NASNet(\n input_shape,\n penultimate_filters=1056,\n num_blocks=4,\n stem_block_filters=32,\n skip_reduction=False,\n filter_multiplier=2,\n include_top=include_top,\n weights=weights,\n input_tensor=input_tensor,\n pooling=pooling,\n classes=classes,\n default_size=224)\n\n\ndef _separable_conv_block(ip,\n filters,\n kernel_size=(3, 3),\n strides=(1, 1),\n block_id=None):\n \"\"\"Adds 2 blocks of [relu-separable conv-batchnorm].\n\n Arguments:\n ip: Input tensor\n filters: Number of output filters per layer\n kernel_size: Kernel size of separable convolutions\n strides: Strided convolution for downsampling\n block_id: String block_id\n\n Returns:\n A Keras tensor\n \"\"\"\n channel_dim = 1 if K.image_data_format() == 'channels_first' else -1\n\n with K.name_scope('separable_conv_block_%s' % block_id):\n x = Activation('relu')(ip)\n x = SeparableConv2D(\n filters,\n kernel_size,\n strides=strides,\n name='separable_conv_1_%s' % block_id,\n padding='same',\n use_bias=False,\n kernel_initializer='he_normal')(\n x)\n x = BatchNormalization(\n axis=channel_dim,\n momentum=0.9997,\n epsilon=1e-3,\n name='separable_conv_1_bn_%s' % (block_id))(\n x)\n x = Activation('relu')(x)\n x = SeparableConv2D(\n filters,\n kernel_size,\n name='separable_conv_2_%s' % block_id,\n padding='same',\n use_bias=False,\n kernel_initializer='he_normal')(\n x)\n x = BatchNormalization(\n axis=channel_dim,\n momentum=0.9997,\n epsilon=1e-3,\n name='separable_conv_2_bn_%s' % (block_id))(\n x)\n return x\n\n\ndef _adjust_block(p, ip, filters, block_id=None):\n \"\"\"Adjusts the input `previous path` to match the shape of the `input`.\n\n Used in situations where the output number of filters needs to be changed.\n\n Arguments:\n p: Input tensor which needs to be modified\n ip: Input tensor whose shape needs to be matched\n filters: Number of output filters to be matched\n block_id: String block_id\n\n Returns:\n Adjusted Keras tensor\n \"\"\"\n channel_dim = 1 if K.image_data_format() == 'channels_first' else -1\n img_dim = 2 if K.image_data_format() == 'channels_first' else -2\n\n ip_shape = K.int_shape(ip)\n\n if p is not None:\n p_shape = K.int_shape(p)\n\n with K.name_scope('adjust_block'):\n if p is None:\n p = ip\n\n elif p_shape[img_dim] != ip_shape[img_dim]:\n with K.name_scope('adjust_reduction_block_%s' % block_id):\n p = Activation('relu', name='adjust_relu_1_%s' % block_id)(p)\n\n p1 = AveragePooling2D(\n (1, 1),\n strides=(2, 2),\n padding='valid',\n name='adjust_avg_pool_1_%s' % block_id)(\n p)\n p1 = Conv2D(\n filters // 2, (1, 1),\n padding='same',\n use_bias=False,\n name='adjust_conv_1_%s' % block_id,\n 
kernel_initializer='he_normal')(\n p1)\n\n p2 = ZeroPadding2D(padding=((0, 1), (0, 1)))(p)\n p2 = Cropping2D(cropping=((1, 0), (1, 0)))(p2)\n p2 = AveragePooling2D(\n (1, 1),\n strides=(2, 2),\n padding='valid',\n name='adjust_avg_pool_2_%s' % block_id)(\n p2)\n p2 = Conv2D(\n filters // 2, (1, 1),\n padding='same',\n use_bias=False,\n name='adjust_conv_2_%s' % block_id,\n kernel_initializer='he_normal')(\n p2)\n\n p = concatenate([p1, p2], axis=channel_dim)\n p = BatchNormalization(\n axis=channel_dim,\n momentum=0.9997,\n epsilon=1e-3,\n name='adjust_bn_%s' % block_id)(\n p)\n\n elif p_shape[channel_dim] != filters:\n with K.name_scope('adjust_projection_block_%s' % block_id):\n p = Activation('relu')(p)\n p = Conv2D(\n filters, (1, 1),\n strides=(1, 1),\n padding='same',\n name='adjust_conv_projection_%s' % block_id,\n use_bias=False,\n kernel_initializer='he_normal')(\n p)\n p = BatchNormalization(\n axis=channel_dim,\n momentum=0.9997,\n epsilon=1e-3,\n name='adjust_bn_%s' % block_id)(\n p)\n return p\n\n\ndef _normal_a_cell(ip, p, filters, block_id=None):\n \"\"\"Adds a Normal cell for NASNet-A (Fig. 4 in the paper).\n\n Arguments:\n ip: Input tensor `x`\n p: Input tensor `p`\n filters: Number of output filters\n block_id: String block_id\n\n Returns:\n A Keras tensor\n \"\"\"\n channel_dim = 1 if K.image_data_format() == 'channels_first' else -1\n\n with K.name_scope('normal_A_block_%s' % block_id):\n p = _adjust_block(p, ip, filters, block_id)\n\n h = Activation('relu')(ip)\n h = Conv2D(\n filters, (1, 1),\n strides=(1, 1),\n padding='same',\n name='normal_conv_1_%s' % block_id,\n use_bias=False,\n kernel_initializer='he_normal')(\n h)\n h = BatchNormalization(\n axis=channel_dim,\n momentum=0.9997,\n epsilon=1e-3,\n name='normal_bn_1_%s' % block_id)(\n h)\n\n with K.name_scope('block_1'):\n x1_1 = _separable_conv_block(\n h, filters, kernel_size=(5, 5), block_id='normal_left1_%s' % block_id)\n x1_2 = _separable_conv_block(\n p, filters, block_id='normal_right1_%s' % block_id)\n x1 = add([x1_1, x1_2], name='normal_add_1_%s' % block_id)\n\n with K.name_scope('block_2'):\n x2_1 = _separable_conv_block(\n p, filters, (5, 5), block_id='normal_left2_%s' % block_id)\n x2_2 = _separable_conv_block(\n p, filters, (3, 3), block_id='normal_right2_%s' % block_id)\n x2 = add([x2_1, x2_2], name='normal_add_2_%s' % block_id)\n\n with K.name_scope('block_3'):\n x3 = AveragePooling2D(\n (3, 3),\n strides=(1, 1),\n padding='same',\n name='normal_left3_%s' % (block_id))(\n h)\n x3 = add([x3, p], name='normal_add_3_%s' % block_id)\n\n with K.name_scope('block_4'):\n x4_1 = AveragePooling2D(\n (3, 3),\n strides=(1, 1),\n padding='same',\n name='normal_left4_%s' % (block_id))(\n p)\n x4_2 = AveragePooling2D(\n (3, 3),\n strides=(1, 1),\n padding='same',\n name='normal_right4_%s' % (block_id))(\n p)\n x4 = add([x4_1, x4_2], name='normal_add_4_%s' % block_id)\n\n with K.name_scope('block_5'):\n x5 = _separable_conv_block(\n h, filters, block_id='normal_left5_%s' % block_id)\n x5 = add([x5, h], name='normal_add_5_%s' % block_id)\n\n x = concatenate(\n [p, x1, x2, x3, x4, x5],\n axis=channel_dim,\n name='normal_concat_%s' % block_id)\n return x, ip\n\n\ndef _reduction_a_cell(ip, p, filters, block_id=None):\n \"\"\"Adds a Reduction cell for NASNet-A (Fig. 
4 in the paper).\n\n Arguments:\n ip: Input tensor `x`\n p: Input tensor `p`\n filters: Number of output filters\n block_id: String block_id\n\n Returns:\n A Keras tensor\n \"\"\"\n channel_dim = 1 if K.image_data_format() == 'channels_first' else -1\n\n with K.name_scope('reduction_A_block_%s' % block_id):\n p = _adjust_block(p, ip, filters, block_id)\n\n h = Activation('relu')(ip)\n h = Conv2D(\n filters, (1, 1),\n strides=(1, 1),\n padding='same',\n name='reduction_conv_1_%s' % block_id,\n use_bias=False,\n kernel_initializer='he_normal')(\n h)\n h = BatchNormalization(\n axis=channel_dim,\n momentum=0.9997,\n epsilon=1e-3,\n name='reduction_bn_1_%s' % block_id)(\n h)\n\n with K.name_scope('block_1'):\n x1_1 = _separable_conv_block(\n h,\n filters, (5, 5),\n strides=(2, 2),\n block_id='reduction_left1_%s' % block_id)\n x1_2 = _separable_conv_block(\n p,\n filters, (7, 7),\n strides=(2, 2),\n block_id='reduction_1_%s' % block_id)\n x1 = add([x1_1, x1_2], name='reduction_add_1_%s' % block_id)\n\n with K.name_scope('block_2'):\n x2_1 = MaxPooling2D(\n (3, 3),\n strides=(2, 2),\n padding='same',\n name='reduction_left2_%s' % block_id)(\n h)\n x2_2 = _separable_conv_block(\n p,\n filters, (7, 7),\n strides=(2, 2),\n block_id='reduction_right2_%s' % block_id)\n x2 = add([x2_1, x2_2], name='reduction_add_2_%s' % block_id)\n\n with K.name_scope('block_3'):\n x3_1 = AveragePooling2D(\n (3, 3),\n strides=(2, 2),\n padding='same',\n name='reduction_left3_%s' % block_id)(\n h)\n x3_2 = _separable_conv_block(\n p,\n filters, (5, 5),\n strides=(2, 2),\n block_id='reduction_right3_%s' % block_id)\n x3 = add([x3_1, x3_2], name='reduction_add3_%s' % block_id)\n\n with K.name_scope('block_4'):\n x4 = AveragePooling2D(\n (3, 3),\n strides=(1, 1),\n padding='same',\n name='reduction_left4_%s' % block_id)(\n x1)\n x4 = add([x2, x4])\n\n with K.name_scope('block_5'):\n x5_1 = _separable_conv_block(\n x1, filters, (3, 3), block_id='reduction_left4_%s' % block_id)\n x5_2 = MaxPooling2D(\n (3, 3),\n strides=(2, 2),\n padding='same',\n name='reduction_right5_%s' % block_id)(\n h)\n x5 = add([x5_1, x5_2], name='reduction_add4_%s' % block_id)\n\n x = concatenate(\n [x2, x3, x4, x5],\n axis=channel_dim,\n name='reduction_concat_%s' % block_id)\n return x, ip\n", "# Copyright 2015 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for shape_ops.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.contrib.signal.python.kernel_tests import test_util\nfrom tensorflow.contrib.signal.python.ops import shape_ops\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.platform import test\n\n\nclass FrameTest(test.TestCase):\n\n def test_mapping_of_indices_without_padding(self):\n with self.test_session(use_gpu=True):\n tensor = constant_op.constant(np.arange(9152), dtypes.int32)\n tensor = array_ops.expand_dims(tensor, 0)\n\n result = shape_ops.frame(tensor, 512, 180, pad_end=False).eval()\n\n expected = np.tile(np.arange(512), (49, 1))\n expected += np.tile(np.arange(49) * 180, (512, 1)).T\n\n expected = np.expand_dims(expected, axis=0)\n expected = np.array(expected, dtype=np.int32)\n\n self.assertAllEqual(expected, result)\n\n def test_mapping_of_indices_with_padding(self):\n with self.test_session(use_gpu=True):\n tensor = constant_op.constant(np.arange(10000), dtypes.int32)\n tensor = array_ops.expand_dims(tensor, 0)\n\n result = shape_ops.frame(tensor, 512, 192, pad_end=True).eval()\n\n expected = np.tile(np.arange(512), (53, 1))\n expected += np.tile(np.arange(53) * 192, (512, 1)).T\n\n expected[expected >= 10000] = 0\n\n expected = np.expand_dims(expected, axis=0)\n expected = np.array(expected, dtype=np.int32)\n\n self.assertAllEqual(expected, result)\n\n def test_invalid_inputs(self):\n # Rank 0 input signal.\n with self.assertRaises(ValueError):\n shape_ops.frame(1, 1, 1)\n\n # If the rank is unknown, do not raise an exception.\n shape_ops.frame(array_ops.placeholder(dtypes.float32), 1, 1)\n\n # Non-scalar frame_length.\n with self.assertRaises(ValueError):\n shape_ops.frame([1], [1], 1)\n\n # Non-scalar frame_step.\n with self.assertRaises(ValueError):\n shape_ops.frame([1], 1, [1])\n\n # Non-scalar pad_value.\n with self.assertRaises(ValueError):\n shape_ops.frame([1], 1, 1, pad_end=True, pad_value=[1])\n\n def test_length_zero(self):\n signal = constant_op.constant([], dtype=dtypes.float32)\n frame_length = 2\n frame_step = 1\n\n with self.test_session(use_gpu=True):\n result = shape_ops.frame(signal, frame_length, frame_step,\n pad_end=True, pad_value=99).eval()\n self.assertEqual((0, 2), result.shape)\n\n result = shape_ops.frame(signal, frame_length, frame_step,\n pad_end=False).eval()\n self.assertEqual((0, 2), result.shape)\n\n def test_shape_inference(self):\n signal = array_ops.placeholder(dtypes.int32, shape=[1, 1])\n frame_length = 2\n frame_step = 1\n # Shape inference is able to detect the rank and inner-most dimension\n # if 
frame_length is known at graph definition time.\n result = shape_ops.frame(signal, frame_length, frame_step,\n pad_end=True, pad_value=99)\n self.assertEqual([1, 1, 2], result.shape.as_list())\n\n result = shape_ops.frame(signal, frame_length, frame_step,\n pad_end=False)\n self.assertEqual([1, 0, 2], result.shape.as_list())\n\n # If frame_length is not known, rank and (known) outer and inner dimensions\n # are inferred.\n signal = array_ops.placeholder(dtypes.int32, shape=[1, 2, 3, 4])\n frame_length = array_ops.placeholder(dtypes.int32, shape=[])\n frame_step = 1\n result = shape_ops.frame(signal, frame_length, frame_step,\n pad_end=True, pad_value=99, axis=1)\n self.assertEqual([1, None, None, 3, 4], result.shape.as_list())\n\n result = shape_ops.frame(signal, frame_length, frame_step,\n pad_end=False, axis=1)\n self.assertEqual([1, None, None, 3, 4], result.shape.as_list())\n\n # If frame_length and inner-most dimension is known, rank, inner dimensions,\n # and known outer dimensions are inferred.\n signal = array_ops.placeholder(dtypes.int32,\n shape=[None, 5, None, 20, 5, 3])\n frame_length = 4\n frame_step = 3\n result = shape_ops.frame(signal, frame_length, frame_step,\n pad_end=True, pad_value=99, axis=3)\n self.assertEqual([None, 5, None, 7, 4, 5, 3], result.shape.as_list())\n\n result = shape_ops.frame(signal, frame_length, frame_step,\n pad_end=False, axis=3)\n self.assertEqual([None, 5, None, 6, 4, 5, 3], result.shape.as_list())\n\n # Test that shape inference is consistent with actual returned shapes for\n # small values of signal_length, frame_length, frame_step, and pad_end in\n # [True, False].\n frame_step = 1\n for signal_length in range(2):\n signal = [0] * signal_length\n for frame_length in range(2):\n for pad_end in [False, True]:\n op = shape_ops.frame(signal, frame_length, frame_step,\n pad_end=pad_end, pad_value=99)\n with self.test_session(use_gpu=True):\n result = op.eval()\n self.assertEqual(op.shape.as_list(), list(result.shape))\n\n def test_basic_mono(self):\n signal = np.arange(6)\n frame_length = 3\n frame_step = 2\n\n with self.test_session(use_gpu=True):\n for rank in range(5):\n nd_signal = np.reshape(signal, (1,) * rank + signal.shape)\n\n # With padding, we pad the last frame with pad_value.\n result = shape_ops.frame(nd_signal, frame_length, frame_step,\n pad_end=True, pad_value=99).eval()\n expected_inner_frames = np.array([[0, 1, 2], [2, 3, 4], [4, 5, 99]])\n expected = np.reshape(\n expected_inner_frames, (1,) * rank + expected_inner_frames.shape)\n self.assertAllEqual(expected, result)\n\n # Without padding, we drop the last frame.\n expected_inner_frames = np.array([[0, 1, 2], [2, 3, 4]])\n expected = np.reshape(\n expected_inner_frames, (1,) * rank + expected_inner_frames.shape)\n result = shape_ops.frame(nd_signal, frame_length, frame_step,\n pad_end=False).eval()\n self.assertAllEqual(expected, result)\n\n def test_basic_stereo(self):\n signal = np.vstack([np.arange(6),\n np.arange(6) + 10])\n frame_length = 3\n frame_step = 2\n\n with self.test_session(use_gpu=True):\n for rank in range(5):\n nd_signal = np.reshape(signal, (1,) * rank + signal.shape)\n\n # With padding, we pad the last frame with pad_value.\n result = shape_ops.frame(nd_signal, frame_length, frame_step,\n pad_end=True, pad_value=99).eval()\n expected_inner_frames = np.array([\n [[0, 1, 2], [2, 3, 4], [4, 5, 99]],\n [[10, 11, 12], [12, 13, 14], [14, 15, 99]]])\n expected = np.reshape(\n expected_inner_frames, (1,) * rank + expected_inner_frames.shape)\n 
self.assertAllEqual(expected, result)\n\n # Without padding, we drop the last frame.\n expected_inner_frames = np.array([[[0, 1, 2], [2, 3, 4]],\n [[10, 11, 12], [12, 13, 14]]])\n expected = np.reshape(\n expected_inner_frames, (1,) * rank + expected_inner_frames.shape)\n result = shape_ops.frame(nd_signal, frame_length, frame_step,\n pad_end=False).eval()\n self.assertAllEqual(expected, result)\n\n def test_complex_shape(self):\n signal = np.vstack([np.arange(6),\n np.arange(6) + 10,\n np.arange(6) + 20,\n np.arange(6) + 30,\n np.arange(6) + 40,\n np.arange(6) + 50])\n signal = np.reshape(signal, (2, 1, 3, 1, 6))\n frame_length = 3\n frame_step = 2\n\n with self.test_session(use_gpu=True):\n # With padding, we pad the last frame with pad_value.\n result = shape_ops.frame(signal, frame_length, frame_step,\n pad_end=True, pad_value=99).eval()\n # Resulting shape is (2, 1, 3, 1, 3, 3).\n expected = [[[[[[0, 1, 2], [2, 3, 4], [4, 5, 99]]],\n [[[10, 11, 12], [12, 13, 14], [14, 15, 99]]],\n [[[20, 21, 22], [22, 23, 24], [24, 25, 99]]]]],\n [[[[[30, 31, 32], [32, 33, 34], [34, 35, 99]]],\n [[[40, 41, 42], [42, 43, 44], [44, 45, 99]]],\n [[[50, 51, 52], [52, 53, 54], [54, 55, 99]]]]]]\n self.assertAllEqual(expected, result)\n\n result = shape_ops.frame(signal, frame_length, frame_step,\n pad_end=False).eval()\n # Resulting shape is (2, 1, 3, 1, 3, 2).\n expected = [[[[[[0, 1, 2], [2, 3, 4]]],\n [[[10, 11, 12], [12, 13, 14]]],\n [[[20, 21, 22], [22, 23, 24]]]]],\n [[[[[30, 31, 32], [32, 33, 34]]],\n [[[40, 41, 42], [42, 43, 44]]],\n [[[50, 51, 52], [52, 53, 54]]]]]]\n self.assertAllEqual(expected, result)\n\n def test_axis(self):\n signal = np.reshape(np.arange(16), (2, 4, 2))\n with self.test_session(use_gpu=True):\n result = shape_ops.frame(signal, frame_length=2, frame_step=2,\n pad_end=True, axis=1)\n expected = np.reshape(np.arange(16), (2, 2, 2, 2))\n self.assertAllEqual(expected, result.eval())\n\n result = shape_ops.frame(signal, frame_length=2, frame_step=1,\n pad_end=True, axis=1)\n expected = [[[[0, 1], [2, 3]],\n [[2, 3], [4, 5]],\n [[4, 5], [6, 7]],\n [[6, 7], [0, 0]]],\n [[[8, 9], [10, 11]],\n [[10, 11], [12, 13]],\n [[12, 13], [14, 15]],\n [[14, 15], [0, 0]]]]\n self.assertAllEqual(expected, result.eval())\n\n result = shape_ops.frame(signal, frame_length=3, frame_step=1,\n pad_end=True, axis=1)\n expected = [[[[0, 1], [2, 3], [4, 5]],\n [[2, 3], [4, 5], [6, 7]],\n [[4, 5], [6, 7], [0, 0]],\n [[6, 7], [0, 0], [0, 0]]],\n [[[8, 9], [10, 11], [12, 13]],\n [[10, 11], [12, 13], [14, 15]],\n [[12, 13], [14, 15], [0, 0]],\n [[14, 15], [0, 0], [0, 0]]]]\n self.assertAllEqual(expected, result.eval())\n\n def test_window_larger_than_signal(self):\n signal = constant_op.constant([[1, 2], [11, 12]], dtype=dtypes.float32)\n frame_length = 4\n frame_step = 1\n\n with self.test_session(use_gpu=True):\n result = shape_ops.frame(signal, frame_length, frame_step,\n pad_end=True, pad_value=99).eval()\n self.assertAllClose([[[1, 2, 99, 99], [2, 99, 99, 99]],\n [[11, 12, 99, 99], [12, 99, 99, 99]]], result)\n\n result = shape_ops.frame(signal, frame_length, frame_step,\n pad_end=False).eval()\n self.assertEqual((2, 0, 4), result.shape)\n\n frame_step = 2\n result = shape_ops.frame(signal, frame_length, frame_step,\n pad_end=True, pad_value=99).eval()\n self.assertAllClose([[[1, 2, 99, 99]], [[11, 12, 99, 99]]], result)\n\n result = shape_ops.frame(signal, frame_length, frame_step,\n pad_end=False).eval()\n self.assertEqual((2, 0, 4), result.shape)\n\n def test_preserves_type(self):\n signal = 
math_ops.range(10, dtype=dtypes.float64)\n frame_length = 2\n frame_step = 3\n\n with self.test_session(use_gpu=True):\n result = shape_ops.frame(signal, frame_length, frame_step)\n self.assertEqual(result.dtype, signal.dtype)\n\n def test_dynamic_tensor(self):\n # Show that frame works even when the dimensions of its input are\n # not known at graph creation time.\n input_signal = np.vstack([np.arange(4), np.arange(4) + 10,\n np.arange(4) + 20])\n frame_length = 2\n frame_step = 2\n\n with self.test_session(use_gpu=True) as sess:\n signal_placeholder = array_ops.placeholder(shape=(None, None),\n dtype=dtypes.float32)\n result = sess.run(shape_ops.frame(\n signal_placeholder, frame_length, frame_step),\n feed_dict={signal_placeholder: input_signal})\n self.assertAllEqual([[[0, 1], [2, 3]],\n [[10, 11], [12, 13]],\n [[20, 21], [22, 23]]], result)\n\n def test_gradient_numerical(self):\n with self.test_session(use_gpu=True):\n signal_shape = (2, 128)\n signal = array_ops.ones(signal_shape)\n frame_length = 33\n frame_step = 9\n frames = shape_ops.frame(signal, frame_length, frame_step)\n error = test.compute_gradient_error(\n signal, signal_shape, frames, frames.shape.as_list())\n self.assertLess(error, 2e-5)\n\n def test_constant_folding(self):\n \"\"\"frame should be constant foldable for constant inputs.\"\"\"\n for pad_end in [False, True]:\n g = ops.Graph()\n with g.as_default():\n frame_length, frame_step = 32, 16\n signal_shape = (2, 128)\n signal = array_ops.ones(signal_shape)\n frames = shape_ops.frame(signal, frame_length, frame_step,\n pad_end=pad_end)\n rewritten_graph = test_util.grappler_optimize(g, [frames])\n self.assertEqual(1, len(rewritten_graph.node))\n\n\nif __name__ == \"__main__\":\n test.main()\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Scan dataset transformation.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\n\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.data.util import nest\nfrom tensorflow.python.data.util import sparse\nfrom tensorflow.python.framework import function\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import gen_dataset_ops\n\n\nclass _ScanDataset(dataset_ops.Dataset):\n \"\"\"A dataset that scans a function across its input.\"\"\"\n\n def __init__(self, input_dataset, initial_state, scan_func):\n \"\"\"See `scan()` for details.\"\"\"\n super(_ScanDataset, self).__init__()\n self._input_dataset = input_dataset\n\n with ops.name_scope(\"initial_state\"):\n self._initial_state = nest.pack_sequence_as(initial_state, [\n ops.convert_to_tensor(t, name=\"component_%d\" % i)\n for i, t in enumerate(nest.flatten(initial_state))\n ])\n\n # Compute initial values for the state shapes and types based on\n # the initial state. 
These will be refined by running\n # `tf_scan_func` one or more times below.\n # TODO(b/68937811): Allow the initial state to be a tf.SparseTensor.\n self._state_shapes = nest.pack_sequence_as(\n self._initial_state,\n [t.shape for t in nest.flatten(self._initial_state)])\n self._state_types = nest.pack_sequence_as(\n self._initial_state,\n [t.dtype for t in nest.flatten(self._initial_state)])\n\n # Will be populated by calling `tf_scan_func`.\n self._output_classes = None\n self._output_shapes = None\n self._output_types = None\n\n # Iteratively rerun the scan function until reaching a fixed pont on\n # `self._state_shapes`.\n need_to_rerun = True\n while need_to_rerun:\n\n flat_state_shapes = nest.flatten(self._state_shapes)\n flat_state_types = nest.flatten(self._state_types)\n\n # Create a list in which `tf_scan_func` will store the s\n flat_new_state_shapes = []\n\n @function.Defun(*(flat_state_types + nest.flatten(\n sparse.as_dense_types(input_dataset.output_types,\n input_dataset.output_classes))))\n def tf_scan_func(*args):\n \"\"\"A wrapper for Defun that facilitates shape inference.\"\"\"\n # Pass in shape information from the state and input_dataset.\n # TODO(b/69424092): Check that neither inputs nor outputs are sparse.\n dense_shapes = sparse.as_dense_shapes(input_dataset.output_shapes,\n input_dataset.output_classes)\n for arg, shape in zip(args,\n flat_state_shapes + nest.flatten(dense_shapes)):\n arg.set_shape(shape)\n\n pivot = len(flat_state_shapes)\n old_state = nest.pack_sequence_as(self._initial_state, args[:pivot])\n input_value = nest.pack_sequence_as(input_dataset.output_types,\n args[pivot:])\n\n ret = scan_func(old_state, input_value)\n if not isinstance(ret, collections.Sequence) or len(ret) != 2:\n raise TypeError(\"The scan function must return a pair comprising the \"\n \"new state and the output value.\")\n new_state, output_value = ret\n\n flat_new_state = [\n ops.convert_to_tensor(t) for t in nest.flatten(new_state)\n ]\n flat_output_value = [\n ops.convert_to_tensor(t) for t in nest.flatten(output_value)\n ]\n\n # Extract shape information from the returned values.\n flat_new_state_shapes.extend([t.shape for t in flat_new_state])\n self._output_shapes = nest.pack_sequence_as(\n output_value, [t.shape for t in flat_output_value])\n\n # Extract and validate type information from the returned values.\n for t, dtype in zip(flat_new_state, flat_state_types):\n if t.dtype != dtype:\n raise TypeError(\n \"The element types for the new state must match the initial \"\n \"state. 
Expected %s; got %s.\" %\n (self._state_types, nest.pack_sequence_as(\n self._state_types, [t.dtype for t in flat_new_state])))\n self._output_classes = nest.pack_sequence_as(\n output_value, [ops.Tensor for _ in flat_output_value])\n self._output_types = nest.pack_sequence_as(\n output_value, [t.dtype for t in flat_output_value])\n\n return flat_new_state + flat_output_value\n\n # Use the private method that will execute `tf_scan_func` but delay\n # adding it to the graph in case we need to rerun the function.\n tf_scan_func._create_definition_if_needed() # pylint: disable=protected-access\n\n weakened_state_shapes = [\n original.most_specific_compatible_shape(new)\n for original, new in zip(flat_state_shapes, flat_new_state_shapes)\n ]\n\n need_to_rerun = False\n for original_shape, weakened_shape in zip(flat_state_shapes,\n weakened_state_shapes):\n if original_shape.ndims is not None and (\n weakened_shape.ndims is None or\n original_shape.as_list() != weakened_shape.as_list()):\n need_to_rerun = True\n break\n\n if need_to_rerun:\n # NOTE(mrry): `self._output_shapes` will be overwritten when we rerun\n # `tf_scan_func`.\n self._state_shapes = nest.pack_sequence_as(self._state_shapes,\n weakened_state_shapes)\n\n self._scan_func = tf_scan_func\n\n def _as_variant_tensor(self):\n input_t = self._input_dataset._as_variant_tensor() # pylint: disable=protected-access\n return gen_dataset_ops.scan_dataset(\n input_t,\n nest.flatten(self._initial_state),\n self._scan_func.captured_inputs,\n f=self._scan_func,\n output_types=nest.flatten(\n sparse.as_dense_types(self.output_types, self.output_classes)),\n output_shapes=nest.flatten(\n sparse.as_dense_shapes(self.output_shapes, self.output_classes)))\n\n @property\n def output_classes(self):\n return self._output_classes\n\n @property\n def output_shapes(self):\n return self._output_shapes\n\n @property\n def output_types(self):\n return self._output_types\n\n\ndef scan(initial_state, scan_func):\n \"\"\"A transformation that scans a function across an input dataset.\n\n This transformation is a stateful relative of @{tf.data.Dataset.map}.\n In addition to mapping `scan_func` across the elements of the input dataset,\n `scan()` accumulates one or more state tensors, whose initial values are\n `initial_state`.\n\n Args:\n initial_state: A nested structure of tensors, representing the initial state\n of the accumulator.\n scan_func: A function that maps `(old_state, input_element)` to\n `(new_state, output_element). It must take two arguments and return a\n pair of nested structures of tensors. The `new_state` must match the\n structure of `initial_state`.\n\n Returns:\n A `Dataset` transformation function, which can be passed to\n @{tf.data.Dataset.apply}.\n \"\"\"\n def _apply_fn(dataset):\n return _ScanDataset(dataset, initial_state, scan_func)\n\n return _apply_fn\n", "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Activity analysis.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport copy\n\nimport gast\n\nfrom tensorflow.contrib.py2tf.pyct import anno\nfrom tensorflow.contrib.py2tf.pyct import transformer\nfrom tensorflow.contrib.py2tf.pyct.static_analysis.annos import NodeAnno\n\n# TODO(mdan): Add support for PY3 (e.g. Param vs arg).\n\n\nclass Scope(object):\n \"\"\"Encloses local symbol definition and usage information.\n\n This can track for instance whether a symbol is modified in the current scope.\n Note that scopes do not necessarily align with Python's scopes. For example,\n the body of an if statement may be considered a separate scope.\n\n Attributes:\n modified: identifiers modified in this scope\n created: identifiers created in this scope\n used: identifiers referenced in this scope\n \"\"\"\n\n def __init__(self, parent, isolated=True):\n \"\"\"Create a new scope.\n\n Args:\n parent: A Scope or None.\n isolated: Whether the scope is isolated, that is, whether variables\n created in this scope should be visible to the parent scope.\n \"\"\"\n self.isolated = isolated\n self.parent = parent\n self.modified = set()\n self.created = set()\n self.used = set()\n self.params = set()\n self.returned = set()\n\n # TODO(mdan): Rename to `locals`\n @property\n def referenced(self):\n if not self.isolated and self.parent is not None:\n return self.used | self.parent.referenced\n return self.used\n\n def __repr__(self):\n return 'Scope{r=%s, c=%s, w=%s}' % (tuple(self.used), tuple(self.created),\n tuple(self.modified))\n\n def copy_from(self, other):\n self.modified = copy.copy(other.modified)\n self.created = copy.copy(other.created)\n self.used = copy.copy(other.used)\n self.params = copy.copy(other.params)\n self.returned = copy.copy(other.returned)\n\n def merge_from(self, other):\n self.modified |= other.modified\n self.created |= other.created\n self.used |= other.used\n self.params |= other.params\n self.returned |= other.returned\n\n def has(self, name):\n if name in self.modified or name in self.params:\n return True\n elif self.parent is not None:\n return self.parent.has(name)\n return False\n\n def is_modified_since_entry(self, name):\n if name in self.modified:\n return True\n elif self.parent is not None and not self.isolated:\n return self.parent.is_modified_since_entry(name)\n return False\n\n def is_param(self, name):\n if name in self.params:\n return True\n elif self.parent is not None and not self.isolated:\n return self.parent.is_param(name)\n return False\n\n def mark_read(self, name):\n self.used.add(name)\n if self.parent is not None and name not in self.created:\n self.parent.mark_read(name)\n\n def mark_param(self, name):\n self.params.add(name)\n\n def mark_creation(self, name):\n if name.is_composite():\n parent = name.parent\n if 
self.has(parent):\n # This is considered mutation of the parent, not creation.\n # TODO(mdan): Is that really so?\n return\n else:\n raise ValueError('Unknown symbol \"%s\".' % parent)\n self.created.add(name)\n\n def mark_write(self, name):\n self.modified.add(name)\n if self.isolated:\n self.mark_creation(name)\n else:\n if self.parent is None:\n self.mark_creation(name)\n else:\n if not self.parent.has(name):\n self.mark_creation(name)\n self.parent.mark_write(name)\n\n def mark_returned(self, name):\n self.returned.add(name)\n if not self.isolated and self.parent is not None:\n self.parent.mark_returned(name)\n\n\nclass ActivityAnalizer(transformer.Base):\n \"\"\"Annotates nodes with local scope information. See Scope.\"\"\"\n\n def __init__(self, context, parent_scope):\n super(ActivityAnalizer, self).__init__(context)\n self.scope = Scope(parent_scope)\n self._in_return_statement = False\n\n def _track_symbol(self, node):\n qn = anno.getanno(node, anno.Basic.QN)\n\n if isinstance(node.ctx, gast.Store):\n self.scope.mark_write(qn)\n elif isinstance(node.ctx, gast.Load):\n self.scope.mark_read(qn)\n elif isinstance(node.ctx, gast.Param):\n # Param contexts appear in function defs, so they have the meaning of\n # defining a variable.\n # TODO(mdan): This bay be incorrect with nested functions.\n # For nested functions, we'll have to add the notion of hiding args from\n # the parent scope, not writing to them.\n self.scope.mark_creation(qn)\n self.scope.mark_param(qn)\n else:\n raise ValueError('Unknown context %s for node %s.' % (type(node.ctx), qn))\n\n anno.setanno(node, NodeAnno.IS_LOCAL, self.scope.has(qn))\n anno.setanno(node, NodeAnno.IS_MODIFIED_SINCE_ENTRY,\n self.scope.is_modified_since_entry(qn))\n anno.setanno(node, NodeAnno.IS_PARAM, self.scope.is_param(qn))\n\n if self._in_return_statement:\n self.scope.mark_returned(qn)\n\n def visit_Name(self, node):\n self.generic_visit(node)\n self._track_symbol(node)\n return node\n\n def visit_Attribute(self, node):\n self.generic_visit(node)\n self._track_symbol(node)\n return node\n\n def visit_Print(self, node):\n current_scope = self.scope\n args_scope = Scope(current_scope)\n self.scope = args_scope\n for n in node.values:\n self.visit(n)\n anno.setanno(node, NodeAnno.ARGS_SCOPE, args_scope)\n self.scope = current_scope\n return node\n\n def visit_Call(self, node):\n current_scope = self.scope\n args_scope = Scope(current_scope, isolated=False)\n self.scope = args_scope\n for n in node.args:\n self.visit(n)\n # TODO(mdan): Account starargs, kwargs\n for n in node.keywords:\n self.visit(n)\n anno.setanno(node, NodeAnno.ARGS_SCOPE, args_scope)\n self.scope = current_scope\n self.visit(node.func)\n return node\n\n def _process_block_node(self, node, block, scope_name):\n current_scope = self.scope\n block_scope = Scope(current_scope, isolated=False)\n self.scope = block_scope\n for n in block:\n self.visit(n)\n anno.setanno(node, scope_name, block_scope)\n self.scope = current_scope\n return node\n\n def _process_parallel_blocks(self, parent, children):\n # Because the scopes are not isolated, processing any child block\n # modifies the parent state causing the other child blocks to be\n # processed incorrectly. 
So we need to checkpoint the parent scope so that\n # each child sees the same context.\n before_parent = Scope(None)\n before_parent.copy_from(self.scope)\n after_children = []\n for child, scope_name in children:\n self.scope.copy_from(before_parent)\n parent = self._process_block_node(parent, child, scope_name)\n after_child = Scope(None)\n after_child.copy_from(self.scope)\n after_children.append(after_child)\n for after_child in after_children:\n self.scope.merge_from(after_child)\n return parent\n\n def visit_If(self, node):\n self.visit(node.test)\n node = self._process_parallel_blocks(node,\n ((node.body, NodeAnno.BODY_SCOPE),\n (node.orelse, NodeAnno.ORELSE_SCOPE)))\n return node\n\n def visit_For(self, node):\n self.visit(node.target)\n self.visit(node.iter)\n node = self._process_parallel_blocks(node,\n ((node.body, NodeAnno.BODY_SCOPE),\n (node.orelse, NodeAnno.ORELSE_SCOPE)))\n return node\n\n def visit_While(self, node):\n self.visit(node.test)\n node = self._process_parallel_blocks(node,\n ((node.body, NodeAnno.BODY_SCOPE),\n (node.orelse, NodeAnno.ORELSE_SCOPE)))\n return node\n\n def visit_Return(self, node):\n self._in_return_statement = True\n node = self.generic_visit(node)\n self._in_return_statement = False\n return node\n\n\ndef resolve(node, context, parent_scope=None):\n return ActivityAnalizer(context, parent_scope).visit(node)\n", "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for doc generator traversal.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\n\nfrom tensorflow.python.platform import googletest\nfrom tensorflow.tools.docs import generate_lib\nfrom tensorflow.tools.docs import parser\n\n\ndef test_function():\n \"\"\"Docstring for test_function.\"\"\"\n pass\n\n\nclass TestClass(object):\n \"\"\"Docstring for TestClass itself.\"\"\"\n\n class ChildClass(object):\n \"\"\"Docstring for a child class.\"\"\"\n\n class GrandChildClass(object):\n \"\"\"Docstring for a child of a child class.\"\"\"\n pass\n\n\nclass DummyVisitor(object):\n\n def __init__(self, index, duplicate_of):\n self.index = index\n self.duplicate_of = duplicate_of\n\n\nclass GenerateTest(googletest.TestCase):\n\n def test_write(self):\n if sys.version_info >= (3, 0):\n self.skipTest('Warning: Doc generation is not supported from python3.')\n\n module = sys.modules[__name__]\n\n index = {\n 'tf': sys, # Can be any module, this test doesn't care about content.\n 'tf.TestModule': module,\n 'tf.test_function': test_function,\n 'tf.TestModule.test_function': test_function,\n 'tf.TestModule.TestClass': TestClass,\n 'tf.TestModule.TestClass.ChildClass': TestClass.ChildClass,\n 'tf.TestModule.TestClass.ChildClass.GrandChildClass':\n TestClass.ChildClass.GrandChildClass,\n }\n\n tree = {\n 'tf': ['TestModule', 'test_function'],\n 'tf.TestModule': 
['test_function', 'TestClass'],\n 'tf.TestModule.TestClass': ['ChildClass'],\n 'tf.TestModule.TestClass.ChildClass': ['GrandChildClass'],\n 'tf.TestModule.TestClass.ChildClass.GrandChildClass': []\n }\n\n duplicate_of = {'tf.test_function': 'tf.TestModule.test_function'}\n\n duplicates = {\n 'tf.TestModule.test_function': [\n 'tf.test_function', 'tf.TestModule.test_function'\n ]\n }\n\n base_dir = os.path.dirname(__file__)\n\n visitor = DummyVisitor(index, duplicate_of)\n\n reference_resolver = parser.ReferenceResolver.from_visitor(\n visitor=visitor, doc_index={}, py_module_names=['tf'])\n\n parser_config = parser.ParserConfig(\n reference_resolver=reference_resolver,\n duplicates=duplicates,\n duplicate_of=duplicate_of,\n tree=tree,\n index=index,\n reverse_index={},\n guide_index={},\n base_dir=base_dir)\n\n output_dir = googletest.GetTempDir()\n\n generate_lib.write_docs(output_dir, parser_config, yaml_toc=True)\n\n # Make sure that the right files are written to disk.\n self.assertTrue(os.path.exists(os.path.join(output_dir, 'index.md')))\n self.assertTrue(os.path.exists(os.path.join(output_dir, 'tf.md')))\n self.assertTrue(os.path.exists(os.path.join(output_dir, '_toc.yaml')))\n self.assertTrue(\n os.path.exists(os.path.join(output_dir, 'tf/TestModule.md')))\n self.assertFalse(\n os.path.exists(os.path.join(output_dir, 'tf/test_function.md')))\n self.assertTrue(\n os.path.exists(\n os.path.join(output_dir, 'tf/TestModule/TestClass.md')))\n self.assertTrue(\n os.path.exists(\n os.path.join(output_dir,\n 'tf/TestModule/TestClass/ChildClass.md')))\n self.assertTrue(\n os.path.exists(\n os.path.join(\n output_dir,\n 'tf/TestModule/TestClass/ChildClass/GrandChildClass.md')))\n # Make sure that duplicates are not written\n self.assertTrue(\n os.path.exists(\n os.path.join(output_dir, 'tf/TestModule/test_function.md')))\n\n\nif __name__ == '__main__':\n googletest.main()\n" ]
[ [ "tensorflow.python.ops.array_ops.constant", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.ops.math_ops.reduce_max", "tensorflow.python.ops.array_ops.sequence_mask", "tensorflow.contrib.rnn.ops.gen_lstm_ops.lstm_block_cell", "tensorflow.python.ops.array_ops.zeros", "tensorflow.python.ops.init_ops.constant_initializer", "tensorflow.python.ops.math_ops.to_int64", "tensorflow.python.framework.ops.RegisterGradient", "tensorflow.python.layers.base.InputSpec", "tensorflow.python.ops.array_ops.unstack", "tensorflow.python.ops.math_ops.matmul", "tensorflow.python.platform.resource_loader.get_path_to_datafile", "tensorflow.python.ops.array_ops.slice", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.ops.rnn_cell_impl.LSTMStateTuple", "tensorflow.python.ops.nn_ops.bias_add_grad", "tensorflow.python.ops.array_ops.stack", "tensorflow.python.ops.math_ops.range", "tensorflow.python.ops.array_ops.concat", "tensorflow.contrib.rnn.ops.gen_lstm_ops.block_lstm", "tensorflow.python.ops.array_ops.reshape", "tensorflow.python.ops.array_ops.expand_dims" ], [ "tensorflow.python.ops.math_ops.log", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.ops.control_flow_ops.while_loop", "tensorflow.python.ops.math_ops.exp", "tensorflow.python.ops.distributions.util.prefer_static_rank", "tensorflow.python.ops.gradients_impl.gradients", "tensorflow.python.ops.array_ops.rank", "tensorflow.python.ops.math_ops.abs", "tensorflow.python.ops.distributions.util.gen_new_seed", "tensorflow.python.ops.array_ops.where", "tensorflow.python.ops.math_ops.cast", "numpy.log", "tensorflow.python.ops.math_ops.minimum", "tensorflow.python.ops.array_ops.zeros_like", "tensorflow.python.ops.math_ops.is_finite", "tensorflow.python.ops.math_ops.equal", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.ops.array_ops.stack", "tensorflow.python.ops.array_ops.ones_like", "tensorflow.python.ops.math_ops.range", "tensorflow.python.ops.functional_ops.scan", "numpy.int32", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.ops.array_ops.reshape", "tensorflow.python.ops.math_ops.reduce_sum" ], [ "tensorflow.python.ops.array_ops.transpose", "numpy.absolute", "tensorflow.python.ops.nn_ops.depthwise_conv2d_native", "tensorflow.python.ops.gradient_checker.compute_gradient_error", "tensorflow.python.platform.test.is_gpu_available", "tensorflow.python.ops.nn_ops.depthwise_conv2d_native_backprop_input", "tensorflow.python.platform.test.main", "numpy.random.rand", "numpy.ravel", "tensorflow.python.ops.nn_impl.depthwise_conv2d", "tensorflow.python.ops.nn_ops.depthwise_conv2d_native_backprop_filter", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.framework.test_util.run_in_graph_and_eager_modes", "tensorflow.python.ops.variable_scope.variable_creator_scope", "tensorflow.contrib.eager.python.checkpointable.Checkpointable.__init__", "tensorflow.python.eager.context.in_eager_mode", "tensorflow.python.ops.state_ops.assign", "tensorflow.python.training.saver.latest_checkpoint", "tensorflow.python.layers.core.Dense.__init__", "tensorflow.python.ops.resource_variable_ops.ResourceVariable", "tensorflow.python.ops.init_ops.ones_initializer", "tensorflow.contrib.eager.python.checkpointable.Checkpointable", "tensorflow.python.ops.variable_scope.variable_scope", "tensorflow.python.eager.context.graph_mode", "tensorflow.python.training.adam.AdamOptimizer.__init__", "tensorflow.contrib.eager.python.checkpointable.save", 
"tensorflow.contrib.eager.python.checkpointable._serialize_object_graph", "tensorflow.python.eager.test.main", "tensorflow.python.framework.ops.colocate_with", "tensorflow.contrib.eager.python.checkpointable.restore", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.ops.variable_scope.variable", "tensorflow.python.training.training_util.create_global_step", "tensorflow.python.eager.context.in_graph_mode", "tensorflow.python.framework.ops.Graph", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.python.framework.ops.name_scope", "tensorflow.contrib.eager.python.network.Network.__init__", "tensorflow.python.ops.variables.global_variables_initializer", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.ops.array_ops.transpose", "tensorflow.python.framework.tensor_shape.TensorShape", "numpy.full", "tensorflow.python.ops.array_ops.placeholder", "tensorflow.python.ops.math_ops.reduce_mean", "tensorflow.python.platform.test.main", "numpy.random.rand", "numpy.float32", "tensorflow.python.ops.math_ops.sqrt", "numpy.random.randint", "tensorflow.python.ops.math_ops.matmul", "numpy.array", "numpy.zeros", "numpy.sum", "numpy.random.RandomState", "tensorflow.python.ops.array_ops.matrix_diag_part" ], [ "tensorflow.python.framework.ops.get_collection", "tensorflow.python.ops.variable_scope.get_variable", "tensorflow.python.ops.variables.Variable", "tensorflow.python.ops.array_ops.zeros", "tensorflow.python.platform.test.main", "tensorflow.python.ops.variable_scope.variable_scope", "tensorflow.python.ops.array_ops.ones", "tensorflow.python.summary.summary.merge_all", "tensorflow.python.ops.variables.global_variables_initializer" ], [ "tensorflow.python.keras._impl.keras.backend.bias_add", "tensorflow.python.keras._impl.keras.utils.conv_utils.normalize_padding", "tensorflow.python.keras._impl.keras.backend.local_conv1d", "tensorflow.python.keras._impl.keras.constraints.serialize", "tensorflow.python.keras._impl.keras.engine.InputSpec", "tensorflow.python.keras._impl.keras.regularizers.serialize", "tensorflow.python.keras._impl.keras.initializers.get", "tensorflow.python.keras._impl.keras.backend.local_conv2d", "tensorflow.python.util.tf_export.tf_export", "tensorflow.python.keras._impl.keras.activations.serialize", "tensorflow.python.keras._impl.keras.activations.get", "tensorflow.python.keras._impl.keras.utils.conv_utils.normalize_data_format", "tensorflow.python.keras._impl.keras.initializers.serialize", "tensorflow.python.keras._impl.keras.utils.conv_utils.conv_output_length", "tensorflow.python.keras._impl.keras.utils.conv_utils.normalize_tuple", "tensorflow.python.keras._impl.keras.regularizers.get", "tensorflow.python.keras._impl.keras.constraints.get" ], [ "numpy.dot", "tensorflow.contrib.linear_optimizer.python.sdca_estimator.SDCALogisticClassifier", "tensorflow.contrib.linear_optimizer.python.sdca_estimator.SDCALinearRegressor", "tensorflow.contrib.layers.python.layers.feature_column.crossed_column", "tensorflow.contrib.layers.python.layers.feature_column.sparse_column_with_hash_bucket", "tensorflow.contrib.layers.python.layers.feature_column.weighted_sparse_column", "tensorflow.python.platform.test.main", "tensorflow.python.framework.sparse_tensor.SparseTensor", "tensorflow.contrib.layers.python.layers.feature_column.real_valued_column", "tensorflow.core.protobuf.config_pb2.ConfigProto", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.keras._impl.keras.layers.add", 
"tensorflow.python.keras._impl.keras.layers.Dropout", "numpy.where", "tensorflow.python.keras._impl.keras.callbacks.RemoteMonitor", "tensorflow.python.keras._impl.keras.backend.epsilon", "tensorflow.python.keras._impl.keras.testing_utils.get_test_data", "tensorflow.python.keras._impl.keras.layers.Dense", "tensorflow.python.platform.test.main", "tensorflow.python.keras._impl.keras.callbacks.CSVLogger", "tensorflow.python.keras._impl.keras.callbacks.ReduceLROnPlateau", "tensorflow.python.keras._impl.keras.utils.to_categorical", "tensorflow.python.keras._impl.keras.models.Model", "tensorflow.python.keras._impl.keras.backend.get_value", "tensorflow.python.keras._impl.keras.callbacks.LearningRateScheduler", "numpy.isnan", "tensorflow.python.keras._impl.keras.callbacks.EarlyStopping", "tensorflow.python.keras._impl.keras.callbacks.TensorBoard", "numpy.random.random", "numpy.random.seed", "tensorflow.python.keras._impl.keras.optimizers.SGD", "tensorflow.python.keras._impl.keras.callbacks.TerminateOnNaN", "tensorflow.python.keras._impl.keras.callbacks.ModelCheckpoint", "numpy.ones", "tensorflow.python.keras._impl.keras.initializers.Constant", "tensorflow.python.summary.writer.writer_cache.FileWriterCache.clear", "tensorflow.python.keras._impl.keras.models.Sequential", "tensorflow.python.keras._impl.keras.Input" ], [ "tensorflow.python.ops.array_ops.identity", "tensorflow.python.ops.array_ops.fill", "tensorflow.contrib.training.python.training.tensor_queue_dataset.enqueue_in_queue_dataset", "tensorflow.python.data.ops.dataset_ops.Dataset.from_tensor_slices", "tensorflow.python.ops.math_ops.reduce_max", "tensorflow.contrib.training.python.training.tensor_queue_dataset.prepend_from_queue_and_padded_batch_dataset", "tensorflow.python.ops.array_ops.where", "tensorflow.python.ops.string_ops.as_string", "tensorflow.python.ops.array_ops.gather", "tensorflow.python.platform.test.main", "tensorflow.python.ops.array_ops.ones", "tensorflow.python.framework.ops.control_dependencies", "numpy.array", "tensorflow.python.ops.array_ops.expand_dims", "numpy.empty", "numpy.random.randint" ], [ "tensorflow.python.ops.array_ops.ones_like", "tensorflow.python.estimator.canned.head.LossSpec", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.estimator.model_fn.EstimatorSpec", "tensorflow.python.ops.array_ops.concat", "tensorflow.python.ops.math_ops.add_n", "tensorflow.python.ops.array_ops.slice", "tensorflow.python.ops.metrics.mean", "tensorflow.python.ops.array_ops.zeros_like", "tensorflow.python.summary.summary.scalar", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.ops.math_ops.multiply", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.estimator.canned.head._summary_key", "tensorflow.python.ops.control_flow_ops.no_op" ], [ "tensorflow.python.ops.distributions.util.AppendDocstring", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.ops.distributions.util.embed_check_nonnegative_integer_form", "tensorflow.python.ops.distributions.util.gen_new_seed", "tensorflow.python.ops.distributions.util.embed_check_categorical_event_shape", "tensorflow.python.ops.special_math_ops.lbeta", "tensorflow.python.ops.check_ops.assert_positive", "tensorflow.python.ops.random_ops.random_gamma", "tensorflow.python.ops.distributions.util.log_combinations", "tensorflow.python.util.tf_export.tf_export", "tensorflow.python.ops.array_ops.one_hot", "tensorflow.python.ops.array_ops.reshape", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.ops.math_ops.sqrt", 
"tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.ops.math_ops.matmul", "tensorflow.python.ops.math_ops.reduce_sum", "tensorflow.python.ops.math_ops.cast" ], [ "numpy.lib.stride_tricks.as_strided", "tensorflow.python.ops.array_ops.placeholder", "tensorflow.python.ops.array_ops.zeros", "tensorflow.python.ops.gradients_impl.gradients", "tensorflow.python.ops.math_ops.abs", "tensorflow.python.ops.math_ops.linspace", "numpy.arange", "numpy.std", "tensorflow.python.platform.test.main", "tensorflow.python.platform.test.compute_gradient_error", "numpy.zeros", "tensorflow.contrib.signal.python.ops.spectral_ops.stft", "numpy.fft.irfft", "tensorflow.contrib.signal.python.ops.spectral_ops.inverse_stft_window_fn", "tensorflow.python.framework.dtypes.as_dtype", "tensorflow.contrib.signal.python.ops.spectral_ops.inverse_stft", "numpy.sum", "numpy.random.random", "numpy.abs", "numpy.fft.rfft", "numpy.ones", "tensorflow.python.ops.spectral_ops_test_util.fft_kernel_label_map", "numpy.shape", "tensorflow.python.ops.random_ops.random_normal", "tensorflow.contrib.signal.python.ops.window_ops.hann_window", "tensorflow.python.ops.random_ops.random_uniform", "tensorflow.python.ops.math_ops.reduce_sum" ], [ "tensorflow.decode_csv", "tensorflow.cast", "tensorflow.estimator.DNNClassifier", "tensorflow.logging.set_verbosity", "tensorflow.feature_column.numeric_column", "tensorflow.data.TextLineDataset", "tensorflow.app.run" ], [ "tensorflow.python.ops.math_ops.log", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.ops.control_flow_ops.while_loop", "tensorflow.python.ops.check_ops.assert_greater_equal", "tensorflow.python.ops.array_ops.zeros", "tensorflow.python.ops.gen_math_ops.exp", "tensorflow.python.ops.array_ops.transpose", "tensorflow.python.ops.gen_math_ops.maximum", "tensorflow.python.ops.math_ops.less", "tensorflow.contrib.distributions.Normal", "tensorflow.python.ops.array_ops.ones", "tensorflow.python.ops.math_ops.cast", "tensorflow.python.ops.tensor_array_ops.TensorArray", "tensorflow.contrib.timeseries.python.timeseries.model_utils.fully_connected", "tensorflow.python.ops.array_ops.slice", "tensorflow.python.ops.math_ops.square", "tensorflow.python.ops.array_ops.zeros_like", "tensorflow.contrib.timeseries.python.timeseries.model.ModelOutputs", "tensorflow.python.ops.check_ops.assert_equal", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.ops.gen_math_ops.minimum", "tensorflow.python.ops.array_ops.stack", "tensorflow.python.ops.array_ops.ones_like", "tensorflow.python.ops.math_ops.range", "tensorflow.python.ops.array_ops.concat", "tensorflow.python.ops.init_ops.zeros_initializer", "tensorflow.python.ops.array_ops.reshape", "tensorflow.python.ops.nn_ops.relu", "tensorflow.python.ops.math_ops.reduce_sum", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.data.ops.dataset_ops.MapDataset", "tensorflow.contrib.data.python.ops.batching.unbatch", "tensorflow.python.data.ops.dataset_ops.SparseTensorSliceDataset", "tensorflow.contrib.data.python.ops.error_ops.ignore_errors", "tensorflow.python.data.ops.dataset_ops.CacheDataset", "tensorflow.python.data.ops.dataset_ops.FilterDataset", "tensorflow.python.data.ops.dataset_ops.ParallelMapDataset", "tensorflow.python.data.ops.dataset_ops.FlatMapDataset", "tensorflow.contrib.data.python.ops.enumerate_ops.enumerate_dataset", "tensorflow.python.data.ops.dataset_ops.RepeatDataset", "tensorflow.python.data.ops.dataset_ops.TensorDataset", "tensorflow.python.data.ops.dataset_ops.ZipDataset", 
"tensorflow.python.data.ops.dataset_ops.BatchDataset", "tensorflow.python.util.deprecation.deprecated", "tensorflow.python.data.ops.dataset_ops.PrefetchDataset", "tensorflow.python.data.ops.dataset_ops.ConcatenateDataset", "tensorflow.python.data.ops.dataset_ops.TakeDataset", "tensorflow.python.data.ops.dataset_ops.PaddedBatchDataset", "tensorflow.python.util.deprecation.deprecated_args", "tensorflow.contrib.data.python.ops.grouping.group_by_window", "tensorflow.python.data.ops.dataset_ops.RangeDataset", "tensorflow.python.data.util.nest.flatten", "tensorflow.python.data.ops.dataset_ops.ShuffleDataset", "tensorflow.python.data.ops.dataset_ops.SkipDataset", "tensorflow.python.data.ops.dataset_ops.TensorSliceDataset", "tensorflow.contrib.data.python.ops.batching.dense_to_sparse_batch", "tensorflow.python.data.ops.dataset_ops.Dataset.from_generator", "tensorflow.python.ops.gen_io_ops.matching_files", "tensorflow.python.data.ops.dataset_ops.InterleaveDataset" ], [ "numpy.linspace", "numpy.isfinite", "tensorflow.python.ops.distributions.uniform.Uniform", "tensorflow.python.framework.ops.get_default_session", "tensorflow.python.ops.math_ops.reduce_mean", "numpy.diff", "numpy.testing.assert_allclose", "numpy.divide" ], [ "tensorflow.contrib.py2tf.pyct.anno.hasanno", "tensorflow.contrib.py2tf.pyct.anno.getanno", "tensorflow.contrib.py2tf.pyct.anno.delanno", "tensorflow.contrib.py2tf.pyct.anno.setanno", "tensorflow.python.platform.test.main" ], [ "tensorflow.python.eager.context.in_graph_mode", "tensorflow.python.ops.resource_variable_ops.ResourceVariable", "numpy.sign", "tensorflow.python.ops.variables.Variable", "tensorflow.python.ops.variables.global_variables_initializer", "tensorflow.python.platform.test.main", "tensorflow.contrib.opt.python.training.powersign.PowerSignOptimizer", "tensorflow.contrib.opt.python.training.sign_decay.get_linear_decay_fn", "numpy.array", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.framework.tensor_shape.scalar", "tensorflow.python.framework.tensor_shape.TensorShape", "tensorflow.python.framework.tensor_shape.matrix", "tensorflow.python.framework.tensor_shape.Dimension", "tensorflow.python.framework.tensor_shape.as_dimension", "tensorflow.python.framework.tensor_shape.vector", "tensorflow.python.platform.googletest.main", "tensorflow.python.framework.tensor_shape.unknown_shape", "tensorflow.python.framework.tensor_shape.as_shape", "tensorflow.core.framework.tensor_shape_pb2.TensorShapeProto.Dim" ], [ "tensorflow.python.tools.optimize_for_inference_lib.fuse_resize_and_conv", "tensorflow.python.ops.nn_ops.conv2d", "tensorflow.python.framework.importer.import_graph_def", "tensorflow.python.ops.gen_nn_ops._fused_batch_norm", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.python.tools.optimize_for_inference_lib.optimize_for_inference", "tensorflow.core.framework.node_def_pb2.NodeDef", "tensorflow.python.platform.test.main", "tensorflow.python.framework.tensor_util.make_tensor_proto", "tensorflow.core.framework.attr_value_pb2.AttrValue", "tensorflow.python.ops.image_ops.resize_bilinear", "tensorflow.python.ops.array_ops.pad", "tensorflow.python.tools.optimize_for_inference_lib.fold_batch_norms", "numpy.array", "tensorflow.python.ops.gen_nn_ops._batch_norm_with_global_normalization", "tensorflow.core.framework.graph_pb2.GraphDef" ], [ "tensorflow.python.keras._impl.keras.layers.BatchNormalization", "tensorflow.python.keras._impl.keras.layers.add", "tensorflow.python.keras._impl.keras.backend.set_image_data_format", 
"tensorflow.python.keras._impl.keras.layers.ZeroPadding2D", "tensorflow.python.keras._impl.keras.layers.SeparableConv2D", "tensorflow.python.keras._impl.keras.backend.image_data_format", "tensorflow.python.keras._impl.keras.layers.AveragePooling2D", "tensorflow.python.keras._impl.keras.backend.int_shape", "tensorflow.python.keras._impl.keras.layers.GlobalAveragePooling2D", "tensorflow.python.keras._impl.keras.layers.Input", "tensorflow.python.util.tf_export.tf_export", "tensorflow.python.keras._impl.keras.layers.Dense", "tensorflow.python.keras._impl.keras.layers.Cropping2D", "tensorflow.python.keras._impl.keras.backend.backend", "tensorflow.python.keras._impl.keras.models.Model", "tensorflow.python.platform.tf_logging.warning", "tensorflow.python.keras._impl.keras.layers.Conv2D", "tensorflow.python.keras._impl.keras.backend.is_keras_tensor", "tensorflow.python.keras._impl.keras.layers.GlobalMaxPooling2D", "tensorflow.python.keras._impl.keras.backend.name_scope", "tensorflow.python.keras._impl.keras.utils.data_utils.get_file", "tensorflow.python.keras._impl.keras.layers.Activation", "tensorflow.python.keras._impl.keras.engine.topology.get_source_inputs", "tensorflow.python.keras._impl.keras.layers.concatenate", "tensorflow.python.keras._impl.keras.layers.MaxPooling2D" ], [ "tensorflow.python.ops.math_ops.range", "numpy.expand_dims", "numpy.reshape", "numpy.arange", "tensorflow.python.framework.ops.Graph", "tensorflow.contrib.signal.python.kernel_tests.test_util.grappler_optimize", "tensorflow.python.ops.array_ops.placeholder", "tensorflow.python.platform.test.main", "tensorflow.contrib.signal.python.ops.shape_ops.frame", "tensorflow.python.ops.array_ops.ones", "numpy.array", "tensorflow.python.ops.array_ops.expand_dims", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.data.util.nest.pack_sequence_as", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.data.util.sparse.as_dense_shapes", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.data.util.sparse.as_dense_types", "tensorflow.python.data.util.nest.flatten" ], [ "tensorflow.contrib.py2tf.pyct.anno.setanno", "tensorflow.contrib.py2tf.pyct.anno.getanno" ], [ "tensorflow.tools.docs.generate_lib.write_docs", "tensorflow.tools.docs.parser.ParserConfig", "tensorflow.tools.docs.parser.ReferenceResolver.from_visitor", "tensorflow.python.platform.googletest.main", "tensorflow.python.platform.googletest.GetTempDir" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.5", "1.7", "1.2", "1.4" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "2.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "1.4", "2.7", "2.2", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "1.0", "2.6", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.4", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.2", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.4", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.2", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.4", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.2", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.7", "1.10", "1.12" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.5", "1.7", "1.4" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.5", "1.7", "1.10", "1.12" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.4", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.2", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.5", "1.7", "1.10", "1.4" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "1.4", "1.13", "2.3", "2.4", "2.2", "2.9", "1.5", "1.7", "2.5", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.7" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.5" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.5", "1.7" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.5", "1.7", "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.7" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.3", "2.2" ] } ]
DEVESHTARASIA/big-data-tutorial
[ "74e2aa1241c30913c5f12b9667f9d626002b98a2" ]
[ "tutorial/helpers.py" ]
[ "\"\"\"\nSmall helpers for code that is not shown in the notebooks\n\"\"\"\n\nfrom sklearn import neighbors, datasets, linear_model\nimport pylab as pl\nimport numpy as np\nfrom matplotlib.colors import ListedColormap\n\n# Create color maps for 3-class classification problem, as with iris\ncmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])\ncmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])\n\ndef plot_iris_knn():\n iris = datasets.load_iris()\n X = iris.data[:, :2] # we only take the first two features. We could\n # avoid this ugly slicing by using a two-dim dataset\n y = iris.target\n\n knn = neighbors.KNeighborsClassifier(n_neighbors=3)\n knn.fit(X, y)\n\n x_min, x_max = X[:, 0].min() - .1, X[:, 0].max() + .1\n y_min, y_max = X[:, 1].min() - .1, X[:, 1].max() + .1\n xx, yy = np.meshgrid(np.linspace(x_min, x_max, 100),\n np.linspace(y_min, y_max, 100))\n Z = knn.predict(np.c_[xx.ravel(), yy.ravel()])\n\n # Put the result into a color plot\n Z = Z.reshape(xx.shape)\n pl.figure()\n pl.pcolormesh(xx, yy, Z, cmap=cmap_light)\n\n # Plot also the training points\n pl.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)\n pl.xlabel('sepal length (cm)')\n pl.ylabel('sepal width (cm)')\n pl.axis('tight')\n\n\ndef plot_polynomial_regression():\n rng = np.random.RandomState(0)\n x = 2*rng.rand(100) - 1\n\n f = lambda t: 1.2 * t**2 + .1 * t**3 - .4 * t **5 - .5 * t ** 9\n y = f(x) + .4 * rng.normal(size=100)\n\n x_test = np.linspace(-1, 1, 100)\n\n pl.figure()\n pl.scatter(x, y, s=4)\n\n X = np.array([x**i for i in range(5)]).T\n X_test = np.array([x_test**i for i in range(5)]).T\n regr = linear_model.LinearRegression()\n regr.fit(X, y)\n pl.plot(x_test, regr.predict(X_test), label='4th order')\n\n X = np.array([x**i for i in range(10)]).T\n X_test = np.array([x_test**i for i in range(10)]).T\n regr = linear_model.LinearRegression()\n regr.fit(X, y)\n pl.plot(x_test, regr.predict(X_test), label='9th order')\n\n pl.legend(loc='best')\n pl.axis('tight')\n pl.title('Fitting a 4th and a 9th order polynomial')\n\n pl.figure()\n pl.scatter(x, y, s=4)\n pl.plot(x_test, f(x_test), label=\"truth\")\n pl.axis('tight')\n pl.title('Ground truth (9th order polynomial)')" ]
[ [ "numpy.linspace", "sklearn.datasets.load_iris", "sklearn.neighbors.KNeighborsClassifier", "matplotlib.colors.ListedColormap", "sklearn.linear_model.LinearRegression", "numpy.random.RandomState" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
cosmic-cortex/torchkit
[ "9f44c8a500a4345d81feac14b6b200c5d190283a" ]
[ "torchkit/models/vision/segmentation/unet.py" ]
[ "import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom torch.autograd import Variable\r\n\r\n\r\ndef pad_to_shape(this, shp):\r\n \"\"\"\r\n Not a very safe function.\r\n \"\"\"\r\n return F.pad(this, (0, shp[3] - this.shape[3], 0, shp[2] - this.shape[2]))\r\n\r\n\r\nclass First(nn.Module):\r\n def __init__(self, in_channels, middle_channels, out_channels, dropout=False):\r\n super(First, self).__init__()\r\n\r\n layers = [\r\n nn.Conv2d(in_channels, middle_channels, kernel_size=3, padding=1),\r\n nn.BatchNorm2d(middle_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(middle_channels, out_channels, kernel_size=3, padding=1),\r\n nn.BatchNorm2d(out_channels),\r\n nn.ReLU(inplace=True)\r\n ]\r\n\r\n if dropout:\r\n assert 0 <= dropout <= 1, 'dropout must be between 0 and 1'\r\n layers.append(nn.Dropout2d(p=dropout))\r\n\r\n self.first = nn.Sequential(*layers)\r\n\r\n def forward(self, x):\r\n return self.first(x)\r\n\r\n\r\nclass Encoder(nn.Module):\r\n def __init__(\r\n self, in_channels, middle_channels, out_channels,\r\n dropout=False, downsample_kernel=2\r\n ):\r\n super(Encoder, self).__init__()\r\n\r\n layers = [\r\n nn.MaxPool2d(kernel_size=downsample_kernel),\r\n nn.Conv2d(in_channels, middle_channels, kernel_size=3, padding=1),\r\n nn.BatchNorm2d(middle_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(middle_channels, out_channels, kernel_size=3, padding=1),\r\n nn.BatchNorm2d(out_channels),\r\n nn.ReLU(inplace=True)\r\n ]\r\n\r\n if dropout:\r\n assert 0 <= dropout <= 1, 'dropout must be between 0 and 1'\r\n layers.append(nn.Dropout2d(p=dropout))\r\n\r\n self.encoder = nn.Sequential(*layers)\r\n\r\n def forward(self, x):\r\n return self.encoder(x)\r\n\r\n\r\nclass Center(nn.Module):\r\n def __init__(self, in_channels, middle_channels, out_channels, deconv_channels, dropout=False):\r\n super(Center, self).__init__()\r\n\r\n layers = [\r\n nn.MaxPool2d(kernel_size=2),\r\n nn.Conv2d(in_channels, middle_channels, kernel_size=3, padding=1),\r\n nn.BatchNorm2d(middle_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(middle_channels, out_channels, kernel_size=3, padding=1),\r\n nn.BatchNorm2d(out_channels),\r\n nn.ReLU(inplace=True),\r\n nn.ConvTranspose2d(out_channels, deconv_channels, kernel_size=2, stride=2)\r\n ]\r\n\r\n if dropout:\r\n assert 0 <= dropout <= 1, 'dropout must be between 0 and 1'\r\n layers.append(nn.Dropout2d(p=dropout))\r\n\r\n self.center = nn.Sequential(*layers)\r\n\r\n def forward(self, x):\r\n return self.center(x)\r\n\r\n\r\nclass Decoder(nn.Module):\r\n def __init__(self, in_channels, middle_channels, out_channels, deconv_channels, dropout=False):\r\n super(Decoder, self).__init__()\r\n\r\n layers = [\r\n nn.Conv2d(in_channels, middle_channels, kernel_size=3, padding=1),\r\n nn.BatchNorm2d(middle_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(middle_channels, out_channels, kernel_size=3, padding=1),\r\n nn.BatchNorm2d(out_channels),\r\n nn.ReLU(inplace=True),\r\n nn.ConvTranspose2d(out_channels, deconv_channels, kernel_size=2, stride=2)\r\n ]\r\n\r\n if dropout:\r\n assert 0 <= dropout <= 1, 'dropout must be between 0 and 1'\r\n layers.append(nn.Dropout2d(p=dropout))\r\n\r\n self.decoder = nn.Sequential(*layers)\r\n\r\n def forward(self, x):\r\n return self.decoder(x)\r\n\r\n\r\nclass Last(nn.Module):\r\n def __init__(self, in_channels, middle_channels, out_channels, softmax=False):\r\n super(Last, self).__init__()\r\n\r\n layers = [\r\n nn.Conv2d(in_channels, middle_channels, kernel_size=3, padding=1),\r\n 
nn.BatchNorm2d(middle_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(middle_channels, middle_channels, kernel_size=3, padding=1),\r\n nn.BatchNorm2d(middle_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(middle_channels, out_channels, kernel_size=1),\r\n nn.Sigmoid()\r\n ]\r\n\r\n if softmax:\r\n layers.append(nn.Softmax2d())\r\n\r\n self.first = nn.Sequential(*layers)\r\n\r\n def forward(self, x):\r\n return self.first(x)\r\n\r\n\r\nclass UNet(nn.Module):\r\n def __init__(self, in_channels, out_channels, softmax=False):\r\n super(UNet, self).__init__()\r\n self.first = First(in_channels, 64, 64)\r\n self.encoder_1 = Encoder(64, 128, 128)\r\n self.encoder_2 = Encoder(128, 256, 256)\r\n self.encoder_3 = Encoder(256, 512, 512)\r\n self.center = Center(512, 1024, 1024, 512)\r\n self.decoder_3 = Decoder(1024, 512, 512, 256)\r\n self.decoder_2 = Decoder(512, 256, 256, 128)\r\n self.decoder_1 = Decoder(256, 128, 128, 64)\r\n self.last = Last(128, 64, out_channels, softmax=softmax)\r\n\r\n def forward(self, x):\r\n x_first = self.first(x)\r\n x_enc_1 = self.encoder_1(x_first)\r\n x_enc_2 = self.encoder_2(x_enc_1)\r\n x_enc_3 = self.encoder_3(x_enc_2)\r\n x_cent = self.center(x_enc_3)\r\n x_dec_3 = self.decoder_3(torch.cat([pad_to_shape(x_cent, x_enc_3.shape), x_enc_3], dim=1))\r\n x_dec_2 = self.decoder_2(torch.cat([pad_to_shape(x_dec_3, x_enc_2.shape), x_enc_2], dim=1))\r\n x_dec_1 = self.decoder_1(torch.cat([pad_to_shape(x_dec_2, x_enc_1.shape), x_enc_1], dim=1))\r\n return self.last(torch.cat([pad_to_shape(x_dec_1, x_first.shape), x_first], dim=1))\r\n\r\n\r\nif __name__ == '__main__':\r\n pass\r\n" ]
[ [ "torch.nn.Sequential", "torch.nn.Dropout2d", "torch.nn.ConvTranspose2d", "torch.nn.Conv2d", "torch.nn.Softmax2d", "torch.nn.Sigmoid", "torch.nn.MaxPool2d", "torch.nn.BatchNorm2d", "torch.nn.ReLU", "torch.nn.functional.pad" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
romquentin/decod_WM_Selection_and_maintenance
[ "fc1bf2f21959795fbea731f642cc750c2b61bce2" ]
[ "run_decoding/run_decoding_WM_across_epochs_and_conditions.py" ]
[ "\"\"\"Run decoding analyses in sensors space accross memory content and\nvisual perception for the working memory task and save decoding performance\"\"\"\n\n# Authors: Romain Quentin <[email protected]>\n# Jean-Remi King <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport os\nimport os.path as op\nimport numpy as np\nimport mne\nfrom h5io import read_hdf5\nfrom mne.decoding import GeneralizingEstimator, LinearModel\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.linear_model import Ridge\nfrom sklearn.metrics import make_scorer\nfrom sklearn.model_selection import StratifiedKFold\nfrom jr.gat import (AngularRegression, scorer_spearman,\n scorer_angle)\nfrom base import (complete_behavior, get_events_interactions)\nfrom config import path_data\nimport sys\nsubject = sys.argv[1] # read a swarm file for parralel computing on biowulf\n\noutput_folder = '/sensors_accross_epochs_and_conditions/'\n# Create result folder\nresults_folder = op.join(path_data + 'results/' + subject + output_folder)\nif not os.path.exists(results_folder):\n os.makedirs(results_folder)\n\n# read behavior\nfname = op.join(path_data, subject, 'behavior_Target.hdf5')\nevents = read_hdf5(fname)\nevents = complete_behavior(events)\nevents = get_events_interactions(events)\n# read stimulus epochs\nfname = op.join(path_data, subject, 'epochs_Target.fif')\nepochs_target = mne.read_epochs(fname)\nepochs_target.pick_types(meg=True, ref_meg=False)\nepochs_target.crop(-0.2, 0.9)\n# read cue epochs\nfname = op.join(path_data, subject, 'epochs_Cue.fif')\nepochs_cue = mne.read_epochs(fname)\nepochs_cue.pick_types(meg=True, ref_meg=False)\nepochs_cue.crop(0, 1.5)\n# read probe epochs\nfname = op.join(path_data, subject, 'epochs_Probe.fif')\nepochs_probe = mne.read_epochs(fname)\nepochs_probe.pick_types(meg=True, ref_meg=False)\nepochs_probe.crop(0, 0.9)\n# Concatenate the data of the three epochs\nX0 = epochs_target._data\nX1 = epochs_cue._data\nX2 = epochs_probe._data\nX = np.concatenate((X0, X1, X2), axis=2)\n\n# Define pair of analyses (train on the 2nd and test on the 1st )\npaired_analyses = [['target_sfreq_cue_left_sfreq', 'left_sfreq'],\n ['target_sfreq_cue_right_sfreq', 'right_sfreq'],\n ['left_sfreq', 'target_sfreq_cue_left_sfreq'],\n ['right_sfreq', 'target_sfreq_cue_right_sfreq'],\n ['target_angle_cue_left_angle', 'left_angle'],\n ['target_angle_cue_right_angle', 'right_angle'],\n ['left_angle', 'target_angle_cue_left_angle'],\n ['right_angle', 'target_angle_cue_right_angle']]\n# Loop across each pair of analyses\nfor paired_analysis in paired_analyses:\n y_test = np.array(events[paired_analysis[0]])\n y_train = np.array(events[paired_analysis[1]])\n # Define estimators depending on the analysis\n if 'angle' in paired_analysis[0][:14]:\n clf = make_pipeline(StandardScaler(),\n LinearModel(AngularRegression(Ridge(),\n independent=False)))\n scorer = scorer_angle\n kwargs = dict()\n gat = GeneralizingEstimator(clf, scoring=make_scorer(scorer),\n n_jobs=24, **kwargs)\n y_test = np.array(y_test, dtype=float)\n y_train = np.array(y_train, dtype=float)\n elif 'sfreq' in paired_analysis[0][:14]:\n clf = make_pipeline(StandardScaler(), LinearModel(Ridge()))\n scorer = scorer_spearman\n kwargs = dict()\n gat = GeneralizingEstimator(clf, scoring=make_scorer(scorer),\n n_jobs=24, **kwargs)\n y_test = np.array(y_test, dtype=float)\n y_train = np.array(y_train, dtype=float)\n # only consider trials with correct fixation\n sel = np.where(events['is_eye_fixed'] == 
1)[0]\n y_train = y_train[sel]\n y_test = y_test[sel]\n X = np.concatenate((X0, X1, X2), axis=2)\n X = X[sel]\n # only consider non NaN values\n # Run decoding accross condition\n cv = StratifiedKFold(7)\n scores = list()\n scs = list()\n if np.isnan(y_train).any():\n sel = np.where(~np.isnan(y_train))[0]\n for train, test in cv.split(X[sel], y_train[sel]):\n gat.fit(X[sel][train], y_train[sel][train])\n score = gat.score(X[sel][test], y_test[sel][test])\n sc = gat.score(X[sel][test], y_train[sel][test]) # test on same\n scores.append(score)\n scs.append(sc)\n scores = np.mean(scores, axis=0)\n scs = np.mean(scs, axis=0)\n else:\n for train, test in cv.split(X, y_train):\n y_te = y_test[test]\n X_te = X[test]\n y_te = y_te[np.where(~np.isnan(y_te))[0]]\n X_te = X_te[np.where(~np.isnan(y_te))[0]]\n y_tr = y_train[train]\n X_tr = X[train]\n y_tr = y_tr[np.where(~np.isnan(y_tr))[0]]\n X_tr = X_tr[np.where(~np.isnan(y_tr))[0]]\n y_tr_te = y_train[test]\n X_tr_te = X[test]\n y_tr_te = y_tr_te[np.where(~np.isnan(y_tr_te))[0]]\n X_tr_te = X_tr_te[np.where(~np.isnan(y_tr_te))[0]]\n gat.fit(X_tr, y_tr)\n score = gat.score(X_te, y_te)\n sc = gat.score(X_tr_te, y_tr_te) # test on same\n scores.append(score)\n scs.append(sc)\n scores = np.mean(scores, axis=0)\n scs = np.mean(scs, axis=0)\n\n # save cross-validated scores\n fname = results_folder +\\\n '%s_scores_%s_cross_%s.npy' % (subject,\n paired_analysis[0],\n paired_analysis[1])\n np.save(fname, np.array(scores)) # save accross condition scores\n fname = results_folder +\\\n '%s_scores_%s.npy' % (subject, paired_analysis[1])\n np.save(fname, np.array(scs)) # save scores test/train on same condition\n" ]
[ [ "numpy.isnan", "sklearn.model_selection.StratifiedKFold", "numpy.concatenate", "sklearn.linear_model.Ridge", "numpy.mean", "sklearn.metrics.make_scorer", "sklearn.preprocessing.StandardScaler", "numpy.array", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Pandinosaurus/dsbox
[ "aea56049025ed7e6e66427f8636286f8be1b6e03", "aea56049025ed7e6e66427f8636286f8be1b6e03", "aea56049025ed7e6e66427f8636286f8be1b6e03", "aea56049025ed7e6e66427f8636286f8be1b6e03" ]
[ "dsbox/ml/visualization/metrics.py", "tests/ml/neural_networks/processing/test_workflow.py", "dsbox/ml/markov/seqlearn/_utils/__init__.py", "tests/ml/markov/hmmlearn/test_gaussian_hmm.py" ]
[ "import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.metrics import roc_curve, auc\n\n__author__ = \"Aurélien Massiot\"\n__credits__ = \"https://github.com/octo-technology/bdacore\"\n__license__ = \"Apache 2.0\"\n\n\ndef plot_confusion_matrix(confusion_matrix, classes_list, normalize=True, figsize=(10, 7), fontsize=14, cmap=\"Blues\"):\n \"\"\"\n Display a pretty confusion matrix.\n\n Parameters\n ----------\n confusion_matrix : array-like\n\n classes_list : list,\n classes list of the confusion matrix\n\n normalize : boolean,\n normalize confusion matrix\n\n figsize : tuple, optional (default=(10,7))\n set the figure size\n\n fontsize : int, optional (default=14)\n set the font size\n\n cmap : str, optional (default=\"Blues\")\n set the colormap\n\n Returns\n -------\n Confusion matrix figure\n\n\n Examples\n --------\n >>> from dsbox.ml.visualization.metrics import plot_confusion_matrix\n >>> array = [[ 8458, 227, 1730], \\\n [ 1073, 37590, 1613], \\\n [ 2390, 1159, 17540]]\n >>> classes_list = [\"A\", \"B\", \"C\"]\n >>> plot_confusion_matrix(array, classes_list)\n \"\"\"\n confusion_matrix = np.array(confusion_matrix)\n\n fig, ax = plt.subplots(figsize=figsize)\n\n if normalize:\n normalized_cm = np.array(confusion_matrix).astype('float') / np.array(confusion_matrix).sum(axis=1)[:,\n np.newaxis]\n df_cm = pd.DataFrame(\n normalized_cm, index=classes_list, columns=classes_list,\n )\n plt.matshow(df_cm, fignum=0, cmap=cmap)\n else:\n df_cm = pd.DataFrame(\n confusion_matrix, index=classes_list, columns=classes_list,\n )\n plt.matshow(df_cm, fignum=0, cmap=cmap)\n ax.set_xticks(np.arange(len(classes_list)))\n ax.set_yticks(np.arange(len(classes_list)))\n ax.set_xticklabels(classes_list)\n ax.set_yticklabels(classes_list)\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\", rotation_mode=\"anchor\")\n\n for i in range(len(classes_list)):\n for j in range(len(classes_list)):\n ax.text(j, i, confusion_matrix[i, j], ha=\"center\", va=\"center\", color=\"grey\", fontsize=fontsize)\n\n plt.ylabel('True labels')\n plt.xlabel('Predicted labels')\n plt.show()\n\n\ndef plot_roc_curve(y_test, y_pred_probas, proba_step=None):\n \"\"\"\n Plot ROC curve with probabilities thresholds.\n \n Parameters\n ----------\n y_test : array-like\n true labels\n \n y_pred_probas : array-like\n predicted labels\n \n proba_step : int (optional) (default=None)\n if set, give the step for each probability display. 
If None, nothing is displayed.\n\n Examples\n --------\n \n >>> from dsbox.ml.visualization.metrics import plot_roc_curve\n >>> from sklearn import datasets\n >>> from sklearn.model_selection import train_test_split\n >>> from sklearn.ensemble import RandomForestClassifier\n \n >>> X, y = datasets.make_moons(noise=0.3, random_state=0)\n >>> X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5, random_state=0)\n \n >>> clf = RandomForestClassifier(n_estimators=10, random_state=42)\n >>> _ = clf.fit(X_train, y_train)\n >>> y_pred_probas = clf.predict_proba(X_test)\n \n >>> plot_roc_curve(y_test, y_pred_probas, proba_step=2)\n\n \"\"\"\n fpr, tpr, thresholds = roc_curve(y_test, y_pred_probas[:, 1])\n auc_score = auc(fpr, tpr)\n\n plt.figure()\n lw = 1\n plt.plot(fpr, tpr, color='darkorange', lw=lw, marker='.')\n plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\n if proba_step is not None:\n i = 0\n for x, y, txt in zip(fpr, tpr, thresholds):\n if i % proba_step == 0:\n plt.annotate(np.round(txt, 2), (x, y - 0.04), color='darkgray', fontsize=8)\n i += 1\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('Receiver operating characteristic (ROC) - AUC score: {}'.format(str(np.round(auc_score,3))))\n plt.show()\n", "import unittest\n\nimport numpy as np\n\n\nimport logging\n\nfrom dsbox.ml.neural_networks.keras_factory.text_models import LSTMFactory\nfrom dsbox.ml.neural_networks.processing.workflow import TextNeuralNetPipeline, ImageNeuralNetPipeline\n\nlogging.getLogger(\"tensorflow\").setLevel(logging.WARNING)\n\nnp.random.seed(42)\n\n\nclass TestPipeline(unittest.TestCase):\n def test_fit_predict_text_nn_pipeline_should_return_some_result(self):\n # given\n x_train = np.array(['this is really really awesome !',\n 'it is so awesome !',\n 'that sucks']\n )\n y_train = np.array([1, 1, 0])\n\n # when\n model = TextNeuralNetPipeline(factory_class=LSTMFactory, num_labels=2)\n model.fit(x_train, y_train, verbose=0)\n\n x_test = np.array(['it is really awesome !'])\n y_pred = model.predict(x_test)\n\n # then\n self.assertIsNotNone(y_pred)\n\n def test_fit_predict_proba_text_nn_pipeline_should_return_some_result(self):\n # given\n x_train = np.array(['this is really really awesome !',\n 'it is so awesome !',\n 'that sucks']\n )\n y_train = np.array([1, 1, 0])\n\n # when\n model = TextNeuralNetPipeline(factory_class=LSTMFactory, num_labels=2)\n model.fit(x_train, y_train, verbose=0)\n\n x_test = np.array(['it is really awesome !'])\n y_pred = model.predict_proba(x_test)[0]\n\n # then\n self.assertIsNotNone(y_pred)\n\n def test_fit_image_nn_workflow_should_set_params_automatically(self):\n # given\n workflow = ImageNeuralNetPipeline(weights=\"imagenet\")\n\n # when\n workflow.fit()\n\n # then\n self.assertTupleEqual((299, 299), workflow.img_size_)\n self.assertEqual(\"block14_sepconv2_act\", workflow.last_conv_layer_name_)\n self.assertListEqual([\"avg_pool\", \"predictions\"], workflow.classifier_layer_names_)\n\n", "# Copyright 2013-2014 Lars Buitinck / University of Amsterdam\n#\n# Parts taken from scikit-learn, written by\n# Olivier Grisel\n# Gael Varoquaux\n# Andreas Mueller\n# Lars Buitinck\n# Alexandre Gramfort\n\nimport numpy as np\nimport scipy.sparse as sp\n\n# XXX These are private helper functions from scikit-learn. 
We should copy\n# the code over instead of importing them.\nfrom sklearn.utils import check_random_state\nfrom sklearn.utils.extmath import safe_sparse_dot\n\nfrom .ctrans import count_trans\nfrom .safeadd import safe_add\nfrom .transmatrix import make_trans_matrix\n\n\ndef _assert_all_finite(X):\n X = np.asanyarray(X)\n # First try an O(n) time, O(1) space solution for the common case that\n # there everything is finite; fall back to O(n) space np.isfinite to\n # prevent false positives from overflow in sum method.\n if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum())\n and not np.isfinite(X).all()):\n raise ValueError(\"Input contains NaN, infinity\"\n \" or a value too large for %r.\" % X.dtype)\n\n\ndef assert_all_finite(X):\n \"\"\"Throw a ValueError if X contains NaN or infinity.\n\n Input MUST be an np.ndarray instance or a scipy.sparse matrix.\"\"\"\n\n _assert_all_finite(X.data if sp.issparse(X) else X)\n\n\ndef array2d(X, dtype=None, order=None, copy=False):\n \"\"\"Returns at least 2-d array with data from X\"\"\"\n if sp.issparse(X):\n raise TypeError('A sparse matrix was passed, but dense data '\n 'is required. Use X.toarray() to convert to dense.')\n X_2d = np.asarray(np.atleast_2d(X), dtype=dtype, order=order)\n _assert_all_finite(X_2d)\n if X is X_2d and copy:\n X_2d = np.copy(X_2d)\n return X_2d\n\n\ndef _atleast2d_or_sparse(X, dtype, order, copy, sparse_class, convmethod,\n check_same_type):\n if sp.issparse(X):\n if check_same_type(X) and X.dtype == dtype:\n X = getattr(X, convmethod)(copy=copy)\n elif dtype is None or X.dtype == dtype:\n X = getattr(X, convmethod)()\n else:\n X = sparse_class(X, dtype=dtype)\n _assert_all_finite(X.data)\n X.data = np.array(X.data, copy=False, order=order)\n else:\n X = array2d(X, dtype=dtype, order=order, copy=copy)\n return X\n\n\ndef atleast2d_or_csr(X, dtype=None, order=None, copy=False):\n \"\"\"Like numpy.atleast_2d, but converts sparse matrices to CSR format\n\n Also, converts np.matrix to np.ndarray.\n \"\"\"\n return _atleast2d_or_sparse(X, dtype, order, copy, sp.csr_matrix,\n \"tocsr\", sp.isspmatrix_csr)\n\n\ndef validate_lengths(n_samples, lengths):\n \"\"\"Validate lengths array against n_samples.\n\n Parameters\n ----------\n n_samples : integer\n Total number of samples.\n\n lengths : array-like of integers, shape (n_sequences,), optional\n Lengths of individual sequences in the input.\n\n Returns\n -------\n start : array of integers, shape (n_sequences,)\n Start indices of sequences.\n\n end : array of integers, shape (n_sequences,)\n One-past-the-end indices of sequences.\n \"\"\"\n if lengths is None:\n lengths = [n_samples]\n lengths = np.asarray(lengths, dtype=np.int32)\n if lengths.sum() > n_samples:\n msg = \"More than {0:d} samples in lengths array {1!s}\"\n raise ValueError(msg.format(n_samples, lengths))\n\n end = np.cumsum(lengths)\n start = end - lengths\n\n return start, end\n", "import unittest\nimport numpy as np\n\nfrom dsbox.ml.markov.hmmlearn import hmm\nfrom tests.ml.markov.hmmlearn import log_likelihood_increasing, make_covar_matrix, normalized\n\n\nclass GaussianHMMTestMixin(unittest.TestCase):\n covariance_type = 'diag' # set by subclasses\n\n def setUp(self):\n self.prng = prng = np.random.RandomState(10)\n self.n_components = n_components = 3\n self.n_features = n_features = 3\n self.startprob = prng.rand(n_components)\n self.startprob = self.startprob / self.startprob.sum()\n self.transmat = prng.rand(n_components, n_components)\n self.transmat /= 
np.tile(self.transmat.sum(axis=1)[:, np.newaxis],\n (1, n_components))\n self.means = prng.randint(-20, 20, (n_components, n_features))\n self.covars = make_covar_matrix(\n self.covariance_type, n_components, n_features, random_state=prng\n )\n\n def test_bad_covariance_type(self):\n self.setUp()\n with self.assertRaises(ValueError):\n h = hmm.GaussianHMM(20, covariance_type='badcovariance_type')\n h.means_ = self.means\n h.covars_ = []\n h.startprob_ = self.startprob\n h.transmat_ = self.transmat\n h._check()\n\n def test_score_samples_and_decode(self):\n self.setUp()\n h = hmm.GaussianHMM(self.n_components, self.covariance_type,\n init_params=\"st\")\n h.means_ = self.means\n h.covars_ = self.covars\n\n # Make sure the means are far apart so posteriors.argmax()\n # picks the actual component used to generate the observations.\n h.means_ = 20 * h.means_\n\n gaussidx = np.repeat(np.arange(self.n_components), 5)\n n_samples = len(gaussidx)\n X = self.prng.randn(n_samples, self.n_features) + h.means_[gaussidx]\n h._init(X)\n ll, posteriors = h.score_samples(X)\n\n self.assertEqual(posteriors.shape, (n_samples, self.n_components))\n assert np.allclose(posteriors.sum(axis=1), np.ones(n_samples))\n\n viterbi_ll, stateseq = h.decode(X)\n assert np.allclose(stateseq, gaussidx)\n\n def test_sample(self, n=1000):\n self.setUp()\n h = hmm.GaussianHMM(self.n_components, self.covariance_type)\n h.startprob_ = self.startprob\n h.transmat_ = self.transmat\n # Make sure the means are far apart so posteriors.argmax()\n # picks the actual component used to generate the observations.\n h.means_ = 20 * self.means\n h.covars_ = np.maximum(self.covars, 0.1)\n\n X, state_sequence = h.sample(n, random_state=self.prng)\n self.assertEqual(X.shape, (n, self.n_features))\n self.assertEqual(len(state_sequence), n)\n\n def test_fit(self, params='stmc', n_iter=5, **kwargs):\n self.setUp()\n h = hmm.GaussianHMM(self.n_components, self.covariance_type)\n h.startprob_ = self.startprob\n h.transmat_ = normalized(\n self.transmat + np.diag(self.prng.rand(self.n_components)), 1)\n h.means_ = 20 * self.means\n h.covars_ = self.covars\n\n lengths = [10] * 10\n X, _state_sequence = h.sample(sum(lengths), random_state=self.prng)\n\n # Mess up the parameters and see if we can re-learn them.\n # TODO: change the params and uncomment the check\n self.assertIsNotNone(h.fit(X, lengths=lengths))\n # assert log_likelihood_increasing(h, X, lengths, n_iter)\n\n def test_fit_sequences_of_different_length(self):\n self.setUp()\n lengths = [3, 4, 5]\n X = self.prng.rand(sum(lengths), self.n_features)\n\n h = hmm.GaussianHMM(self.n_components, self.covariance_type)\n # This shouldn't raise\n # ValueError: setting an array element with a sequence.\n self.assertIsNotNone(h.fit(X, lengths=lengths))\n\n def test_fit_with_length_one_signal(self):\n self.setUp()\n lengths = [10, 8, 1]\n X = self.prng.rand(sum(lengths), self.n_features)\n\n h = hmm.GaussianHMM(self.n_components, self.covariance_type)\n # This shouldn't raise\n # ValueError: zero-size array to reduction operation maximum which\n # has no identity\n self.assertIsNotNone(h.fit(X, lengths=lengths))\n\n def test_fit_zero_variance(self):\n self.setUp()\n # Example from issue #2 on GitHub.\n X = np.asarray([\n [7.15000000e+02, 5.85000000e+02, 0.00000000e+00, 0.00000000e+00],\n [7.15000000e+02, 5.20000000e+02, 1.04705811e+00, -6.03696289e+01],\n [7.15000000e+02, 4.55000000e+02, 7.20886230e-01, -5.27055664e+01],\n [7.15000000e+02, 3.90000000e+02, -4.57946777e-01, -7.80605469e+01],\n 
[7.15000000e+02, 3.25000000e+02, -6.43127441e+00, -5.59954834e+01],\n [7.15000000e+02, 2.60000000e+02, -2.90063477e+00, -7.80220947e+01],\n [7.15000000e+02, 1.95000000e+02, 8.45532227e+00, -7.03294373e+01],\n [7.15000000e+02, 1.30000000e+02, 4.09387207e+00, -5.83621216e+01],\n [7.15000000e+02, 6.50000000e+01, -1.21667480e+00, -4.48131409e+01]\n ])\n\n h = hmm.GaussianHMM(3, self.covariance_type)\n self.assertIsNotNone(h.fit(X))\n\n def test_fit_with_priors(self, params='stmc', n_iter=5):\n self.setUp()\n startprob_prior = 10 * self.startprob + 2.0\n transmat_prior = 10 * self.transmat + 2.0\n means_prior = self.means\n means_weight = 2.0\n covars_weight = 2.0\n if self.covariance_type in ('full', 'tied'):\n covars_weight += self.n_features\n covars_prior = self.covars\n\n h = hmm.GaussianHMM(self.n_components, self.covariance_type)\n h.startprob_ = self.startprob\n h.startprob_prior = startprob_prior\n h.transmat_ = normalized(\n self.transmat + np.diag(self.prng.rand(self.n_components)), 1)\n h.transmat_prior = transmat_prior\n h.means_ = 20 * self.means\n h.means_prior = means_prior\n h.means_weight = means_weight\n h.covars_ = self.covars\n h.covars_prior = covars_prior\n h.covars_weight = covars_weight\n\n lengths = [200] * 10\n X, _state_sequence = h.sample(sum(lengths), random_state=self.prng)\n\n # Re-initialize the parameters and check that we can converge to the\n # original parameter values.\n h_learn = hmm.GaussianHMM(self.n_components, self.covariance_type,\n params=params)\n h_learn.n_iter = 0\n h_learn.fit(X, lengths=lengths)\n\n self.assertTrue(log_likelihood_increasing(h_learn, X, lengths, n_iter))\n\n # Make sure we've converged to the right parameters.\n # a) means\n self.assertTrue(np.allclose(sorted(h.means_.tolist()),\n sorted(h_learn.means_.tolist()),\n 0.01))\n # b) covars are hard to estimate precisely from a relatively small\n # sample, thus the large threshold\n self.assertTrue(np.allclose(sorted(h._covars_.tolist()),\n sorted(h_learn._covars_.tolist()),\n 10))\n\n\nclass TestGaussianHMMWithSphericalCovars(GaussianHMMTestMixin, unittest.TestCase):\n covariance_type = 'spherical'\n\n def test_fit_startprob_and_transmat(self):\n self.test_fit('st')\n\n\nclass TestGaussianHMMWithDiagonalCovars(GaussianHMMTestMixin, unittest.TestCase):\n covariance_type = 'diag'\n\n def test_covar_is_writeable(self):\n h = hmm.GaussianHMM(n_components=1, covariance_type=\"diag\",\n init_params=\"c\")\n X = np.random.normal(size=(1000, 5))\n h._init(X)\n\n # np.diag returns a read-only view of the array in NumPy 1.9.X.\n # Make sure this doesn't prevent us from fitting an HMM with\n # diagonal covariance matrix. 
See PR#44 on GitHub for details\n # and discussion.\n self.assertTrue(h._covars_.flags[\"WRITEABLE\"])\n\n def test_fit_left_right(self):\n transmat = np.zeros((self.n_components, self.n_components))\n\n # Left-to-right: each state is connected to itself and its\n # direct successor.\n for i in range(self.n_components):\n if i == self.n_components - 1:\n transmat[i, i] = 1.0\n else:\n transmat[i, i] = transmat[i, i + 1] = 0.5\n\n # Always start in first state\n startprob = np.zeros(self.n_components)\n startprob[0] = 1.0\n\n lengths = [10, 8, 1]\n X = self.prng.rand(sum(lengths), self.n_features)\n\n h = hmm.GaussianHMM(self.n_components, covariance_type=\"diag\",\n params=\"mct\", init_params=\"cm\")\n h.startprob_ = startprob.copy()\n h.transmat_ = transmat.copy()\n h.fit(X)\n\n self.assertTrue((h.startprob_[startprob == 0.0] == 0.0).all())\n self.assertTrue((h.transmat_[transmat == 0.0] == 0.0).all())\n\n posteriors = h.predict_proba(X)\n self.assertFalse(np.isnan(posteriors).any())\n self.assertTrue(np.allclose(posteriors.sum(axis=1), 1.))\n\n score, state_sequence = h.decode(X, algorithm=\"viterbi\")\n self.assertTrue(np.isfinite(score))\n\n\nclass TestGaussianHMMWithTiedCovars(GaussianHMMTestMixin, unittest.TestCase):\n covariance_type = 'tied'\n\n\nclass TestGaussianHMMWithFullCovars(GaussianHMMTestMixin, unittest.TestCase):\n covariance_type = 'full'\n" ]
[ [ "sklearn.metrics.auc", "matplotlib.pyplot.figure", "matplotlib.pyplot.ylim", "matplotlib.pyplot.subplots", "sklearn.metrics.roc_curve", "pandas.DataFrame", "matplotlib.pyplot.plot", "numpy.round", "matplotlib.pyplot.xlim", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.matshow", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ], [ "numpy.array", "numpy.random.seed" ], [ "scipy.sparse.issparse", "numpy.isfinite", "numpy.asarray", "numpy.cumsum", "numpy.atleast_2d", "numpy.copy", "numpy.asanyarray", "numpy.array" ], [ "numpy.maximum", "numpy.allclose", "numpy.isfinite", "numpy.asarray", "numpy.arange", "numpy.isnan", "numpy.ones", "numpy.random.normal", "numpy.random.RandomState", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
aschueth/MetPy
[ "5e906c0fcfadccdc8514011d15d911243130d405" ]
[ "src/metpy/calc/thermo.py" ]
[ "# Copyright (c) 2008,2015,2016,2017,2018,2019 MetPy Developers.\n# Distributed under the terms of the BSD 3-Clause License.\n# SPDX-License-Identifier: BSD-3-Clause\n\"\"\"Contains a collection of thermodynamic calculations.\"\"\"\nimport warnings\n\nimport numpy as np\nimport scipy.integrate as si\nimport scipy.optimize as so\n\nfrom .tools import (_greater_or_close, _less_or_close, _remove_nans, find_bounding_indices,\n find_intersections, first_derivative, get_layer)\nfrom .. import constants as mpconsts\nfrom ..cbook import broadcast_indices\nfrom ..interpolate.one_dimension import interpolate_1d\nfrom ..package_tools import Exporter\nfrom ..units import check_units, concatenate, units\nfrom ..xarray import preprocess_xarray\n\nexporter = Exporter(globals())\n\nsat_pressure_0c = 6.112 * units.millibar\n\n\[email protected]\n@preprocess_xarray\n@check_units('[temperature]', '[temperature]')\ndef relative_humidity_from_dewpoint(temperature, dewpoint):\n r\"\"\"Calculate the relative humidity.\n\n Uses temperature and dewpoint in celsius to calculate relative\n humidity using the ratio of vapor pressure to saturation vapor pressures.\n\n Parameters\n ----------\n temperature : `pint.Quantity`\n air temperature\n dewpoint : `pint.Quantity`\n dewpoint temperature\n\n Returns\n -------\n `pint.Quantity`\n relative humidity\n\n See Also\n --------\n saturation_vapor_pressure\n\n \"\"\"\n e = saturation_vapor_pressure(dewpoint)\n e_s = saturation_vapor_pressure(temperature)\n return (e / e_s)\n\n\[email protected]\n@preprocess_xarray\n@check_units('[pressure]', '[pressure]')\ndef exner_function(pressure, reference_pressure=mpconsts.P0):\n r\"\"\"Calculate the Exner function.\n\n .. math:: \\Pi = \\left( \\frac{p}{p_0} \\right)^\\kappa\n\n This can be used to calculate potential temperature from temperature (and visa-versa),\n since\n\n .. math:: \\Pi = \\frac{T}{\\theta}\n\n Parameters\n ----------\n pressure : `pint.Quantity`\n total atmospheric pressure\n reference_pressure : `pint.Quantity`, optional\n The reference pressure against which to calculate the Exner function, defaults to\n metpy.constants.P0\n\n Returns\n -------\n `pint.Quantity`\n The value of the Exner function at the given pressure\n\n See Also\n --------\n potential_temperature\n temperature_from_potential_temperature\n\n \"\"\"\n return (pressure / reference_pressure).to('dimensionless')**mpconsts.kappa\n\n\[email protected]\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]')\ndef potential_temperature(pressure, temperature):\n r\"\"\"Calculate the potential temperature.\n\n Uses the Poisson equation to calculation the potential temperature\n given `pressure` and `temperature`.\n\n Parameters\n ----------\n pressure : `pint.Quantity`\n total atmospheric pressure\n temperature : `pint.Quantity`\n air temperature\n\n Returns\n -------\n `pint.Quantity`\n The potential temperature corresponding to the temperature and\n pressure.\n\n See Also\n --------\n dry_lapse\n\n Notes\n -----\n Formula:\n\n .. math:: \\Theta = T (P_0 / P)^\\kappa\n\n Examples\n --------\n >>> from metpy.units import units\n >>> metpy.calc.potential_temperature(800. * units.mbar, 273. 
* units.kelvin)\n <Quantity(290.9665329591884, 'kelvin')>\n\n \"\"\"\n return temperature / exner_function(pressure)\n\n\[email protected]\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]')\ndef temperature_from_potential_temperature(pressure, potential_temperature):\n r\"\"\"Calculate the temperature from a given potential temperature.\n\n Uses the inverse of the Poisson equation to calculate the temperature from a\n given potential temperature at a specific pressure level.\n\n Parameters\n ----------\n pressure : `pint.Quantity`\n total atmospheric pressure\n potential_temperature : `pint.Quantity`\n potential temperature\n\n Returns\n -------\n `pint.Quantity`\n The temperature corresponding to the potential temperature and pressure.\n\n See Also\n --------\n dry_lapse\n potential_temperature\n\n Notes\n -----\n Formula:\n\n .. math:: T = \\Theta (P / P_0)^\\kappa\n\n Examples\n --------\n >>> from metpy.units import units\n >>> from metpy.calc import temperature_from_potential_temperature\n >>> # potential temperature\n >>> theta = np.array([ 286.12859679, 288.22362587]) * units.kelvin\n >>> p = 850 * units.mbar\n >>> T = temperature_from_potential_temperature(p, theta)\n\n \"\"\"\n return potential_temperature * exner_function(pressure)\n\n\[email protected]\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]', '[pressure]')\ndef dry_lapse(pressure, temperature, reference_pressure=None):\n r\"\"\"Calculate the temperature at a level assuming only dry processes.\n\n This function lifts a parcel starting at `temperature`, conserving\n potential temperature. The starting pressure can be given by `reference_pressure`.\n\n Parameters\n ----------\n pressure : `pint.Quantity`\n The atmospheric pressure level(s) of interest\n temperature : `pint.Quantity`\n The starting temperature\n reference_pressure : `pint.Quantity`, optional\n The reference pressure. If not given, it defaults to the first element of the\n pressure array.\n\n Returns\n -------\n `pint.Quantity`\n The resulting parcel temperature at levels given by `pressure`\n\n See Also\n --------\n moist_lapse : Calculate parcel temperature assuming liquid saturation processes\n parcel_profile : Calculate complete parcel profile\n potential_temperature\n\n \"\"\"\n if reference_pressure is None:\n reference_pressure = pressure[0]\n return temperature * (pressure / reference_pressure)**mpconsts.kappa\n\n\[email protected]\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]', '[pressure]')\ndef moist_lapse(pressure, temperature, reference_pressure=None):\n r\"\"\"Calculate the temperature at a level assuming liquid saturation processes.\n\n This function lifts a parcel starting at `temperature`. The starting pressure can\n be given by `reference_pressure`. Essentially, this function is calculating moist\n pseudo-adiabats.\n\n Parameters\n ----------\n pressure : `pint.Quantity`\n The atmospheric pressure level(s) of interest\n temperature : `pint.Quantity`\n The starting temperature\n reference_pressure : `pint.Quantity`, optional\n The reference pressure. 
If not given, it defaults to the first element of the\n pressure array.\n\n Returns\n -------\n `pint.Quantity`\n The temperature corresponding to the starting temperature and\n pressure levels.\n\n See Also\n --------\n dry_lapse : Calculate parcel temperature assuming dry adiabatic processes\n parcel_profile : Calculate complete parcel profile\n\n Notes\n -----\n This function is implemented by integrating the following differential\n equation:\n\n .. math:: \\frac{dT}{dP} = \\frac{1}{P} \\frac{R_d T + L_v r_s}\n {C_{pd} + \\frac{L_v^2 r_s \\epsilon}{R_d T^2}}\n\n This equation comes from [Bakhshaii2013]_.\n\n \"\"\"\n def dt(t, p):\n t = units.Quantity(t, temperature.units)\n p = units.Quantity(p, pressure.units)\n rs = saturation_mixing_ratio(p, t)\n frac = ((mpconsts.Rd * t + mpconsts.Lv * rs)\n / (mpconsts.Cp_d + (mpconsts.Lv * mpconsts.Lv * rs * mpconsts.epsilon\n / (mpconsts.Rd * t * t)))).to('kelvin')\n return (frac / p).magnitude\n\n if reference_pressure is None:\n reference_pressure = pressure[0]\n\n pressure = pressure.to('mbar')\n reference_pressure = reference_pressure.to('mbar')\n temperature = np.atleast_1d(temperature)\n\n side = 'left'\n\n pres_decreasing = (pressure[0] > pressure[-1])\n if pres_decreasing:\n # Everything is easier if pressures are in increasing order\n pressure = pressure[::-1]\n side = 'right'\n\n ref_pres_idx = np.searchsorted(pressure.m, reference_pressure.m, side=side)\n\n ret_temperatures = np.empty((0, temperature.shape[0]))\n\n if reference_pressure > pressure.min():\n # Integrate downward in pressure\n pres_down = np.append(reference_pressure.m, pressure[(ref_pres_idx - 1)::-1].m)\n trace_down = si.odeint(dt, temperature.m.squeeze(), pres_down.squeeze())\n ret_temperatures = np.concatenate((ret_temperatures, trace_down[:0:-1]))\n\n if reference_pressure < pressure.max():\n # Integrate upward in pressure\n pres_up = np.append(reference_pressure.m, pressure[ref_pres_idx:].m)\n trace_up = si.odeint(dt, temperature.m.squeeze(), pres_up.squeeze())\n ret_temperatures = np.concatenate((ret_temperatures, trace_up[1:]))\n\n if pres_decreasing:\n ret_temperatures = ret_temperatures[::-1]\n\n return units.Quantity(ret_temperatures.T.squeeze(), temperature.units)\n\n\[email protected]\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]', '[temperature]')\ndef lcl(pressure, temperature, dewpoint, max_iters=50, eps=1e-5):\n r\"\"\"Calculate the lifted condensation level (LCL) using from the starting point.\n\n The starting state for the parcel is defined by `temperature`, `dewpoint`,\n and `pressure`. If these are arrays, this function will return a LCL\n for every index. This function does work with surface grids as a result.\n\n Parameters\n ----------\n pressure : `pint.Quantity`\n The starting atmospheric pressure\n temperature : `pint.Quantity`\n The starting temperature\n dewpoint : `pint.Quantity`\n The starting dewpoint\n\n Returns\n -------\n `pint.Quantity`\n The LCL pressure\n `pint.Quantity`\n The LCL temperature\n\n Other Parameters\n ----------------\n max_iters : int, optional\n The maximum number of iterations to use in calculation, defaults to 50.\n eps : float, optional\n The desired relative error in the calculated value, defaults to 1e-5.\n\n See Also\n --------\n parcel_profile\n\n Notes\n -----\n This function is implemented using an iterative approach to solve for the\n LCL. The basic algorithm is:\n\n 1. Find the dewpoint from the LCL pressure and starting mixing ratio\n 2. 
Find the LCL pressure from the starting temperature and dewpoint\n 3. Iterate until convergence\n\n The function is guaranteed to finish by virtue of the `max_iters` counter.\n\n \"\"\"\n def _lcl_iter(p, p0, w, t):\n td = globals()['dewpoint'](vapor_pressure(units.Quantity(p, pressure.units), w))\n return (p0 * (td / t) ** (1. / mpconsts.kappa)).m\n\n w = mixing_ratio(saturation_vapor_pressure(dewpoint), pressure)\n lcl_p = so.fixed_point(_lcl_iter, pressure.m, args=(pressure.m, w, temperature),\n xtol=eps, maxiter=max_iters)\n\n # np.isclose needed if surface is LCL due to precision error with np.log in dewpoint.\n # Causes issues with parcel_profile_with_lcl if removed. Issue #1187\n lcl_p = np.where(np.isclose(lcl_p, pressure.m), pressure.m, lcl_p) * pressure.units\n\n return lcl_p, globals()['dewpoint'](vapor_pressure(lcl_p, w)).to(temperature.units)\n\n\[email protected]\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]', '[temperature]', '[temperature]')\ndef lfc(pressure, temperature, dewpoint, parcel_temperature_profile=None, dewpoint_start=None,\n which='top'):\n r\"\"\"Calculate the level of free convection (LFC).\n\n This works by finding the first intersection of the ideal parcel path and\n the measured parcel temperature. If this intersection occurs below the LCL,\n the LFC is determined to be the same as the LCL, based upon the conditions\n set forth in [USAF1990]_, pg 4-14, where a parcel must be lifted dry adiabatically\n to saturation before it can freely rise.\n\n Parameters\n ----------\n pressure : `pint.Quantity`\n The atmospheric pressure\n temperature : `pint.Quantity`\n The temperature at the levels given by `pressure`\n dewpoint : `pint.Quantity`\n The dewpoint at the levels given by `pressure`\n parcel_temperature_profile: `pint.Quantity`, optional\n The parcel temperature profile from which to calculate the LFC. Defaults to the\n surface parcel profile.\n dewpoint_start: `pint.Quantity`, optional\n The dewpoint of the parcel for which to calculate the LFC. Defaults to the surface\n dewpoint.\n which: str, optional\n Pick which LFC to return. Options are 'top', 'bottom', 'wide', 'most_cape', and 'all'.\n 'top' returns the lowest-pressure LFC, default.\n 'bottom' returns the highest-pressure LFC.\n 'wide' returns the LFC whose corresponding EL is farthest away.\n 'most_cape' returns the LFC that results in the most CAPE in the profile.\n\n Returns\n -------\n `pint.Quantity`\n The LFC pressure, or array of same if which='all'\n `pint.Quantity`\n The LFC temperature, or array of same if which='all'\n\n See Also\n --------\n parcel_profile\n\n \"\"\"\n pressure, temperature, dewpoint = _remove_nans(pressure, temperature, dewpoint)\n # Default to surface parcel if no profile or starting pressure level is given\n if parcel_temperature_profile is None:\n new_stuff = parcel_profile_with_lcl(pressure, temperature, dewpoint)\n pressure, temperature, dewpoint, parcel_temperature_profile = new_stuff\n parcel_temperature_profile = parcel_temperature_profile.to(temperature.units)\n\n if dewpoint_start is None:\n dewpoint_start = dewpoint[0]\n\n # The parcel profile and data may have the same first data point.\n # If that is the case, ignore that point to get the real first\n # intersection for the LFC calculation. 
Use logarithmic interpolation.\n if np.isclose(parcel_temperature_profile[0].to(temperature.units).m, temperature[0].m):\n x, y = find_intersections(pressure[1:], parcel_temperature_profile[1:],\n temperature[1:], direction='increasing', log_x=True)\n else:\n x, y = find_intersections(pressure, parcel_temperature_profile,\n temperature, direction='increasing', log_x=True)\n\n # Compute LCL for this parcel for future comparisons\n this_lcl = lcl(pressure[0], parcel_temperature_profile[0], dewpoint_start)\n\n # The LFC could:\n # 1) Not exist\n # 2) Exist but be equal to the LCL\n # 3) Exist and be above the LCL\n\n # LFC does not exist or is LCL\n if len(x) == 0:\n # Is there any positive area above the LCL?\n mask = pressure < this_lcl[0]\n if np.all(_less_or_close(parcel_temperature_profile[mask], temperature[mask])):\n # LFC doesn't exist\n x, y = np.nan * pressure.units, np.nan * temperature.units\n else: # LFC = LCL\n x, y = this_lcl\n return x, y\n\n # LFC exists. Make sure it is no lower than the LCL\n else:\n idx = x < this_lcl[0]\n # LFC height < LCL height, so set LFC = LCL\n if not any(idx):\n el_pres, _ = find_intersections(pressure[1:], parcel_temperature_profile[1:],\n temperature[1:], direction='decreasing',\n log_x=True)\n if np.min(el_pres) > this_lcl[0]:\n x, y = np.nan * pressure.units, np.nan * temperature.units\n else:\n x, y = this_lcl\n return x, y\n # Otherwise, find all LFCs that exist above the LCL\n # What is returned depends on which flag as described in the docstring\n else:\n return _multiple_el_lfc_options(x, y, idx, which, pressure,\n parcel_temperature_profile, temperature,\n dewpoint, intersect_type='LFC')\n\n\ndef _multiple_el_lfc_options(intersect_pressures, intersect_temperatures, valid_x,\n which, pressure, parcel_temperature_profile, temperature,\n dewpoint, intersect_type):\n \"\"\"Choose which ELs and LFCs to return from a sounding.\"\"\"\n p_list, t_list = intersect_pressures[valid_x], intersect_temperatures[valid_x]\n if which == 'all':\n x, y = p_list, t_list\n elif which == 'bottom':\n x, y = p_list[0], t_list[0]\n elif which == 'top':\n x, y = p_list[-1], t_list[-1]\n elif which == 'wide':\n x, y = _wide_option(intersect_type, p_list, t_list, pressure,\n parcel_temperature_profile, temperature)\n elif which == 'most_cape':\n x, y = _most_cape_option(intersect_type, p_list, t_list, pressure, temperature,\n dewpoint, parcel_temperature_profile)\n else:\n raise ValueError('Invalid option for \"which\". 
Valid options are \"top\", \"bottom\", '\n '\"wide\", \"most_cape\", and \"all\".')\n return x, y\n\n\ndef _wide_option(intersect_type, p_list, t_list, pressure, parcel_temperature_profile,\n temperature):\n \"\"\"Calculate the LFC or EL that produces the greatest distance between these points.\"\"\"\n # zip the LFC and EL lists together and find greatest difference\n if intersect_type == 'LFC':\n # Find EL intersection pressure values\n lfc_p_list = p_list\n el_p_list, _ = find_intersections(pressure[1:], parcel_temperature_profile[1:],\n temperature[1:], direction='decreasing',\n log_x=True)\n else: # intersect_type == 'EL'\n el_p_list = p_list\n # Find LFC intersection pressure values\n lfc_p_list, _ = find_intersections(pressure, parcel_temperature_profile,\n temperature, direction='increasing',\n log_x=True)\n diff = [lfc_p.m - el_p.m for lfc_p, el_p in zip(lfc_p_list, el_p_list)]\n return (p_list[np.where(diff == np.max(diff))][0],\n t_list[np.where(diff == np.max(diff))][0])\n\n\ndef _most_cape_option(intersect_type, p_list, t_list, pressure, temperature, dewpoint,\n parcel_temperature_profile):\n \"\"\"Calculate the LFC or EL that produces the most CAPE in the profile.\"\"\"\n # Need to loop through all possible combinations of cape, find greatest cape profile\n cape_list, pair_list = [], []\n for which_lfc in ['top', 'bottom']:\n for which_el in ['top', 'bottom']:\n cape, _ = cape_cin(pressure, temperature, dewpoint, parcel_temperature_profile,\n which_lfc=which_lfc, which_el=which_el)\n cape_list.append(cape.m)\n pair_list.append([which_lfc, which_el])\n (lfc_chosen, el_chosen) = pair_list[np.where(cape_list == np.max(cape_list))[0][0]]\n if intersect_type == 'LFC':\n if lfc_chosen == 'top':\n x, y = p_list[-1], t_list[-1]\n else: # 'bottom' is returned\n x, y = p_list[0], t_list[0]\n else: # EL is returned\n if el_chosen == 'top':\n x, y = p_list[-1], t_list[-1]\n else:\n x, y = p_list[0], t_list[0]\n return x, y\n\n\[email protected]\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]', '[temperature]', '[temperature]')\ndef el(pressure, temperature, dewpoint, parcel_temperature_profile=None, which='top'):\n r\"\"\"Calculate the equilibrium level.\n\n This works by finding the last intersection of the ideal parcel path and\n the measured environmental temperature. If there is one or fewer intersections, there is\n no equilibrium level.\n\n Parameters\n ----------\n pressure : `pint.Quantity`\n The atmospheric pressure profile\n temperature : `pint.Quantity`\n The temperature at the levels given by `pressure`\n dewpoint : `pint.Quantity`\n The dewpoint at the levels given by `pressure`\n parcel_temperature_profile: `pint.Quantity`, optional\n The parcel temperature profile from which to calculate the EL. Defaults to the\n surface parcel profile.\n which: str, optional\n Pick which LFC to return. 
Options are 'top', 'bottom', 'wide', 'most_cape', and 'all'.\n 'top' returns the lowest-pressure EL, default.\n 'bottom' returns the highest-pressure EL.\n 'wide' returns the EL whose corresponding LFC is farthest away.\n 'most_cape' returns the EL that results in the most CAPE in the profile.\n\n Returns\n -------\n `pint.Quantity`\n The EL pressure, or array of same if which='all'\n `pint.Quantity`\n The EL temperature, or array of same if which='all'\n\n See Also\n --------\n parcel_profile\n\n \"\"\"\n pressure, temperature, dewpoint = _remove_nans(pressure, temperature, dewpoint)\n # Default to surface parcel if no profile or starting pressure level is given\n if parcel_temperature_profile is None:\n new_stuff = parcel_profile_with_lcl(pressure, temperature, dewpoint)\n pressure, temperature, dewpoint, parcel_temperature_profile = new_stuff\n parcel_temperature_profile = parcel_temperature_profile.to(temperature.units)\n\n # If the top of the sounding parcel is warmer than the environment, there is no EL\n if parcel_temperature_profile[-1] > temperature[-1]:\n return np.nan * pressure.units, np.nan * temperature.units\n\n # Interpolate in log space to find the appropriate pressure - units have to be stripped\n # and reassigned to allow np.log() to function properly.\n x, y = find_intersections(pressure[1:], parcel_temperature_profile[1:], temperature[1:],\n direction='decreasing', log_x=True)\n lcl_p, _ = lcl(pressure[0], temperature[0], dewpoint[0])\n idx = x < lcl_p\n if len(x) > 0 and x[-1] < lcl_p:\n return _multiple_el_lfc_options(x, y, idx, which, pressure,\n parcel_temperature_profile, temperature, dewpoint,\n intersect_type='EL')\n else:\n return np.nan * pressure.units, np.nan * temperature.units\n\n\[email protected]\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]', '[temperature]')\ndef parcel_profile(pressure, temperature, dewpoint):\n r\"\"\"Calculate the profile a parcel takes through the atmosphere.\n\n The parcel starts at `temperature`, and `dewpoint`, lifted up\n dry adiabatically to the LCL, and then moist adiabatically from there.\n `pressure` specifies the pressure levels for the profile.\n\n Parameters\n ----------\n pressure : `pint.Quantity`\n The atmospheric pressure level(s) of interest. This array must be from\n high to low pressure.\n temperature : `pint.Quantity`\n The starting temperature\n dewpoint : `pint.Quantity`\n The starting dewpoint\n\n Returns\n -------\n `pint.Quantity`\n The parcel temperatures at the specified pressure levels.\n\n See Also\n --------\n lcl, moist_lapse, dry_lapse\n\n \"\"\"\n _, _, _, t_l, _, t_u = _parcel_profile_helper(pressure, temperature, dewpoint)\n return concatenate((t_l, t_u))\n\n\[email protected]\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]', '[temperature]')\ndef parcel_profile_with_lcl(pressure, temperature, dewpoint):\n r\"\"\"Calculate the profile a parcel takes through the atmosphere.\n\n The parcel starts at `temperature`, and `dewpoint`, lifted up\n dry adiabatically to the LCL, and then moist adiabatically from there.\n `pressure` specifies the pressure levels for the profile. This function returns\n a profile that includes the LCL.\n\n Parameters\n ----------\n pressure : `pint.Quantity`\n The atmospheric pressure level(s) of interest. This array must be from\n high to low pressure.\n temperature : `pint.Quantity`\n The atmospheric temperature at the levels in `pressure`. 
The first entry should be at\n the same level as the first `pressure` data point.\n dewpoint : `pint.Quantity`\n The atmospheric dewpoint at the levels in `pressure`. The first entry should be at\n the same level as the first `pressure` data point.\n\n Returns\n -------\n pressure : `pint.Quantity`\n The parcel profile pressures, which includes the specified levels and the LCL\n ambient_temperature : `pint.Quantity`\n The atmospheric temperature values, including the value interpolated to the LCL level\n ambient_dew_point : `pint.Quantity`\n The atmospheric dewpoint values, including the value interpolated to the LCL level\n profile_temperature : `pint.Quantity`\n The parcel profile temperatures at all of the levels in the returned pressures array,\n including the LCL.\n\n See Also\n --------\n lcl, moist_lapse, dry_lapse, parcel_profile\n\n \"\"\"\n p_l, p_lcl, p_u, t_l, t_lcl, t_u = _parcel_profile_helper(pressure, temperature[0],\n dewpoint[0])\n new_press = concatenate((p_l, p_lcl, p_u))\n prof_temp = concatenate((t_l, t_lcl, t_u))\n new_temp = _insert_lcl_level(pressure, temperature, p_lcl)\n new_dewp = _insert_lcl_level(pressure, dewpoint, p_lcl)\n return new_press, new_temp, new_dewp, prof_temp\n\n\ndef _parcel_profile_helper(pressure, temperature, dewpoint):\n \"\"\"Help calculate parcel profiles.\n\n Returns the temperature and pressure, above, below, and including the LCL. The\n other calculation functions decide what to do with the pieces.\n\n \"\"\"\n # Find the LCL\n press_lcl, temp_lcl = lcl(pressure[0], temperature, dewpoint)\n press_lcl = press_lcl.to(pressure.units)\n\n # Find the dry adiabatic profile, *including* the LCL. We need >= the LCL in case the\n # LCL is included in the levels. It's slightly redundant in that case, but simplifies\n # the logic for removing it later.\n press_lower = concatenate((pressure[pressure >= press_lcl], press_lcl))\n temp_lower = dry_lapse(press_lower, temperature)\n\n # If the pressure profile doesn't make it to the lcl, we can stop here\n if _greater_or_close(np.nanmin(pressure.m), press_lcl.m):\n return (press_lower[:-1], press_lcl, units.Quantity(np.array([]), press_lower.units),\n temp_lower[:-1], temp_lcl, units.Quantity(np.array([]), temp_lower.units))\n\n # Find moist pseudo-adiabatic profile starting at the LCL\n press_upper = concatenate((press_lcl, pressure[pressure < press_lcl]))\n temp_upper = moist_lapse(press_upper, temp_lower[-1]).to(temp_lower.units)\n\n # Return profile pieces\n return (press_lower[:-1], press_lcl, press_upper[1:],\n temp_lower[:-1], temp_lcl, temp_upper[1:])\n\n\ndef _insert_lcl_level(pressure, temperature, lcl_pressure):\n \"\"\"Insert the LCL pressure into the profile.\"\"\"\n interp_temp = interpolate_1d(lcl_pressure, pressure, temperature)\n\n # Pressure needs to be increasing for searchsorted, so flip it and then convert\n # the index back to the original array\n loc = pressure.size - pressure[::-1].searchsorted(lcl_pressure)\n return np.insert(temperature.m, loc, interp_temp.m) * temperature.units\n\n\[email protected]\n@preprocess_xarray\n@check_units('[pressure]', '[dimensionless]')\ndef vapor_pressure(pressure, mixing_ratio):\n r\"\"\"Calculate water vapor (partial) pressure.\n\n Given total `pressure` and water vapor `mixing_ratio`, calculates the\n partial pressure of water vapor.\n\n Parameters\n ----------\n pressure : `pint.Quantity`\n total atmospheric pressure\n mixing_ratio : `pint.Quantity`\n dimensionless mass mixing ratio\n\n Returns\n -------\n `pint.Quantity`\n The ambient water 
vapor (partial) pressure in the same units as\n `pressure`.\n\n Notes\n -----\n This function is a straightforward implementation of the equation given in many places,\n such as [Hobbs1977]_ pg.71:\n\n .. math:: e = p \\frac{r}{r + \\epsilon}\n\n See Also\n --------\n saturation_vapor_pressure, dewpoint\n\n \"\"\"\n return pressure * mixing_ratio / (mpconsts.epsilon + mixing_ratio)\n\n\[email protected]\n@preprocess_xarray\n@check_units('[temperature]')\ndef saturation_vapor_pressure(temperature):\n r\"\"\"Calculate the saturation water vapor (partial) pressure.\n\n Parameters\n ----------\n temperature : `pint.Quantity`\n air temperature\n\n Returns\n -------\n `pint.Quantity`\n The saturation water vapor (partial) pressure\n\n See Also\n --------\n vapor_pressure, dewpoint\n\n Notes\n -----\n Instead of temperature, dewpoint may be used in order to calculate\n the actual (ambient) water vapor (partial) pressure.\n\n The formula used is that from [Bolton1980]_ for T in degrees Celsius:\n\n .. math:: 6.112 e^\\frac{17.67T}{T + 243.5}\n\n \"\"\"\n # Converted from original in terms of C to use kelvin. Using raw absolute values of C in\n # a formula plays havoc with units support.\n return sat_pressure_0c * np.exp(17.67 * (temperature - 273.15 * units.kelvin)\n / (temperature - 29.65 * units.kelvin))\n\n\[email protected]\n@preprocess_xarray\n@check_units('[temperature]', '[dimensionless]')\ndef dewpoint_from_relative_humidity(temperature, relative_humidity):\n r\"\"\"Calculate the ambient dewpoint given air temperature and relative humidity.\n\n Parameters\n ----------\n temperature : `pint.Quantity`\n air temperature\n relative_humidity : `pint.Quantity`\n relative humidity expressed as a ratio in the range 0 < relative_humidity <= 1\n\n Returns\n -------\n `pint.Quantity`\n The dewpoint temperature\n\n See Also\n --------\n dewpoint, saturation_vapor_pressure\n\n \"\"\"\n if np.any(relative_humidity > 1.2):\n warnings.warn('Relative humidity >120%, ensure proper units.')\n return dewpoint(relative_humidity * saturation_vapor_pressure(temperature))\n\n\[email protected]\n@preprocess_xarray\n@check_units('[pressure]')\ndef dewpoint(vapor_pressure):\n r\"\"\"Calculate the ambient dewpoint given the vapor pressure.\n\n Parameters\n ----------\n e : `pint.Quantity`\n Water vapor partial pressure\n\n Returns\n -------\n `pint.Quantity`\n dewpoint temperature\n\n See Also\n --------\n dewpoint_from_relative_humidity, saturation_vapor_pressure, vapor_pressure\n\n Notes\n -----\n This function inverts the [Bolton1980]_ formula for saturation vapor\n pressure to instead calculate the temperature. This yield the following\n formula for dewpoint in degrees Celsius:\n\n .. math:: T = \\frac{243.5 log(e / 6.112)}{17.67 - log(e / 6.112)}\n\n \"\"\"\n val = np.log(vapor_pressure / sat_pressure_0c)\n return 0. * units.degC + 243.5 * units.delta_degC * val / (17.67 - val)\n\n\[email protected]\n@preprocess_xarray\n@check_units('[pressure]', '[pressure]', '[dimensionless]')\ndef mixing_ratio(partial_press, total_press, molecular_weight_ratio=mpconsts.epsilon):\n r\"\"\"Calculate the mixing ratio of a gas.\n\n This calculates mixing ratio given its partial pressure and the total pressure of\n the air. 
There are no required units for the input arrays, other than that\n they have the same units.\n\n Parameters\n ----------\n partial_press : `pint.Quantity`\n Partial pressure of the constituent gas\n total_press : `pint.Quantity`\n Total air pressure\n molecular_weight_ratio : `pint.Quantity` or float, optional\n The ratio of the molecular weight of the constituent gas to that assumed\n for air. Defaults to the ratio for water vapor to dry air\n (:math:`\\epsilon\\approx0.622`).\n\n Returns\n -------\n `pint.Quantity`\n The (mass) mixing ratio, dimensionless (e.g. Kg/Kg or g/g)\n\n Notes\n -----\n This function is a straightforward implementation of the equation given in many places,\n such as [Hobbs1977]_ pg.73:\n\n .. math:: r = \\epsilon \\frac{e}{p - e}\n\n See Also\n --------\n saturation_mixing_ratio, vapor_pressure\n\n \"\"\"\n return (molecular_weight_ratio * partial_press\n / (total_press - partial_press)).to('dimensionless')\n\n\[email protected]\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]')\ndef saturation_mixing_ratio(total_press, temperature):\n r\"\"\"Calculate the saturation mixing ratio of water vapor.\n\n This calculation is given total pressure and the temperature. The implementation\n uses the formula outlined in [Hobbs1977]_ pg.73.\n\n Parameters\n ----------\n total_press: `pint.Quantity`\n Total atmospheric pressure\n temperature: `pint.Quantity`\n air temperature\n\n Returns\n -------\n `pint.Quantity`\n The saturation mixing ratio, dimensionless\n\n \"\"\"\n return mixing_ratio(saturation_vapor_pressure(temperature), total_press)\n\n\[email protected]\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]', '[temperature]')\ndef equivalent_potential_temperature(pressure, temperature, dewpoint):\n r\"\"\"Calculate equivalent potential temperature.\n\n This calculation must be given an air parcel's pressure, temperature, and dewpoint.\n The implementation uses the formula outlined in [Bolton1980]_:\n\n First, the LCL temperature is calculated:\n\n .. math:: T_{L}=\\frac{1}{\\frac{1}{T_{D}-56}+\\frac{ln(T_{K}/T_{D})}{800}}+56\n\n Which is then used to calculate the potential temperature at the LCL:\n\n .. math:: \\theta_{DL}=T_{K}\\left(\\frac{1000}{p-e}\\right)^k\n \\left(\\frac{T_{K}}{T_{L}}\\right)^{.28r}\n\n Both of these are used to calculate the final equivalent potential temperature:\n\n .. math:: \\theta_{E}=\\theta_{DL}\\exp\\left[\\left(\\frac{3036.}{T_{L}}\n -1.78\\right)*r(1+.448r)\\right]\n\n Parameters\n ----------\n pressure: `pint.Quantity`\n Total atmospheric pressure\n temperature: `pint.Quantity`\n Temperature of parcel\n dewpoint: `pint.Quantity`\n Dewpoint of parcel\n\n Returns\n -------\n `pint.Quantity`\n The equivalent potential temperature of the parcel\n\n Notes\n -----\n [Bolton1980]_ formula for Theta-e is used, since according to\n [DaviesJones2009]_ it is the most accurate non-iterative formulation\n available.\n\n \"\"\"\n t = temperature.to('kelvin').magnitude\n td = dewpoint.to('kelvin').magnitude\n p = pressure.to('hPa').magnitude\n e = saturation_vapor_pressure(dewpoint).to('hPa').magnitude\n r = saturation_mixing_ratio(pressure, dewpoint).magnitude\n\n t_l = 56 + 1. / (1. / (td - 56) + np.log(t / td) / 800.)\n th_l = t * (1000 / (p - e)) ** mpconsts.kappa * (t / t_l) ** (0.28 * r)\n th_e = th_l * np.exp((3036. 
/ t_l - 1.78) * r * (1 + 0.448 * r))\n\n return th_e * units.kelvin\n\n\[email protected]\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]')\ndef saturation_equivalent_potential_temperature(pressure, temperature):\n r\"\"\"Calculate saturation equivalent potential temperature.\n\n This calculation must be given an air parcel's pressure and temperature.\n The implementation uses the formula outlined in [Bolton1980]_ for the\n equivalent potential temperature, and assumes a saturated process.\n\n First, because we assume a saturated process, the temperature at the LCL is\n equivalent to the current temperature. Therefore the following equation\n\n .. math:: T_{L}=\\frac{1}{\\frac{1}{T_{D}-56}+\\frac{ln(T_{K}/T_{D})}{800}}+56\n\n reduces to\n\n .. math:: T_{L} = T_{K}\n\n Then the potential temperature at the temperature/LCL is calculated:\n\n .. math:: \\theta_{DL}=T_{K}\\left(\\frac{1000}{p-e}\\right)^k\n \\left(\\frac{T_{K}}{T_{L}}\\right)^{.28r}\n\n However, because\n\n .. math:: T_{L} = T_{K}\n\n it follows that\n\n .. math:: \\theta_{DL}=T_{K}\\left(\\frac{1000}{p-e}\\right)^k\n\n Both of these are used to calculate the final equivalent potential temperature:\n\n .. math:: \\theta_{E}=\\theta_{DL}\\exp\\left[\\left(\\frac{3036.}{T_{K}}\n -1.78\\right)*r(1+.448r)\\right]\n\n Parameters\n ----------\n pressure: `pint.Quantity`\n Total atmospheric pressure\n temperature: `pint.Quantity`\n Temperature of parcel\n\n Returns\n -------\n `pint.Quantity`\n The saturation equivalent potential temperature of the parcel\n\n Notes\n -----\n [Bolton1980]_ formula for Theta-e is used (for saturated case), since according to\n [DaviesJones2009]_ it is the most accurate non-iterative formulation\n available.\n\n \"\"\"\n t = temperature.to('kelvin').magnitude\n p = pressure.to('hPa').magnitude\n e = saturation_vapor_pressure(temperature).to('hPa').magnitude\n r = saturation_mixing_ratio(pressure, temperature).magnitude\n\n th_l = t * (1000 / (p - e)) ** mpconsts.kappa\n th_es = th_l * np.exp((3036. / t - 1.78) * r * (1 + 0.448 * r))\n\n return th_es * units.kelvin\n\n\[email protected]\n@preprocess_xarray\n@check_units('[temperature]', '[dimensionless]', '[dimensionless]')\ndef virtual_temperature(temperature, mixing_ratio, molecular_weight_ratio=mpconsts.epsilon):\n r\"\"\"Calculate virtual temperature.\n\n This calculation must be given an air parcel's temperature and mixing ratio.\n The implementation uses the formula outlined in [Hobbs2006]_ pg.80.\n\n Parameters\n ----------\n temperature: `pint.Quantity`\n air temperature\n mixing_ratio : `pint.Quantity`\n dimensionless mass mixing ratio\n molecular_weight_ratio : `pint.Quantity` or float, optional\n The ratio of the molecular weight of the constituent gas to that assumed\n for air. Defaults to the ratio for water vapor to dry air.\n (:math:`\\epsilon\\approx0.622`).\n\n Returns\n -------\n `pint.Quantity`\n The corresponding virtual temperature of the parcel\n\n Notes\n -----\n .. 
math:: T_v = T \\frac{\\text{w} + \\epsilon}{\\epsilon\\,(1 + \\text{w})}\n\n \"\"\"\n return temperature * ((mixing_ratio + molecular_weight_ratio)\n / (molecular_weight_ratio * (1 + mixing_ratio)))\n\n\[email protected]\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]', '[dimensionless]', '[dimensionless]')\ndef virtual_potential_temperature(pressure, temperature, mixing_ratio,\n molecular_weight_ratio=mpconsts.epsilon):\n r\"\"\"Calculate virtual potential temperature.\n\n This calculation must be given an air parcel's pressure, temperature, and mixing ratio.\n The implementation uses the formula outlined in [Markowski2010]_ pg.13.\n\n Parameters\n ----------\n pressure: `pint.Quantity`\n Total atmospheric pressure\n temperature: `pint.Quantity`\n air temperature\n mixing_ratio : `pint.Quantity`\n dimensionless mass mixing ratio\n molecular_weight_ratio : `pint.Quantity` or float, optional\n The ratio of the molecular weight of the constituent gas to that assumed\n for air. Defaults to the ratio for water vapor to dry air.\n (:math:`\\epsilon\\approx0.622`).\n\n Returns\n -------\n `pint.Quantity`\n The corresponding virtual potential temperature of the parcel\n\n Notes\n -----\n .. math:: \\Theta_v = \\Theta \\frac{\\text{w} + \\epsilon}{\\epsilon\\,(1 + \\text{w})}\n\n \"\"\"\n pottemp = potential_temperature(pressure, temperature)\n return virtual_temperature(pottemp, mixing_ratio, molecular_weight_ratio)\n\n\[email protected]\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]', '[dimensionless]', '[dimensionless]')\ndef density(pressure, temperature, mixing_ratio, molecular_weight_ratio=mpconsts.epsilon):\n r\"\"\"Calculate density.\n\n This calculation must be given an air parcel's pressure, temperature, and mixing ratio.\n The implementation uses the formula outlined in [Hobbs2006]_ pg.67.\n\n Parameters\n ----------\n pressure: `pint.Quantity`\n Total atmospheric pressure\n temperature: `pint.Quantity`\n air temperature\n mixing_ratio : `pint.Quantity`\n dimensionless mass mixing ratio\n molecular_weight_ratio : `pint.Quantity` or float, optional\n The ratio of the molecular weight of the constituent gas to that assumed\n for air. Defaults to the ratio for water vapor to dry air.\n (:math:`\\epsilon\\approx0.622`).\n\n Returns\n -------\n `pint.Quantity`\n The corresponding density of the parcel\n\n Notes\n -----\n .. math:: \\rho = \\frac{p}{R_dT_v}\n\n \"\"\"\n virttemp = virtual_temperature(temperature, mixing_ratio, molecular_weight_ratio)\n return (pressure / (mpconsts.Rd * virttemp)).to(units.kilogram / units.meter ** 3)\n\n\[email protected]\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]', '[temperature]')\ndef relative_humidity_wet_psychrometric(pressure, dry_bulb_temperature, web_bulb_temperature,\n **kwargs):\n r\"\"\"Calculate the relative humidity with wet bulb and dry bulb temperatures.\n\n This uses a psychrometric relationship as outlined in [WMO8-2014]_, with\n coefficients from [Fan1987]_.\n\n Parameters\n ----------\n pressure: `pint.Quantity`\n Total atmospheric pressure\n dry_bulb_temperature: `pint.Quantity`\n Dry bulb temperature\n web_bulb_temperature: `pint.Quantity`\n Wet bulb temperature\n\n Returns\n -------\n `pint.Quantity`\n Relative humidity\n\n Notes\n -----\n .. 
math:: relative_humidity = \\frac{e}{e_s}\n\n * :math:`relative_humidity` is relative humidity as a unitless ratio\n * :math:`e` is vapor pressure from the wet psychrometric calculation\n * :math:`e_s` is the saturation vapor pressure\n\n See Also\n --------\n psychrometric_vapor_pressure_wet, saturation_vapor_pressure\n\n \"\"\"\n return (psychrometric_vapor_pressure_wet(pressure, dry_bulb_temperature,\n web_bulb_temperature, **kwargs)\n / saturation_vapor_pressure(dry_bulb_temperature))\n\n\[email protected]\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]', '[temperature]')\ndef psychrometric_vapor_pressure_wet(pressure, dry_bulb_temperature, wet_bulb_temperature,\n psychrometer_coefficient=6.21e-4 / units.kelvin):\n r\"\"\"Calculate the vapor pressure with wet bulb and dry bulb temperatures.\n\n This uses a psychrometric relationship as outlined in [WMO8-2014]_, with\n coefficients from [Fan1987]_.\n\n Parameters\n ----------\n pressure: `pint.Quantity`\n Total atmospheric pressure\n dry_bulb_temperature: `pint.Quantity`\n Dry bulb temperature\n wet_bulb_temperature: `pint.Quantity`\n Wet bulb temperature\n psychrometer_coefficient: `pint.Quantity`, optional\n Psychrometer coefficient. Defaults to 6.21e-4 K^-1.\n\n Returns\n -------\n `pint.Quantity`\n Vapor pressure\n\n Notes\n -----\n .. math:: e' = e'_w(T_w) - A p (T - T_w)\n\n * :math:`e'` is vapor pressure\n * :math:`e'_w(T_w)` is the saturation vapor pressure with respect to water at temperature\n :math:`T_w`\n * :math:`p` is the pressure of the wet bulb\n * :math:`T` is the temperature of the dry bulb\n * :math:`T_w` is the temperature of the wet bulb\n * :math:`A` is the psychrometer coefficient\n\n Psychrometer coefficient depends on the specific instrument being used and the ventilation\n of the instrument.\n\n See Also\n --------\n saturation_vapor_pressure\n\n \"\"\"\n return (saturation_vapor_pressure(wet_bulb_temperature) - psychrometer_coefficient\n * pressure * (dry_bulb_temperature - wet_bulb_temperature).to('kelvin'))\n\n\[email protected]\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]', '[dimensionless]')\ndef mixing_ratio_from_relative_humidity(pressure, temperature, relative_humidity):\n r\"\"\"Calculate the mixing ratio from relative humidity, temperature, and pressure.\n\n Parameters\n ----------\n pressure: `pint.Quantity`\n Total atmospheric pressure\n temperature: `pint.Quantity`\n Air temperature\n relative_humidity: array_like\n The relative humidity expressed as a unitless ratio in the range [0, 1]. Can also pass\n a percentage if proper units are attached.\n\n Returns\n -------\n `pint.Quantity`\n Dimensionless mixing ratio\n\n Notes\n -----\n Formula adapted from [Hobbs1977]_ pg. 74.\n\n .. 
math:: w = (relative_humidity)(w_s)\n\n * :math:`w` is mixing ratio\n * :math:`relative_humidity` is relative humidity as a unitless ratio\n * :math:`w_s` is the saturation mixing ratio\n\n See Also\n --------\n relative_humidity_from_mixing_ratio, saturation_mixing_ratio\n\n \"\"\"\n return (relative_humidity\n * saturation_mixing_ratio(pressure, temperature)).to('dimensionless')\n\n\[email protected]\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]', '[dimensionless]')\ndef relative_humidity_from_mixing_ratio(pressure, temperature, mixing_ratio):\n r\"\"\"Calculate the relative humidity from mixing ratio, temperature, and pressure.\n\n Parameters\n ----------\n pressure: `pint.Quantity`\n Total atmospheric pressure\n temperature: `pint.Quantity`\n Air temperature\n mixing_ratio: `pint.Quantity`\n Dimensionless mass mixing ratio\n\n Returns\n -------\n `pint.Quantity`\n Relative humidity\n\n Notes\n -----\n Formula based on that from [Hobbs1977]_ pg. 74.\n\n .. math:: relative_humidity = \\frac{w}{w_s}\n\n * :math:`relative_humidity` is relative humidity as a unitless ratio\n * :math:`w` is mixing ratio\n * :math:`w_s` is the saturation mixing ratio\n\n See Also\n --------\n mixing_ratio_from_relative_humidity, saturation_mixing_ratio\n\n \"\"\"\n return mixing_ratio / saturation_mixing_ratio(pressure, temperature)\n\n\[email protected]\n@preprocess_xarray\n@check_units('[dimensionless]')\ndef mixing_ratio_from_specific_humidity(specific_humidity):\n r\"\"\"Calculate the mixing ratio from specific humidity.\n\n Parameters\n ----------\n specific_humidity: `pint.Quantity`\n Specific humidity of air\n\n Returns\n -------\n `pint.Quantity`\n Mixing ratio\n\n Notes\n -----\n Formula from [Salby1996]_ pg. 118.\n\n .. math:: w = \\frac{q}{1-q}\n\n * :math:`w` is mixing ratio\n * :math:`q` is the specific humidity\n\n See Also\n --------\n mixing_ratio, specific_humidity_from_mixing_ratio\n\n \"\"\"\n try:\n specific_humidity = specific_humidity.to('dimensionless')\n except AttributeError:\n pass\n return specific_humidity / (1 - specific_humidity)\n\n\[email protected]\n@preprocess_xarray\n@check_units('[dimensionless]')\ndef specific_humidity_from_mixing_ratio(mixing_ratio):\n r\"\"\"Calculate the specific humidity from the mixing ratio.\n\n Parameters\n ----------\n mixing_ratio: `pint.Quantity`\n mixing ratio\n\n Returns\n -------\n `pint.Quantity`\n Specific humidity\n\n Notes\n -----\n Formula from [Salby1996]_ pg. 118.\n\n .. math:: q = \\frac{w}{1+w}\n\n * :math:`w` is mixing ratio\n * :math:`q` is the specific humidity\n\n See Also\n --------\n mixing_ratio, mixing_ratio_from_specific_humidity\n\n \"\"\"\n try:\n mixing_ratio = mixing_ratio.to('dimensionless')\n except AttributeError:\n pass\n return mixing_ratio / (1 + mixing_ratio)\n\n\[email protected]\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]', '[dimensionless]')\ndef relative_humidity_from_specific_humidity(pressure, temperature, specific_humidity):\n r\"\"\"Calculate the relative humidity from specific humidity, temperature, and pressure.\n\n Parameters\n ----------\n pressure: `pint.Quantity`\n Total atmospheric pressure\n temperature: `pint.Quantity`\n Air temperature\n specific_humidity: `pint.Quantity`\n Specific humidity of air\n\n Returns\n -------\n `pint.Quantity`\n Relative humidity\n\n Notes\n -----\n Formula based on that from [Hobbs1977]_ pg. 74. and [Salby1996]_ pg. 118.\n\n .. 
math:: relative_humidity = \\frac{q}{(1-q)w_s}\n\n * :math:`relative_humidity` is relative humidity as a unitless ratio\n * :math:`q` is specific humidity\n * :math:`w_s` is the saturation mixing ratio\n\n See Also\n --------\n relative_humidity_from_mixing_ratio\n\n \"\"\"\n return (mixing_ratio_from_specific_humidity(specific_humidity)\n / saturation_mixing_ratio(pressure, temperature))\n\n\[email protected]\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]', '[temperature]', '[temperature]')\ndef cape_cin(pressure, temperature, dewpoint, parcel_profile, which_lfc='bottom',\n which_el='top'):\n r\"\"\"Calculate CAPE and CIN.\n\n Calculate the convective available potential energy (CAPE) and convective inhibition (CIN)\n of a given upper air profile and parcel path. CIN is integrated between the surface and\n LFC, CAPE is integrated between the LFC and EL (or top of sounding). Intersection points\n of the measured temperature profile and parcel profile are logarithmically interpolated.\n\n Parameters\n ----------\n pressure : `pint.Quantity`\n The atmospheric pressure level(s) of interest, in order from highest to\n lowest pressure.\n temperature : `pint.Quantity`\n The atmospheric temperature corresponding to pressure.\n dewpoint : `pint.Quantity`\n The atmospheric dewpoint corresponding to pressure.\n parcel_profile : `pint.Quantity`\n The temperature profile of the parcel.\n which_lfc : str\n Choose which LFC to integrate from. Valid options are 'top', 'bottom', 'wide',\n and 'most_cape'. Default is 'bottom'.\n which_el : str\n Choose which EL to integrate to. Valid options are 'top', 'bottom', 'wide',\n and 'most_cape'. Default is 'top'.\n\n Returns\n -------\n `pint.Quantity`\n Convective Available Potential Energy (CAPE).\n `pint.Quantity`\n Convective INhibition (CIN).\n\n Notes\n -----\n Formula adopted from [Hobbs1977]_.\n\n .. math:: \\text{CAPE} = -R_d \\int_{LFC}^{EL} (T_{parcel} - T_{env}) d\\text{ln}(p)\n\n .. 
math:: \\text{CIN} = -R_d \\int_{SFC}^{LFC} (T_{parcel} - T_{env}) d\\text{ln}(p)\n\n\n * :math:`CAPE` Convective available potential energy\n * :math:`CIN` Convective inhibition\n * :math:`LFC` Pressure of the level of free convection\n * :math:`EL` Pressure of the equilibrium level\n * :math:`SFC` Level of the surface or beginning of parcel path\n * :math:`R_d` Gas constant\n * :math:`g` Gravitational acceleration\n * :math:`T_{parcel}` Parcel temperature\n * :math:`T_{env}` Environment temperature\n * :math:`p` Atmospheric pressure\n\n See Also\n --------\n lfc, el\n\n \"\"\"\n pressure, temperature, dewpoint, parcel_profile = _remove_nans(pressure, temperature,\n dewpoint, parcel_profile)\n # Calculate LFC limit of integration\n lfc_pressure, _ = lfc(pressure, temperature, dewpoint,\n parcel_temperature_profile=parcel_profile, which=which_lfc)\n\n # If there is no LFC, no need to proceed.\n if np.isnan(lfc_pressure):\n return 0 * units('J/kg'), 0 * units('J/kg')\n else:\n lfc_pressure = lfc_pressure.magnitude\n\n # Calculate the EL limit of integration\n el_pressure, _ = el(pressure, temperature, dewpoint,\n parcel_temperature_profile=parcel_profile, which=which_el)\n\n # No EL and we use the top reading of the sounding.\n if np.isnan(el_pressure):\n el_pressure = pressure[-1].magnitude\n else:\n el_pressure = el_pressure.magnitude\n\n # Difference between the parcel path and measured temperature profiles\n y = (parcel_profile - temperature).to(units.degK)\n\n # Estimate zero crossings\n x, y = _find_append_zero_crossings(np.copy(pressure), y)\n\n # CAPE\n # Only use data between the LFC and EL for calculation\n p_mask = _less_or_close(x.m, lfc_pressure) & _greater_or_close(x.m, el_pressure)\n x_clipped = x[p_mask].magnitude\n y_clipped = y[p_mask].magnitude\n cape = (mpconsts.Rd\n * (np.trapz(y_clipped, np.log(x_clipped)) * units.degK)).to(units('J/kg'))\n\n # CIN\n # Only use data between the surface and LFC for calculation\n p_mask = _greater_or_close(x.m, lfc_pressure)\n x_clipped = x[p_mask].magnitude\n y_clipped = y[p_mask].magnitude\n cin = (mpconsts.Rd\n * (np.trapz(y_clipped, np.log(x_clipped)) * units.degK)).to(units('J/kg'))\n\n # Set CIN to 0 if it's returned as a positive value (#1190)\n if cin > 0 * units('J/kg'):\n cin = 0 * units('J/kg')\n return cape, cin\n\n\ndef _find_append_zero_crossings(x, y):\n r\"\"\"\n Find and interpolate zero crossings.\n\n Estimate the zero crossings of an x,y series and add estimated crossings to series,\n returning a sorted array with no duplicate values.\n\n Parameters\n ----------\n x : `pint.Quantity`\n x values of data\n y : `pint.Quantity`\n y values of data\n\n Returns\n -------\n x : `pint.Quantity`\n x values of data\n y : `pint.Quantity`\n y values of data\n\n \"\"\"\n crossings = find_intersections(x[1:], y[1:], np.zeros_like(y[1:]) * y.units, log_x=True)\n x = concatenate((x, crossings[0]))\n y = concatenate((y, crossings[1]))\n\n # Resort so that data are in order\n sort_idx = np.argsort(x)\n x = x[sort_idx]\n y = y[sort_idx]\n\n # Remove duplicate data points if there are any\n keep_idx = np.ediff1d(x.magnitude, to_end=[1]) > 1e-6\n x = x[keep_idx]\n y = y[keep_idx]\n return x, y\n\n\[email protected]\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]', '[temperature]')\ndef most_unstable_parcel(pressure, temperature, dewpoint, height=None,\n bottom=None, depth=300 * units.hPa):\n \"\"\"\n Determine the most unstable parcel in a layer.\n\n Determines the most unstable parcel of air by calculating the equivalent\n 
potential temperature and finding its maximum in the specified layer.\n\n Parameters\n ----------\n pressure: `pint.Quantity`\n Atmospheric pressure profile\n temperature: `pint.Quantity`\n Atmospheric temperature profile\n dewpoint: `pint.Quantity`\n Atmospheric dewpoint profile\n height: `pint.Quantity`, optional\n Atmospheric height profile. Standard atmosphere assumed when None (the default).\n bottom: `pint.Quantity`, optional\n Bottom of the layer to consider for the calculation in pressure or height.\n Defaults to using the bottom pressure or height.\n depth: `pint.Quantity`, optional\n Depth of the layer to consider for the calculation in pressure or height. Defaults\n to 300 hPa.\n\n Returns\n -------\n `pint.Quantity`\n Pressure, temperature, and dewpoint of most unstable parcel in the profile.\n integer\n Index of the most unstable parcel in the given profile\n\n See Also\n --------\n get_layer\n\n \"\"\"\n p_layer, t_layer, td_layer = get_layer(pressure, temperature, dewpoint, bottom=bottom,\n depth=depth, height=height, interpolate=False)\n theta_e = equivalent_potential_temperature(p_layer, t_layer, td_layer)\n max_idx = np.argmax(theta_e)\n return p_layer[max_idx], t_layer[max_idx], td_layer[max_idx], max_idx\n\n\[email protected]\n@preprocess_xarray\n@check_units('[temperature]', '[pressure]', '[temperature]')\ndef isentropic_interpolation(levels, pressure, temperature, *args, axis=0,\n temperature_out=False, max_iters=50, eps=1e-6,\n bottom_up_search=True, **kwargs):\n r\"\"\"Interpolate data in isobaric coordinates to isentropic coordinates.\n\n Parameters\n ----------\n levels : array\n One-dimensional array of desired potential temperature surfaces\n pressure : array\n One-dimensional array of pressure levels\n temperature : array\n Array of temperature\n axis : int, optional\n The axis corresponding to the vertical in the temperature array, defaults to 0.\n temperature_out : bool, optional\n If true, will calculate temperature and output as the last item in the output list.\n Defaults to False.\n max_iters : int, optional\n The maximum number of iterations to use in calculation, defaults to 50.\n eps : float, optional\n The desired absolute error in the calculated value, defaults to 1e-6.\n bottom_up_search : bool, optional\n Controls whether to search for levels bottom-up, or top-down. Defaults to\n True, which is bottom-up search.\n args : array, optional\n Any additional variables will be interpolated to each isentropic level.\n\n Returns\n -------\n list\n List with pressure at each isentropic level, followed by each additional\n argument interpolated to isentropic coordinates.\n\n Notes\n -----\n Input variable arrays must have the same number of vertical levels as the pressure levels\n array. Pressure is calculated on isentropic surfaces by assuming that temperature varies\n linearly with the natural log of pressure. Linear interpolation is then used in the\n vertical to find the pressure at each isentropic level. Interpolation method from\n [Ziv1994]_. 
Any additional arguments are assumed to vary linearly with temperature and will\n be linearly interpolated to the new isentropic levels.\n\n See Also\n --------\n potential_temperature\n\n \"\"\"\n # iteration function to be used later\n # Calculates theta from linearly interpolated temperature and solves for pressure\n def _isen_iter(iter_log_p, isentlevs_nd, ka, a, b, pok):\n exner = pok * np.exp(-ka * iter_log_p)\n t = a * iter_log_p + b\n # Newton-Raphson iteration\n f = isentlevs_nd - t * exner\n fp = exner * (ka * t - a)\n return iter_log_p - (f / fp)\n\n # Get dimensions in temperature\n ndim = temperature.ndim\n\n # Convert units\n pres = pressure.to('hPa')\n temperature = temperature.to('kelvin')\n\n slices = [np.newaxis] * ndim\n slices[axis] = slice(None)\n slices = tuple(slices)\n pres = np.broadcast_to(pres[slices].magnitude, temperature.shape) * pres.units\n\n # Sort input data\n sort_pres = np.argsort(pres.m, axis=axis)\n sort_pres = np.swapaxes(np.swapaxes(sort_pres, 0, axis)[::-1], 0, axis)\n sorter = broadcast_indices(pres, sort_pres, ndim, axis)\n levs = pres[sorter]\n tmpk = temperature[sorter]\n\n levels = np.asarray(levels.m_as('kelvin')).reshape(-1)\n isentlevels = levels[np.argsort(levels)]\n\n # Make the desired isentropic levels the same shape as temperature\n shape = list(temperature.shape)\n shape[axis] = isentlevels.size\n isentlevs_nd = np.broadcast_to(isentlevels[slices], shape)\n\n # exponent to Poisson's Equation, which is imported above\n ka = mpconsts.kappa.m_as('dimensionless')\n\n # calculate theta for each point\n pres_theta = potential_temperature(levs, tmpk)\n\n # Raise error if input theta level is larger than pres_theta max\n if np.max(pres_theta.m) < np.max(levels):\n raise ValueError('Input theta level out of data bounds')\n\n # Find log of pressure to implement assumption of linear temperature dependence on\n # ln(p)\n log_p = np.log(levs.m)\n\n # Calculations for interpolation routine\n pok = mpconsts.P0 ** ka\n\n # index values for each point for the pressure level nearest to the desired theta level\n above, below, good = find_bounding_indices(pres_theta.m, levels, axis,\n from_below=bottom_up_search)\n\n # calculate constants for the interpolation\n a = (tmpk.m[above] - tmpk.m[below]) / (log_p[above] - log_p[below])\n b = tmpk.m[above] - a * log_p[above]\n\n # calculate first guess for interpolation\n isentprs = 0.5 * (log_p[above] + log_p[below])\n\n # Make sure we ignore any nans in the data for solving; checking a is enough since it\n # combines log_p and tmpk.\n good &= ~np.isnan(a)\n\n # iterative interpolation using scipy.optimize.fixed_point and _isen_iter defined above\n log_p_solved = so.fixed_point(_isen_iter, isentprs[good],\n args=(isentlevs_nd[good], ka, a[good], b[good], pok.m),\n xtol=eps, maxiter=max_iters)\n\n # get back pressure from log p\n isentprs[good] = np.exp(log_p_solved)\n\n # Mask out points we know are bad as well as points that are beyond the max pressure\n isentprs[~(good & _less_or_close(isentprs, np.max(pres.m)))] = np.nan\n\n # create list for storing output data\n ret = [isentprs * units.hPa]\n\n # if temperature_out = true, calculate temperature and output as last item in list\n if temperature_out:\n ret.append((isentlevs_nd / ((mpconsts.P0.m / isentprs) ** ka)) * units.kelvin)\n\n # do an interpolation for each additional argument\n if args:\n others = interpolate_1d(isentlevels, pres_theta.m, *(arr[sorter] for arr in args),\n axis=axis, return_list_always=True)\n ret.extend(others)\n\n return 
ret\n\n\[email protected]\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]', '[temperature]')\ndef surface_based_cape_cin(pressure, temperature, dewpoint):\n r\"\"\"Calculate surface-based CAPE and CIN.\n\n Calculate the convective available potential energy (CAPE) and convective inhibition (CIN)\n of a given upper air profile for a surface-based parcel. CIN is integrated\n between the surface and LFC, CAPE is integrated between the LFC and EL (or top of\n sounding). Intersection points of the measured temperature profile and parcel profile are\n logarithmically interpolated.\n\n Parameters\n ----------\n pressure : `pint.Quantity`\n Atmospheric pressure profile. The first entry should be the starting\n (surface) observation, with the array going from high to low pressure.\n temperature : `pint.Quantity`\n Temperature profile corresponding to the `pressure` profile.\n dewpoint : `pint.Quantity`\n Dewpoint profile corresponding to the `pressure` profile.\n\n Returns\n -------\n `pint.Quantity`\n Surface based Convective Available Potential Energy (CAPE).\n `pint.Quantity`\n Surface based Convective INhibition (CIN).\n\n See Also\n --------\n cape_cin, parcel_profile\n\n \"\"\"\n pressure, temperature, dewpoint = _remove_nans(pressure, temperature, dewpoint)\n p, t, td, profile = parcel_profile_with_lcl(pressure, temperature, dewpoint)\n return cape_cin(p, t, td, profile)\n\n\[email protected]\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]', '[temperature]')\ndef most_unstable_cape_cin(pressure, temperature, dewpoint, **kwargs):\n r\"\"\"Calculate most unstable CAPE/CIN.\n\n Calculate the convective available potential energy (CAPE) and convective inhibition (CIN)\n of a given upper air profile and most unstable parcel path. CIN is integrated between the\n surface and LFC, CAPE is integrated between the LFC and EL (or top of sounding).\n Intersection points of the measured temperature profile and parcel profile are\n logarithmically interpolated.\n\n Parameters\n ----------\n pressure : `pint.Quantity`\n Pressure profile\n temperature : `pint.Quantity`\n Temperature profile\n dewpoint : `pint.Quantity`\n Dew point profile\n kwargs\n Additional keyword arguments to pass to `most_unstable_parcel`\n\n Returns\n -------\n `pint.Quantity`\n Most unstable Convective Available Potential Energy (CAPE).\n `pint.Quantity`\n Most unstable Convective INhibition (CIN).\n\n See Also\n --------\n cape_cin, most_unstable_parcel, parcel_profile\n\n \"\"\"\n pressure, temperature, dewpoint = _remove_nans(pressure, temperature, dewpoint)\n _, _, _, parcel_idx = most_unstable_parcel(pressure, temperature, dewpoint, **kwargs)\n p, t, td, mu_profile = parcel_profile_with_lcl(pressure[parcel_idx:],\n temperature[parcel_idx:],\n dewpoint[parcel_idx:])\n return cape_cin(p, t, td, mu_profile)\n\n\[email protected]\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]', '[temperature]')\ndef mixed_layer_cape_cin(pressure, temperature, dewpoint, **kwargs):\n r\"\"\"Calculate mixed-layer CAPE and CIN.\n\n Calculate the convective available potential energy (CAPE) and convective inhibition (CIN)\n of a given upper air profile and mixed-layer parcel path. CIN is integrated between the\n surface and LFC, CAPE is integrated between the LFC and EL (or top of sounding).\n Intersection points of the measured temperature profile and parcel profile are\n logarithmically interpolated. 
Kwargs for `mixed_parcel` can be provided, such as `depth`.\n Default mixed-layer depth is 100 hPa.\n\n Parameters\n ----------\n pressure : `pint.Quantity`\n Pressure profile\n temperature : `pint.Quantity`\n Temperature profile\n dewpoint : `pint.Quantity`\n Dewpoint profile\n kwargs\n Additional keyword arguments to pass to `mixed_parcel`\n\n Returns\n -------\n `pint.Quantity`\n Mixed-layer Convective Available Potential Energy (CAPE).\n `pint.Quantity`\n Mixed-layer Convective INhibition (CIN).\n\n See Also\n --------\n cape_cin, mixed_parcel, parcel_profile\n \"\"\"\n depth = kwargs.get('depth', 100 * units.hPa)\n parcel_pressure, parcel_temp, parcel_dewpoint = mixed_parcel(pressure, temperature,\n dewpoint, **kwargs)\n\n # Remove values below top of mixed layer and add in the mixed layer values\n pressure_prof = pressure[pressure < (pressure[0] - depth)]\n temp_prof = temperature[pressure < (pressure[0] - depth)]\n dew_prof = dewpoint[pressure < (pressure[0] - depth)]\n pressure_prof = concatenate([parcel_pressure, pressure_prof])\n temp_prof = concatenate([parcel_temp, temp_prof])\n dew_prof = concatenate([parcel_dewpoint, dew_prof])\n\n p, t, td, ml_profile = parcel_profile_with_lcl(pressure_prof, temp_prof, dew_prof)\n return cape_cin(p, t, td, ml_profile)\n\n\[email protected]\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]', '[temperature]')\ndef mixed_parcel(pressure, temperature, dewpoint, parcel_start_pressure=None,\n height=None, bottom=None, depth=100 * units.hPa, interpolate=True):\n r\"\"\"Calculate the properties of a parcel mixed from a layer.\n\n Determines the properties of an air parcel that is the result of complete mixing of a\n given atmospheric layer.\n\n Parameters\n ----------\n pressure : `pint.Quantity`\n Atmospheric pressure profile\n temperature : `pint.Quantity`\n Atmospheric temperature profile\n dewpoint : `pint.Quantity`\n Atmospheric dewpoint profile\n parcel_start_pressure : `pint.Quantity`, optional\n Pressure at which the mixed parcel should begin (default None)\n height: `pint.Quantity`, optional\n Atmospheric heights corresponding to the given pressures (default None)\n bottom : `pint.Quantity`, optional\n The bottom of the layer as a pressure or height above the surface pressure\n (default None)\n depth : `pint.Quantity`, optional\n The thickness of the layer as a pressure or height above the bottom of the layer\n (default 100 hPa)\n interpolate : bool, optional\n Interpolate the top and bottom points if they are not in the given data\n\n Returns\n -------\n `pint.Quantity`\n The pressure of the mixed parcel\n `pint.Quantity`\n The temperature of the mixed parcel\n `pint.Quantity`\n The dewpoint of the mixed parcel\n\n \"\"\"\n # If a parcel starting pressure is not provided, use the surface\n if not parcel_start_pressure:\n parcel_start_pressure = pressure[0]\n\n # Calculate the potential temperature and mixing ratio over the layer\n theta = potential_temperature(pressure, temperature)\n mixing_ratio = saturation_mixing_ratio(pressure, dewpoint)\n\n # Mix the variables over the layer\n mean_theta, mean_mixing_ratio = mixed_layer(pressure, theta, mixing_ratio, bottom=bottom,\n height=height, depth=depth,\n interpolate=interpolate)\n\n # Convert back to temperature\n mean_temperature = mean_theta * exner_function(parcel_start_pressure)\n\n # Convert back to dewpoint\n mean_vapor_pressure = vapor_pressure(parcel_start_pressure, mean_mixing_ratio)\n\n # Using globals() here allows us to keep the dewpoint parameter but still call 
the\n # function of the same name.\n mean_dewpoint = globals()['dewpoint'](mean_vapor_pressure)\n\n return (parcel_start_pressure, mean_temperature.to(temperature.units),\n mean_dewpoint.to(dewpoint.units))\n\n\[email protected]\n@preprocess_xarray\n@check_units('[pressure]')\ndef mixed_layer(pressure, *args, height=None, bottom=None, depth=100 * units.hPa,\n interpolate=True):\n r\"\"\"Mix variable(s) over a layer, yielding a mass-weighted average.\n\n This function will integrate a data variable with respect to pressure and determine the\n average value using the mean value theorem.\n\n Parameters\n ----------\n pressure : array-like\n Atmospheric pressure profile\n datavar : array-like\n Atmospheric variable measured at the given pressures\n height: array-like, optional\n Atmospheric heights corresponding to the given pressures (default None)\n bottom : `pint.Quantity`, optional\n The bottom of the layer as a pressure or height above the surface pressure\n (default None)\n depth : `pint.Quantity`, optional\n The thickness of the layer as a pressure or height above the bottom of the layer\n (default 100 hPa)\n interpolate : bool, optional\n Interpolate the top and bottom points if they are not in the given data (default True)\n\n Returns\n -------\n `pint.Quantity`\n The mixed value of the data variable.\n\n \"\"\"\n layer = get_layer(pressure, *args, height=height, bottom=bottom,\n depth=depth, interpolate=interpolate)\n p_layer = layer[0]\n datavars_layer = layer[1:]\n\n ret = []\n for datavar_layer in datavars_layer:\n actual_depth = abs(p_layer[0] - p_layer[-1])\n ret.append((-1. / actual_depth.m) * np.trapz(datavar_layer.m, p_layer.m)\n * datavar_layer.units)\n return ret\n\n\[email protected]\n@preprocess_xarray\n@check_units('[length]', '[temperature]')\ndef dry_static_energy(height, temperature):\n r\"\"\"Calculate the dry static energy of parcels.\n\n This function will calculate the dry static energy following the first two terms of\n equation 3.72 in [Hobbs2006]_.\n\n Notes\n -----\n .. math::\\text{dry static energy} = c_{pd} * T + gz\n\n * :math:`T` is temperature\n * :math:`z` is height\n\n Parameters\n ----------\n height : `pint.Quantity`\n Atmospheric height\n temperature : `pint.Quantity`\n Air temperature\n\n Returns\n -------\n `pint.Quantity`\n The dry static energy\n\n \"\"\"\n return (mpconsts.g * height + mpconsts.Cp_d * temperature).to('kJ/kg')\n\n\[email protected]\n@preprocess_xarray\n@check_units('[length]', '[temperature]', '[dimensionless]')\ndef moist_static_energy(height, temperature, specific_humidity):\n r\"\"\"Calculate the moist static energy of parcels.\n\n This function will calculate the moist static energy following\n equation 3.72 in [Hobbs2006]_.\n\n Notes\n -----\n .. 
math::\\text{moist static energy} = c_{pd} * T + gz + L_v q\n\n * :math:`T` is temperature\n * :math:`z` is height\n * :math:`q` is specific humidity\n\n Parameters\n ----------\n height : `pint.Quantity`\n Atmospheric height\n temperature : `pint.Quantity`\n Air temperature\n specific_humidity : `pint.Quantity`\n Atmospheric specific humidity\n\n Returns\n -------\n `pint.Quantity`\n The moist static energy\n\n \"\"\"\n return (dry_static_energy(height, temperature)\n + mpconsts.Lv * specific_humidity.to('dimensionless')).to('kJ/kg')\n\n\[email protected]\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]')\ndef thickness_hydrostatic(pressure, temperature, mixing_ratio=None,\n molecular_weight_ratio=mpconsts.epsilon, bottom=None, depth=None):\n r\"\"\"Calculate the thickness of a layer via the hypsometric equation.\n\n This thickness calculation uses the pressure and temperature profiles (and optionally\n mixing ratio) via the hypsometric equation with virtual temperature adjustment\n\n .. math:: Z_2 - Z_1 = -\\frac{R_d}{g} \\int_{p_1}^{p_2} T_v d\\ln p,\n\n which is based off of Equation 3.24 in [Hobbs2006]_.\n\n This assumes a hydrostatic atmosphere.\n\n Layer bottom and depth specified in pressure.\n\n Parameters\n ----------\n pressure : `pint.Quantity`\n Atmospheric pressure profile\n temperature : `pint.Quantity`\n Atmospheric temperature profile\n mixing_ratio : `pint.Quantity`, optional\n Profile of dimensionless mass mixing ratio. If none is given, virtual temperature\n is simply set to be the given temperature.\n molecular_weight_ratio : `pint.Quantity` or float, optional\n The ratio of the molecular weight of the constituent gas to that assumed\n for air. Defaults to the ratio for water vapor to dry air.\n (:math:`\\epsilon\\approx0.622`).\n bottom : `pint.Quantity`, optional\n The bottom of the layer in pressure. Defaults to the first observation.\n depth : `pint.Quantity`, optional\n The depth of the layer in hPa. 
Defaults to the full profile if bottom is not given,\n and 100 hPa if bottom is given.\n\n Returns\n -------\n `pint.Quantity`\n The thickness of the layer in meters.\n\n See Also\n --------\n thickness_hydrostatic_from_relative_humidity, pressure_to_height_std, virtual_temperature\n\n \"\"\"\n # Get the data for the layer, conditional upon bottom/depth being specified and mixing\n # ratio being given\n if bottom is None and depth is None:\n if mixing_ratio is None:\n layer_p, layer_virttemp = pressure, temperature\n else:\n layer_p = pressure\n layer_virttemp = virtual_temperature(temperature, mixing_ratio,\n molecular_weight_ratio)\n else:\n if mixing_ratio is None:\n layer_p, layer_virttemp = get_layer(pressure, temperature, bottom=bottom,\n depth=depth)\n else:\n layer_p, layer_temp, layer_w = get_layer(pressure, temperature, mixing_ratio,\n bottom=bottom, depth=depth)\n layer_virttemp = virtual_temperature(layer_temp, layer_w, molecular_weight_ratio)\n\n # Take the integral (with unit handling) and return the result in meters\n return (- mpconsts.Rd / mpconsts.g * np.trapz(\n layer_virttemp.m_as('K'), x=np.log(layer_p.m_as('hPa'))) * units.K).to('m')\n\n\[email protected]\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]')\ndef thickness_hydrostatic_from_relative_humidity(pressure, temperature, relative_humidity,\n bottom=None, depth=None):\n r\"\"\"Calculate the thickness of a layer given pressure, temperature and relative humidity.\n\n Similar to ``thickness_hydrostatic``, this thickness calculation uses the pressure,\n temperature, and relative humidity profiles via the hypsometric equation with virtual\n temperature adjustment.\n\n .. math:: Z_2 - Z_1 = -\\frac{R_d}{g} \\int_{p_1}^{p_2} T_v d\\ln p,\n\n which is based off of Equation 3.24 in [Hobbs2006]_. Virtual temperature is calculated\n from the profiles of temperature and relative humidity.\n\n This assumes a hydrostatic atmosphere.\n\n Layer bottom and depth specified in pressure.\n\n Parameters\n ----------\n pressure : `pint.Quantity`\n Atmospheric pressure profile\n temperature : `pint.Quantity`\n Atmospheric temperature profile\n relative_humidity : `pint.Quantity`\n Atmospheric relative humidity profile. The relative humidity is expressed as a\n unitless ratio in the range [0, 1]. Can also pass a percentage if proper units are\n attached.\n bottom : `pint.Quantity`, optional\n The bottom of the layer in pressure. Defaults to the first observation.\n depth : `pint.Quantity`, optional\n The depth of the layer in hPa. Defaults to the full profile if bottom is not given,\n and 100 hPa if bottom is given.\n\n Returns\n -------\n `pint.Quantity`\n The thickness of the layer in meters.\n\n See Also\n --------\n thickness_hydrostatic, pressure_to_height_std, virtual_temperature,\n mixing_ratio_from_relative_humidity\n\n \"\"\"\n mixing = mixing_ratio_from_relative_humidity(pressure, temperature, relative_humidity)\n\n return thickness_hydrostatic(pressure, temperature, mixing_ratio=mixing, bottom=bottom,\n depth=depth)\n\n\[email protected]\n@preprocess_xarray\n@check_units('[length]', '[temperature]')\ndef brunt_vaisala_frequency_squared(height, potential_temperature, axis=0):\n r\"\"\"Calculate the square of the Brunt-Vaisala frequency.\n\n Brunt-Vaisala frequency squared (a measure of atmospheric stability) is given by the\n formula:\n\n .. 
math:: N^2 = \\frac{g}{\\theta} \\frac{d\\theta}{dz}\n\n This formula is based off of Equations 3.75 and 3.77 in [Hobbs2006]_.\n\n Parameters\n ----------\n height : `pint.Quantity`\n One-dimensional profile of atmospheric height\n potential_temperature : `pint.Quantity`\n Atmospheric potential temperature\n axis : int, optional\n The axis corresponding to vertical in the potential temperature array, defaults to 0.\n\n Returns\n -------\n `pint.Quantity`\n The square of the Brunt-Vaisala frequency.\n\n See Also\n --------\n brunt_vaisala_frequency, brunt_vaisala_period, potential_temperature\n\n \"\"\"\n # Ensure validity of temperature units\n potential_temperature = potential_temperature.to('K')\n\n # Calculate and return the square of Brunt-Vaisala frequency\n return mpconsts.g / potential_temperature * first_derivative(potential_temperature,\n x=height, axis=axis)\n\n\[email protected]\n@preprocess_xarray\n@check_units('[length]', '[temperature]')\ndef brunt_vaisala_frequency(height, potential_temperature, axis=0):\n r\"\"\"Calculate the Brunt-Vaisala frequency.\n\n This function will calculate the Brunt-Vaisala frequency as follows:\n\n .. math:: N = \\left( \\frac{g}{\\theta} \\frac{d\\theta}{dz} \\right)^\\frac{1}{2}\n\n This formula based off of Equations 3.75 and 3.77 in [Hobbs2006]_.\n\n This function is a wrapper for `brunt_vaisala_frequency_squared` that filters out negative\n (unstable) quantities and takes the square root.\n\n Parameters\n ----------\n height : `pint.Quantity`\n One-dimensional profile of atmospheric height\n potential_temperature : `pint.Quantity`\n Atmospheric potential temperature\n axis : int, optional\n The axis corresponding to vertical in the potential temperature array, defaults to 0.\n\n Returns\n -------\n `pint.Quantity`\n Brunt-Vaisala frequency.\n\n See Also\n --------\n brunt_vaisala_frequency_squared, brunt_vaisala_period, potential_temperature\n\n \"\"\"\n bv_freq_squared = brunt_vaisala_frequency_squared(height, potential_temperature,\n axis=axis)\n bv_freq_squared[bv_freq_squared.magnitude < 0] = np.nan\n\n return np.sqrt(bv_freq_squared)\n\n\[email protected]\n@preprocess_xarray\n@check_units('[length]', '[temperature]')\ndef brunt_vaisala_period(height, potential_temperature, axis=0):\n r\"\"\"Calculate the Brunt-Vaisala period.\n\n This function is a helper function for `brunt_vaisala_frequency` that calculates the\n period of oscillation as in Exercise 3.13 of [Hobbs2006]_:\n\n .. math:: \\tau = \\frac{2\\pi}{N}\n\n Returns `NaN` when :math:`N^2 > 0`.\n\n Parameters\n ----------\n height : `pint.Quantity`\n One-dimensional profile of atmospheric height\n potential_temperature : pint.Quantity`\n Atmospheric potential temperature\n axis : int, optional\n The axis corresponding to vertical in the potential temperature array, defaults to 0.\n\n Returns\n -------\n `pint.Quantity`\n Brunt-Vaisala period.\n\n See Also\n --------\n brunt_vaisala_frequency, brunt_vaisala_frequency_squared, potential_temperature\n\n \"\"\"\n bv_freq_squared = brunt_vaisala_frequency_squared(height, potential_temperature,\n axis=axis)\n bv_freq_squared[bv_freq_squared.magnitude <= 0] = np.nan\n\n return 2 * np.pi / np.sqrt(bv_freq_squared)\n\n\[email protected]\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]', '[temperature]')\ndef wet_bulb_temperature(pressure, temperature, dewpoint):\n \"\"\"Calculate the wet-bulb temperature using Normand's rule.\n\n This function calculates the wet-bulb temperature using the Normand method. 
The LCL is\n computed, and that parcel brought down to the starting pressure along a moist adiabat.\n The Normand method (and others) are described and compared by [Knox2017]_.\n\n Parameters\n ----------\n pressure : `pint.Quantity`\n Initial atmospheric pressure\n temperature : `pint.Quantity`\n Initial atmospheric temperature\n dewpoint : `pint.Quantity`\n Initial atmospheric dewpoint\n\n Returns\n -------\n `pint.Quantity`\n Wet-bulb temperature\n\n See Also\n --------\n lcl, moist_lapse\n\n \"\"\"\n if not hasattr(pressure, 'shape'):\n pressure = np.atleast_1d(pressure)\n temperature = np.atleast_1d(temperature)\n dewpoint = np.atleast_1d(dewpoint)\n\n it = np.nditer([pressure, temperature, dewpoint, None],\n op_dtypes=['float', 'float', 'float', 'float'],\n flags=['buffered'])\n\n for press, temp, dewp, ret in it:\n press = press * pressure.units\n temp = temp * temperature.units\n dewp = dewp * dewpoint.units\n lcl_pressure, lcl_temperature = lcl(press, temp, dewp)\n moist_adiabat_temperatures = moist_lapse(concatenate([lcl_pressure, press]),\n lcl_temperature)\n ret[...] = moist_adiabat_temperatures[-1].magnitude\n\n # If we started with a scalar, return a scalar\n if it.operands[3].size == 1:\n return it.operands[3][0] * moist_adiabat_temperatures.units\n return it.operands[3] * moist_adiabat_temperatures.units\n\n\[email protected]\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]')\ndef static_stability(pressure, temperature, axis=0):\n r\"\"\"Calculate the static stability within a vertical profile.\n\n .. math:: \\sigma = -\\frac{RT}{p} \\frac{\\partial \\ln \\theta}{\\partial p}\n\n This formula is based on equation 4.3.6 in [Bluestein1992]_.\n\n Parameters\n ----------\n pressure : `pint.Quantity`\n Profile of atmospheric pressure\n temperature : `pint.Quantity`\n Profile of temperature\n axis : int, optional\n The axis corresponding to vertical in the pressure and temperature arrays, defaults\n to 0.\n\n Returns\n -------\n `pint.Quantity`\n The profile of static stability.\n\n \"\"\"\n theta = potential_temperature(pressure, temperature)\n\n return - mpconsts.Rd * temperature / pressure * first_derivative(np.log(theta.m_as('K')),\n x=pressure, axis=axis)\n\n\[email protected]\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]', '[dimensionless]')\ndef dewpoint_from_specific_humidity(pressure, temperature, specific_humidity):\n r\"\"\"Calculate the dewpoint from specific humidity, temperature, and pressure.\n\n Parameters\n ----------\n pressure: `pint.Quantity`\n Total atmospheric pressure\n temperature: `pint.Quantity`\n Air temperature\n specific_humidity: `pint.Quantity`\n Specific humidity of air\n\n Returns\n -------\n `pint.Quantity`\n Dew point temperature\n\n See Also\n --------\n relative_humidity_from_mixing_ratio, dewpoint_from_relative_humidity\n\n \"\"\"\n return dewpoint_from_relative_humidity(temperature,\n relative_humidity_from_specific_humidity(\n pressure, temperature, specific_humidity))\n\n\[email protected]\n@preprocess_xarray\n@check_units('[length]/[time]', '[pressure]', '[temperature]')\ndef vertical_velocity_pressure(w, pressure, temperature, mixing_ratio=0):\n r\"\"\"Calculate omega from w assuming hydrostatic conditions.\n\n This function converts vertical velocity with respect to height\n :math:`\\left(w = \\frac{Dz}{Dt}\\right)` to that\n with respect to pressure :math:`\\left(\\omega = \\frac{Dp}{Dt}\\right)`\n assuming hydrostatic conditions on the synoptic scale.\n By Equation 7.33 in [Hobbs2006]_,\n\n .. 
math:: \\omega \\simeq -\\rho g w\n\n Density (:math:`\\rho`) is calculated using the :func:`density` function,\n from the given pressure and temperature. If `mixing_ratio` is given, the virtual\n temperature correction is used, otherwise, dry air is assumed.\n\n Parameters\n ----------\n w: `pint.Quantity`\n Vertical velocity in terms of height\n pressure: `pint.Quantity`\n Total atmospheric pressure\n temperature: `pint.Quantity`\n Air temperature\n mixing_ratio: `pint.Quantity`, optional\n Mixing_ratio ratio of air\n\n Returns\n -------\n `pint.Quantity`\n Vertical velocity in terms of pressure (in Pascals / second)\n\n See Also\n --------\n density, vertical_velocity\n\n \"\"\"\n rho = density(pressure, temperature, mixing_ratio)\n return (-mpconsts.g * rho * w).to('Pa/s')\n\n\[email protected]\n@preprocess_xarray\n@check_units('[pressure]/[time]', '[pressure]', '[temperature]')\ndef vertical_velocity(omega, pressure, temperature, mixing_ratio=0):\n r\"\"\"Calculate w from omega assuming hydrostatic conditions.\n\n This function converts vertical velocity with respect to pressure\n :math:`\\left(\\omega = \\frac{Dp}{Dt}\\right)` to that with respect to height\n :math:`\\left(w = \\frac{Dz}{Dt}\\right)` assuming hydrostatic conditions on\n the synoptic scale. By Equation 7.33 in [Hobbs2006]_,\n\n .. math:: \\omega \\simeq -\\rho g w\n\n so that\n\n .. math:: w \\simeq \\frac{- \\omega}{\\rho g}\n\n Density (:math:`\\rho`) is calculated using the :func:`density` function,\n from the given pressure and temperature. If `mixing_ratio` is given, the virtual\n temperature correction is used, otherwise, dry air is assumed.\n\n Parameters\n ----------\n omega: `pint.Quantity`\n Vertical velocity in terms of pressure\n pressure: `pint.Quantity`\n Total atmospheric pressure\n temperature: `pint.Quantity`\n Air temperature\n mixing_ratio: `pint.Quantity`, optional\n Mixing ratio of air\n\n Returns\n -------\n `pint.Quantity`\n Vertical velocity in terms of height (in meters / second)\n\n See Also\n --------\n density, vertical_velocity_pressure\n\n \"\"\"\n rho = density(pressure, temperature, mixing_ratio)\n return (omega / (- mpconsts.g * rho)).to('m/s')\n\n\[email protected]\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]')\ndef specific_humidity_from_dewpoint(pressure, dewpoint):\n r\"\"\"Calculate the specific humidity from the dewpoint temperature and pressure.\n\n Parameters\n ----------\n dewpoint: `pint.Quantity`\n dewpoint temperature\n\n pressure: `pint.Quantity`\n pressure\n\n Returns\n -------\n `pint.Quantity`\n Specific humidity\n\n See Also\n --------\n mixing_ratio, saturation_mixing_ratio\n\n \"\"\"\n mixing_ratio = saturation_mixing_ratio(pressure, dewpoint)\n return specific_humidity_from_mixing_ratio(mixing_ratio)\n\n\[email protected]\n@preprocess_xarray\n@check_units('[pressure]', '[temperature]', '[temperature]')\ndef lifted_index(pressure, temperature, parcel_profile):\n \"\"\"Calculate Lifted Index from the pressure temperature and parcel profile.\n\n Lifted index formula derived from [Galway1956]_ and referenced by [Doswell-Schultz2006]_:\n LI = T500 - Tp500\n where:\n T500 is the measured temperature at 500 hPa.\n Tp500 is the temperature of the lifted parcel at 500 hPa.\n\n Calculation of the lifted index is defined as the temperature difference between the\n observed 500 hPa temperature and the temperature of a parcel lifted from the\n surface to 500 hPa.\n\n Parameters\n ----------\n pressure : `pint.Quantity`\n The atmospheric pressure level(s) of 
interest, in order from highest to\n lowest pressure.\n temperature : `pint.Quantity`\n The atmospheric temperature corresponding to pressure.\n parcel_profile : `pint.Quantity`\n The temperature profile of the parcel.\n\n Returns\n -------\n `pint.Quantity`\n Lifted Index.\n\n \"\"\"\n # find the index for the 500 hPa pressure level.\n idx = np.where(pressure == 500 * units.hPa)\n # find the measured temperature at 500 hPa.\n T500 = temperature[idx]\n # find the parcel profile temperature at 500 hPa.\n Tp500 = parcel_profile[idx]\n # calculate the lifted index.\n lifted_index = T500 - Tp500.to(units.degC)\n return lifted_index\n\n\n@exporter.export\n@preprocess_xarray\n@check_units('[length]', '[temperature]', '[speed]', '[speed]')\ndef gradient_richardson_number(height, potential_temperature, u, v, axis=0):\n r\"\"\"Calculate the gradient (or flux) Richardson number.\n\n .. math:: Ri = \\frac{g}{\\theta} \\frac{\\partial \\theta/\\partial z}\n {\\left(\\partial u / \\partial z\\right)^2 + \\left(\\partial v / \\partial z\\right)^2}\n\n See [Holton2004]_ pg. 121-122. As noted by [Holton2004]_, flux Richardson\n number values below 0.25 indicate turbulence.\n\n Parameters\n ----------\n height : `pint.Quantity`\n Atmospheric height\n potential_temperature : `pint.Quantity`\n Atmospheric potential temperature\n u : `pint.Quantity`\n x component of the wind\n v : `pint.Quantity`\n y component of the wind\n axis : int, optional\n The axis corresponding to vertical, defaults to 0.\n\n Returns\n -------\n `pint.Quantity`\n Gradient Richardson number\n \"\"\"\n dthetadz = first_derivative(potential_temperature, x=height, axis=axis)\n dudz = first_derivative(u, x=height, axis=axis)\n dvdz = first_derivative(v, x=height, axis=axis)\n\n return (mpconsts.g / potential_temperature) * (dthetadz / (dudz ** 2 + dvdz ** 2))\n" ]
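The thermodynamic helpers in the code field above (saturation_vapor_pressure, dewpoint_from_relative_humidity, parcel_profile_with_lcl, cape_cin, and the surface_based_cape_cin wrapper) all operate on pint quantities and expect pressure profiles ordered from highest to lowest pressure. A minimal usage sketch follows; it assumes the module is importable MetPy-style as metpy.calc with the metpy.units registry (the package name and import paths are assumptions, since this record does not show them), and the sounding values are illustrative only.

import numpy as np
# Assumed import paths; the record above does not name the package.
from metpy.calc import dewpoint_from_relative_humidity, surface_based_cape_cin
from metpy.units import units

# A small sounding, ordered from highest to lowest pressure as the
# docstrings above require.
pressure = np.array([1000., 925., 850., 700., 500.]) * units.hPa
temperature = np.array([25., 21., 16., 5., -18.]) * units.degC
rel_humidity = np.array([0.85, 0.80, 0.70, 0.50, 0.30]) * units.dimensionless

# Dewpoint profile via saturation_vapor_pressure + dewpoint (defined above).
dewpoint_prof = dewpoint_from_relative_humidity(temperature, rel_humidity)

# Surface-based CAPE/CIN: builds the parcel profile with the LCL inserted,
# then integrates the parcel-environment differences inside cape_cin.
cape, cin = surface_based_cape_cin(pressure, temperature, dewpoint_prof)
print(cape.to('J/kg'), cin.to('J/kg'))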
[ [ "numpy.sqrt", "numpy.nanmin", "numpy.concatenate", "numpy.max", "numpy.zeros_like", "numpy.any", "numpy.searchsorted", "numpy.exp", "numpy.where", "numpy.trapz", "numpy.swapaxes", "numpy.atleast_1d", "numpy.copy", "numpy.argmax", "numpy.insert", "scipy.optimize.fixed_point", "numpy.isclose", "numpy.log", "numpy.min", "numpy.isnan", "numpy.append", "numpy.argsort", "numpy.array", "numpy.nditer", "numpy.ediff1d", "numpy.broadcast_to", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
mrcagney/googlemaps_helpers
[ "75dfcc3e5e788d04c3af3e7608909b349ac83e8d" ]
[ "googlemaps_helpers/main.py" ]
[ "from itertools import product\nimport math\nfrom collections import OrderedDict\nfrom pathlib import Path\nimport logging\n\nimport pandas as pd\nimport numpy as np\nimport geopandas as gpd\nimport shapely.geometry as sg\nimport googlemaps\n\n\n# Configure logging\nlogger = logging.getLogger()\nhandler = logging.StreamHandler()\nformatter = logging.Formatter(\n '%(asctime)s %(name)-12s %(levelname)-8s \\n%(message)s')\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\nlogger.setLevel(logging.INFO)\n\nWGS84 = {'init': 'epsg:4326'}\n# Maximum number of elements in a Google Maps Distance Matrix API query\nMAX_ELEMENTS = 100\n\ndef flip_coords(xy_list):\n \"\"\"\n Given a list of coordinate pairs, swap the first and second\n coordinates and return the resulting list.\n \"\"\"\n return [(y, x) for (x, y) in xy_list]\n\ndef make_ids(n, prefix='row_'):\n \"\"\"\n Return a list of ``n`` (integer) unique strings of the form\n ``prefix``<number>.\n \"\"\"\n k = int(math.log10(n)) + 1 # Number of digits for padding\n return [prefix + '{num:0{pad}d}'.format(num=i, pad=k) for i in range(n)]\n\ndef to_df(distance_matrix_response, origin_ids=None, destination_ids=None):\n \"\"\"\n Given a (decoded) JSON response to a Google Maps\n Distance Matrix API call, convert it into a DataFrame with the\n following columns.\n\n - ``'origin_address'``\n - ``'origin_id'``: ID of origin; defaults to an element of\n :func:`make_ids`\n - ``'destination_address'``\n - ``'destination_id'``: ID of destination; defaluts to an element of\n :func:`make_ids`\n - ``'duration'``: time from origin to destination; includes\n time in traffic if that's available in the response\n - ``'distance'``: distance from origin to destination\n\n The origin and destination addresses in the response can optionally\n be assigned IDs by setting ``origin_ids`` (list of strings) and\n ``destination_ids`` (list of strings).\n \"\"\"\n # Initialize\n r = distance_matrix_response\n columns = ['origin_address', 'destination_address', 'origin_id',\n 'destination_id', 'duration', 'distance']\n f = pd.DataFrame([], columns=columns)\n\n # Append addresses\n if not r['rows']:\n return f\n\n f['origin_address'], f['destination_address'] = zip(\n *product(r['origin_addresses'], r['destination_addresses']))\n\n # Append IDs\n if origin_ids is None:\n origin_ids = make_ids(len(r['origin_addresses']))\n\n if destination_ids is None:\n destination_ids = make_ids(len(r['destination_addresses']))\n\n f['origin_id'], f['destination_id'] = zip(\n *product(origin_ids, destination_ids))\n\n # Append durations and distances\n durs = []\n dists = []\n for row in r['rows']:\n for e in row['elements']:\n if e['status'] == 'OK':\n if 'duration_in_traffic' in e:\n dur_key = 'duration_in_traffic'\n else:\n dur_key = 'duration'\n durs.append(e[dur_key]['value'])\n dists.append(e['distance']['value'])\n else:\n durs.append(np.nan)\n dists.append(np.nan)\n f['duration'] = durs\n f['distance'] = dists\n\n return f\n\ndef point_df_to_gdf(f, x_col='lon', y_col='lat', from_crs=WGS84):\n \"\"\"\n Given a DataFrame of points with x coordinates\n in the column ``x_col`` and y coordinates in the column ``y_col``,\n with respect to the GeoPandas coordinate reference system\n ``from_crs`` (dictionary), convert the DataFrame into a GeoDataFrame\n with that coordinate reference system and with a ``'geometry'``\n column that corresponds to the points.\n Delete the original x and y columns, and return the result.\n \"\"\"\n f = f.copy()\n f['geometry'] = f[[x_col, 
y_col]].apply(lambda p: sg.Point(p), axis=1)\n f = f.drop([x_col, y_col], axis=1)\n f = gpd.GeoDataFrame(f)\n f.crs = from_crs\n return f\n\ndef point_gdf_to_df(f, x_col='lon', y_col='lat', to_crs=WGS84):\n \"\"\"\n The inverse of :func:`point_df_to_gdf`.\n Given a GeoDataFrame of points, convert to the coordinate reference\n system ``to_crs`` (dictionary), then split its ``'geometry'`` column\n into x coordinates in the column ``x_col`` and y coordinates in the\n columns ``y_col``, deleting the ``'geometry'`` column afterwards.\n Coerce the result into a DataFrame and return it.\n \"\"\"\n f = f.copy()\n if f.crs is None:\n raise ValueError('GeoDataFrame needs a crs attribute')\n if f.crs != to_crs:\n f = f.to_crs(to_crs)\n\n f[x_col], f[y_col] = zip(*f['geometry'].map(lambda p: p.coords[0]))\n del f['geometry']\n return pd.DataFrame(f)\n\ndef build_distance_matrix_df(client, origins_gdf, destinations_gdf,\n origin_id_col=None, destination_id_col=None,\n max_elements=MAX_ELEMENTS, **distance_matrix_kwargs):\n \"\"\"\n Compute the duration-distance matrix between the given origins\n and destinations, assuming that the number of origins multiplied\n by the number of destinations is at most ``max_elements``.\n To do this, call the Google Maps Distance Matrix API once.\n\n INPUT:\n\n - ``client``: google-maps-services-python Client instance\n - ``origins_gdf``: GeoDataFrame of point; the origins\n - ``destinations_gdf``: GeoDataFrame of points; the destinations\n - ``origin_id_col``: string; name of ID column in ``origins_gdf``\n - ``destination_id_col``: string; name of ID column in\n ``destinations_gdf``\n - ``max_elements``: integer; max number of elements allowable in\n one Google Maps Distance Matrix API call\n - ``distance_matrix_kwargs``: dictionary; keyword arguments for\n Google Maps Distance Matrix API\n\n OUTPUT:\n\n A DataFrame of the form output by :func:`to_df` where the origins\n come from ``origins_gdf`` and the destinations come from\n ``destinations_gdf``.\n\n Return an empty DataFrame with the expected column names if an\n HTTPError on Timeout exception occurs.\n \"\"\"\n # Initialize origin and destinations GeoDataFrames\n o_gdf = origins_gdf.copy()\n d_gdf = destinations_gdf.copy()\n\n n = o_gdf.shape[0]*d_gdf.shape[0]\n if n > max_elements:\n raise ValueError('Number of origins times number of destinations '\n 'is {}, which exceeds threshold of {} elements'.format(\n n, max_elements))\n\n # Prepare origin data\n if o_gdf.crs != WGS84:\n o_gdf = o_gdf.to_crs(WGS84)\n if origin_id_col is None:\n origin_id_col = 'temp_id'\n o_gdf[origin_id_col] = make_ids(o_gdf.shape[0])\n\n o_locs = [geo.coords[0] for geo in o_gdf['geometry']]\n o_ids = o_gdf[origin_id_col].values\n\n # Prepare destination data\n if d_gdf.crs != WGS84:\n d_gdf = d_gdf.to_crs(WGS84)\n if destination_id_col is None:\n destination_id_col = 'temp_id'\n d_gdf[destination_id_col] = make_ids(d_gdf.shape[0])\n\n d_locs = [geo.coords[0] for geo in d_gdf['geometry']]\n d_ids = d_gdf[destination_id_col].values\n\n # Get matrix info\n try:\n r = client.distance_matrix(flip_coords(o_locs),\n flip_coords(d_locs), **distance_matrix_kwargs)\n f = to_df(r, o_ids, d_ids)\n except (googlemaps.exceptions.HTTPError, googlemaps.exceptions.Timeout):\n # Empty DataFrame\n f = pd.DataFrame(columns=[\n 'origin_address',\n 'origin_id',\n 'destination_address',\n 'destination_id',\n 'duration',\n 'distance',\n ])\n\n return f\n\ndef run_distance_matrix_job(client, origins_gdf, destinations_gdf, out_dir,\n origin_id_col=None, 
destination_id_col=None,\n max_elements=MAX_ELEMENTS, **distance_matrix_kwargs):\n \"\"\"\n Compute the duration-distance matrix between the given origins\n and destinations.\n To do this, call the Google Maps Distance Matrix API repeatedly,\n ensuring that each call uses no more than ``max_elements`` elements.\n\n INPUT:\n\n - ``client``: google-maps-services-python Client instance\n - ``origins_gdf``: GeoDataFrame of points; the origins\n - ``destinations_gdf``: GeoDataFrame of points; the destinations\n - ``out_dir``: string or Path object of a directory at which\n to store the output files; create the directory if it does not\n exist\n - ``origin_id_col``: string; name of ID column in ``origins_gdf``\n - ``destination_id_col``: string; name of ID column in\n ``destinations_gdf``\n - ``max_elements``: integer; max number of elements allowable in\n one Google Maps Distance Matrix API call\n - ``distance_matrix_kwargs``: dictionary; keyword arguments for\n Google Maps Distance Matrix API\n\n OUTPUT:\n\n A collection of CSV files located at ``out_dir`` of the form output\n by :func:`to_df`, where the origins comes from ``origins_gdf`` and\n the destinations come from ``destinations_gdf``.\n Each file will contains one origin points and at most\n ``max_elements`` destination points, for a total of at most\n ``max_elements`` rows.\n An empty DataFrame with the expected column names will be saved to\n file if an HTTPError on Timeout exception occurs.\n This can happen if, for example, the daily query limit is exceeded.\n \"\"\"\n o_gdf = origins_gdf.copy()\n d_gdf = destinations_gdf.copy()\n\n n_o = o_gdf.shape[0]\n n_d = d_gdf.shape[0]\n\n # Create IDs if necessary\n if origin_id_col is None:\n origin_id_col = 'ersatz_origin_id'\n o_gdf[origin_id_col] = make_ids(n_o, 'orig_row_')\n\n if destination_id_col is None:\n destination_id_col = 'ersatz_destination_id'\n d_gdf[destination_id_col] = make_ids(n_d, 'dest_row_')\n\n # Get mode for logging\n mode = distance_matrix_kwargs.get('mode', 'driving')\n\n # Make output directory if it does not exist\n out_dir = Path(out_dir)\n if not out_dir.exists():\n out_dir.mkdir(parents=True)\n\n # Iterate through origins.\n # For each origin segment all destinations into chunks of size\n # at most ``max_elements``.\n # For each destination chunk, build a one-to-many matrix from the\n # origin to all the destinations in the chunk and save it to file.\n for ix, orig_id in o_gdf[[origin_id_col]].itertuples():\n logger.info('Working on origin {} of {} (id {})'.format(\n ix + 1, n_o, orig_id))\n\n # Chunk destinations and build one-to-many matrices from origin\n # to destination chunks.\n # A failed attempt (e.g. 
through API usage over limit)\n # will build an empty matrix\n for j in range(math.ceil(n_d/max_elements)):\n n1 = max_elements*j\n n2 = min(max_elements*(j + 1), n_d)\n dest_id1, dest_id2 = (\n d_gdf[destination_id_col].iat[n1],\n d_gdf[destination_id_col].iat[n2 - 1]\n )\n path = Path(out_dir)/'{}_from_{}_to_{}--{}.csv'.format(\n mode, orig_id, dest_id1, dest_id2)\n f = build_distance_matrix_df(client, o_gdf.loc[ix:ix],\n d_gdf.iloc[n1:n2],\n origin_id_col=origin_id_col,\n destination_id_col=destination_id_col,\n **distance_matrix_kwargs)\n f.to_csv(path, index=False)\n\n if f.empty:\n logger.info('* Failed to get data for ' + path.stem)\n\ndef compute_cost(n, cost=0.5/1000, num_freebies=0,\n daily_limit=100000, chunk_size=MAX_ELEMENTS):\n \"\"\"\n Estimate the cost of a sequence of Google Maps Distance Matrix\n queries comprising a total of n elements at ``cost`` USD per\n element, where the first ``num_freebies`` (integer) elements are\n free.\n Return a Series that includes the cost and some other metadata.\n \"\"\"\n d = OrderedDict()\n d['#elements'] = n\n d['exceeds {!s}-element daily limit?'.format(daily_limit)] = (\n n > daily_limit)\n d['estimated cost for job in USD'] = max(0, n - num_freebies)*cost\n d['estimated duration for job in minutes'] = n/chunk_size/60\n return pd.Series(d)\n" ]
[ [ "pandas.Series", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
quantumalaviya/keras
[ "8d874de12ed2e199d9528bfff891f4f60ee2a636", "8d874de12ed2e199d9528bfff891f4f60ee2a636" ]
[ "keras/saving/saved_model/layer_serialization.py", "keras/saving/hdf5_format.py" ]
[ "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Classes and functions implementing Layer SavedModel serialization.\"\"\"\n\nfrom keras.mixed_precision import policy\nfrom keras.saving.saved_model import base_serialization\nfrom keras.saving.saved_model import constants\nfrom keras.saving.saved_model import save_impl\nfrom keras.saving.saved_model import serialized_attributes\nfrom keras.utils import generic_utils\nimport tensorflow.compat.v2 as tf\n\n\nclass LayerSavedModelSaver(base_serialization.SavedModelSaver):\n \"\"\"Implements Layer SavedModel serialization.\"\"\"\n\n @property\n def object_identifier(self):\n return constants.LAYER_IDENTIFIER\n\n @property\n def python_properties(self):\n # TODO(kathywu): Add python property validator\n return self._python_properties_internal()\n\n def _python_properties_internal(self):\n \"\"\"Returns dictionary of all python properties.\"\"\"\n # TODO(kathywu): Add support for metrics serialization.\n # TODO(kathywu): Synchronize with the keras spec (go/keras-json-spec) once\n # the python config serialization has caught up.\n metadata = dict(\n name=self.obj.name,\n trainable=self.obj.trainable,\n expects_training_arg=self.obj._expects_training_arg, # pylint: disable=protected-access\n dtype=policy.serialize(self.obj._dtype_policy), # pylint: disable=protected-access\n batch_input_shape=getattr(self.obj, '_batch_input_shape', None),\n stateful=self.obj.stateful,\n must_restore_from_config=self.obj._must_restore_from_config, # pylint: disable=protected-access\n )\n\n metadata.update(get_serialized(self.obj))\n if self.obj.input_spec is not None:\n # Layer's input_spec has already been type-checked in the property setter.\n metadata['input_spec'] = tf.nest.map_structure(\n lambda x: generic_utils.serialize_keras_object(x) if x else None,\n self.obj.input_spec)\n if (self.obj.activity_regularizer is not None and\n hasattr(self.obj.activity_regularizer, 'get_config')):\n metadata['activity_regularizer'] = generic_utils.serialize_keras_object(\n self.obj.activity_regularizer)\n if self.obj._build_input_shape is not None: # pylint: disable=protected-access\n metadata['build_input_shape'] = self.obj._build_input_shape # pylint: disable=protected-access\n return metadata\n\n def objects_to_serialize(self, serialization_cache):\n return (self._get_serialized_attributes(\n serialization_cache).objects_to_serialize)\n\n def functions_to_serialize(self, serialization_cache):\n return (self._get_serialized_attributes(\n serialization_cache).functions_to_serialize)\n\n def _get_serialized_attributes(self, serialization_cache):\n \"\"\"Generates or retrieves serialized attributes from cache.\"\"\"\n keras_cache = serialization_cache.setdefault(constants.KERAS_CACHE_KEY, {})\n if self.obj in keras_cache:\n return keras_cache[self.obj]\n\n serialized_attr = keras_cache[self.obj] = (\n 
serialized_attributes.SerializedAttributes.new(self.obj))\n\n if (save_impl.should_skip_serialization(self.obj) or\n self.obj._must_restore_from_config): # pylint: disable=protected-access\n return serialized_attr\n\n object_dict, function_dict = self._get_serialized_attributes_internal(\n serialization_cache)\n\n serialized_attr.set_and_validate_objects(object_dict)\n serialized_attr.set_and_validate_functions(function_dict)\n return serialized_attr\n\n def _get_serialized_attributes_internal(self, serialization_cache):\n \"\"\"Returns dictionary of serialized attributes.\"\"\"\n objects = save_impl.wrap_layer_objects(self.obj, serialization_cache)\n functions = save_impl.wrap_layer_functions(self.obj, serialization_cache)\n # Attribute validator requires that the default save signature is added to\n # function dict, even if the value is None.\n functions['_default_save_signature'] = None\n return objects, functions\n\n\n# TODO(kathywu): Move serialization utils (and related utils from\n# generic_utils.py) to a separate file.\ndef get_serialized(obj):\n with generic_utils.skip_failed_serialization():\n # Store the config dictionary, which may be used when reviving the object.\n # When loading, the program will attempt to revive the object from config,\n # and if that fails, the object will be revived from the SavedModel.\n return generic_utils.serialize_keras_object(obj)\n\n\nclass InputLayerSavedModelSaver(base_serialization.SavedModelSaver):\n \"\"\"InputLayer serialization.\"\"\"\n\n @property\n def object_identifier(self):\n return constants.INPUT_LAYER_IDENTIFIER\n\n @property\n def python_properties(self):\n\n return dict(\n class_name=type(self.obj).__name__,\n name=self.obj.name,\n dtype=self.obj.dtype,\n sparse=self.obj.sparse,\n ragged=self.obj.ragged,\n batch_input_shape=self.obj._batch_input_shape, # pylint: disable=protected-access\n config=self.obj.get_config())\n\n def objects_to_serialize(self, serialization_cache):\n return {}\n\n def functions_to_serialize(self, serialization_cache):\n return {}\n\n\nclass RNNSavedModelSaver(LayerSavedModelSaver):\n \"\"\"RNN layer serialization.\"\"\"\n\n @property\n def object_identifier(self):\n return constants.RNN_LAYER_IDENTIFIER\n\n def _get_serialized_attributes_internal(self, serialization_cache):\n objects, functions = (\n super(RNNSavedModelSaver, self)._get_serialized_attributes_internal(\n serialization_cache))\n states = tf.__internal__.tracking.wrap(self.obj.states)\n # SaveModel require all the objects to be Trackable when saving.\n # If the states is still a tuple after wrap_or_unwrap, it means it doesn't\n # contain any trackable item within it, eg empty tuple or (None, None) for\n # stateless ConvLSTM2D. We convert them to list so that wrap_or_unwrap can\n # make it a Trackable again for saving. When loaded, ConvLSTM2D is\n # able to handle the tuple/list conversion.\n if isinstance(states, tuple):\n states = tf.__internal__.tracking.wrap(list(states))\n objects['states'] = states\n return objects, functions\n\n\nclass VocabularySavedModelSaver(LayerSavedModelSaver):\n \"\"\"Handles vocabulary layer serialization.\n\n This class is needed for StringLookup, IntegerLookup, and TextVectorization,\n which all have a vocabulary as part of the config. Currently, we keep this\n vocab as part of the config until saving, when we need to clear it to avoid\n initializing a StaticHashTable twice (once when restoring the config and once\n when restoring restoring module resources). 
After clearing the vocab, we\n presist a property to the layer indicating it was constructed with a vocab.\n \"\"\"\n\n @property\n def python_properties(self):\n # TODO(kathywu): Add python property validator\n metadata = self._python_properties_internal()\n # Clear the vocabulary from the config during saving.\n metadata['config']['vocabulary'] = None\n # Persist a property to track that a vocabulary was passed on construction.\n metadata['config']['has_input_vocabulary'] = self.obj._has_input_vocabulary # pylint: disable=protected-access\n return metadata\n", "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# pylint: disable=protected-access\n\"\"\"Functions for saving and loading a Keras Model from HDF5 format.\"\"\"\n\nimport tensorflow.compat.v2 as tf\n\nimport json\nimport os\n\nimport numpy as np\n\nfrom keras import backend\nfrom keras import optimizer_v1\nfrom keras.saving import model_config as model_config_lib\nfrom keras.saving import saving_utils\nfrom keras.saving.saved_model import json_utils\nfrom keras.utils.generic_utils import LazyLoader\nfrom keras.utils.io_utils import ask_to_proceed_with_overwrite\nfrom tensorflow.python.platform import tf_logging as logging\n\n\n# pylint: disable=g-import-not-at-top\ntry:\n import h5py\n HDF5_OBJECT_HEADER_LIMIT = 64512\nexcept ImportError:\n h5py = None\n# pylint: enable=g-import-not-at-top\n\n# TODO(b/134426265): Switch back to single-quotes to match the rest of the file\n# once the issue with copybara is fixed.\n# pylint:disable=g-inconsistent-quotes\nsequential_lib = LazyLoader(\n \"sequential_lib\", globals(),\n \"keras.engine.sequential\")\n# pylint:enable=g-inconsistent-quotes\n\n\ndef save_model_to_hdf5(model, filepath, overwrite=True, include_optimizer=True):\n \"\"\"Saves a model to a HDF5 file.\n\n The saved model contains:\n - the model's configuration (topology)\n - the model's weights\n - the model's optimizer's state (if any)\n\n Thus the saved model can be reinstantiated in\n the exact same state, without any of the code\n used for model definition or training.\n\n Args:\n model: Keras model instance to be saved.\n filepath: One of the following:\n - String, path where to save the model\n - `h5py.File` object where to save the model\n overwrite: Whether we should overwrite any existing\n model at the target location, or instead\n ask the user with a manual prompt.\n include_optimizer: If True, save optimizer's state together.\n\n Raises:\n ImportError: if h5py is not available.\n \"\"\"\n\n if h5py is None:\n raise ImportError('`save_model()` using h5 format requires h5py. 
Could not '\n 'import h5py.')\n\n # TODO(psv) Add warning when we save models that contain non-serializable\n # entities like metrics added using `add_metric` and losses added using\n # `add_loss.`\n if len(model.weights) != len(model._undeduplicated_weights):\n logging.warning('Found duplicated `Variable`s in Model\\'s `weights`. '\n 'This is usually caused by `Variable`s being shared by '\n 'Layers in the Model. These `Variable`s will be treated '\n 'as separate `Variable`s when the Model is restored. To '\n 'avoid this, please save with `save_format=\"tf\"`.')\n\n if not isinstance(filepath, h5py.File):\n # If file exists and should not be overwritten.\n if not overwrite and os.path.isfile(filepath):\n proceed = ask_to_proceed_with_overwrite(filepath)\n if not proceed:\n return\n\n # Try creating dir if not exist\n dirpath = os.path.dirname(filepath)\n if not os.path.exists(dirpath):\n tf.io.gfile.makedirs(dirpath)\n\n f = h5py.File(filepath, mode='w')\n opened_new_file = True\n else:\n f = filepath\n opened_new_file = False\n\n try:\n model_metadata = saving_utils.model_metadata(model, include_optimizer)\n for k, v in model_metadata.items():\n if isinstance(v, (dict, list, tuple)):\n f.attrs[k] = json.dumps(\n v, default=json_utils.get_json_type).encode('utf8')\n else:\n f.attrs[k] = v\n\n model_weights_group = f.create_group('model_weights')\n save_weights_to_hdf5_group(model_weights_group, model)\n\n # TODO(b/128683857): Add integration tests between tf.keras and external\n # Keras, to avoid breaking TF.js users.\n if (include_optimizer and model.optimizer and\n not isinstance(model.optimizer, optimizer_v1.TFOptimizer)):\n save_optimizer_weights_to_hdf5_group(f, model.optimizer)\n\n f.flush()\n finally:\n if opened_new_file:\n f.close()\n\n\ndef load_model_from_hdf5(filepath, custom_objects=None, compile=True): # pylint: disable=redefined-builtin\n \"\"\"Loads a model saved via `save_model_to_hdf5`.\n\n Args:\n filepath: One of the following:\n - String, path to the saved model\n - `h5py.File` object from which to load the model\n custom_objects: Optional dictionary mapping names\n (strings) to custom classes or functions to be\n considered during deserialization.\n compile: Boolean, whether to compile the model\n after loading.\n\n Returns:\n A Keras model instance. If an optimizer was found\n as part of the saved model, the model is already\n compiled. Otherwise, the model is uncompiled and\n a warning will be displayed. When `compile` is set\n to False, the compilation is omitted without any\n warning.\n\n Raises:\n ImportError: if h5py is not available.\n ValueError: In case of an invalid savefile.\n \"\"\"\n if h5py is None:\n raise ImportError('`load_model()` using h5 format requires h5py. 
Could not '\n 'import h5py.')\n\n if not custom_objects:\n custom_objects = {}\n\n opened_new_file = not isinstance(filepath, h5py.File)\n if opened_new_file:\n f = h5py.File(filepath, mode='r')\n else:\n f = filepath\n\n model = None\n try:\n # instantiate model\n model_config = f.attrs.get('model_config')\n if model_config is None:\n raise ValueError(f'No model config found in the file at {filepath}.')\n if hasattr(model_config, 'decode'):\n model_config = model_config.decode('utf-8')\n model_config = json_utils.decode(model_config)\n model = model_config_lib.model_from_config(model_config,\n custom_objects=custom_objects)\n\n # set weights\n load_weights_from_hdf5_group(f['model_weights'], model)\n\n if compile:\n # instantiate optimizer\n training_config = f.attrs.get('training_config')\n if hasattr(training_config, 'decode'):\n training_config = training_config.decode('utf-8')\n if training_config is None:\n logging.warning('No training configuration found in the save file, so '\n 'the model was *not* compiled. Compile it manually.')\n return model\n training_config = json_utils.decode(training_config)\n\n # Compile model.\n model.compile(**saving_utils.compile_args_from_training_config(\n training_config, custom_objects), from_serialized=True)\n saving_utils.try_build_compiled_arguments(model)\n\n # Set optimizer weights.\n if 'optimizer_weights' in f:\n try:\n model.optimizer._create_all_weights(model.trainable_variables)\n except (NotImplementedError, AttributeError):\n logging.warning(\n 'Error when creating the weights of optimizer {}, making it '\n 'impossible to restore the saved optimizer state. As a result, '\n 'your model is starting with a freshly initialized optimizer.')\n\n optimizer_weight_values = load_optimizer_weights_from_hdf5_group(f)\n try:\n model.optimizer.set_weights(optimizer_weight_values)\n except ValueError:\n logging.warning('Error in loading the saved optimizer '\n 'state. 
As a result, your model is '\n 'starting with a freshly initialized '\n 'optimizer.')\n finally:\n if opened_new_file:\n f.close()\n return model\n\n\ndef preprocess_weights_for_loading(layer,\n weights,\n original_keras_version=None,\n original_backend=None):\n \"\"\"Preprocess layer weights between different Keras formats.\n\n Converts layers weights from Keras 1 format to Keras 2 and also weights of\n cuDNN layers in Keras 2.\n\n Args:\n layer: Layer instance.\n weights: List of weights values (Numpy arrays).\n original_keras_version: Keras version for the weights, as a string.\n original_backend: Keras backend the weights were trained with,\n as a string.\n\n Returns:\n A list of weights values (Numpy arrays).\n \"\"\"\n def convert_nested_bidirectional(weights):\n \"\"\"Converts layers nested in `Bidirectional` wrapper.\n\n This function uses `preprocess_weights_for_loading()` for converting\n layers.\n\n Args:\n weights: List of weights values (Numpy arrays).\n\n Returns:\n A list of weights values (Numpy arrays).\n \"\"\"\n num_weights_per_layer = len(weights) // 2\n forward_weights = preprocess_weights_for_loading(\n layer.forward_layer, weights[:num_weights_per_layer],\n original_keras_version, original_backend)\n backward_weights = preprocess_weights_for_loading(\n layer.backward_layer, weights[num_weights_per_layer:],\n original_keras_version, original_backend)\n return forward_weights + backward_weights\n\n def convert_nested_time_distributed(weights):\n \"\"\"Converts layers nested in `TimeDistributed` wrapper.\n\n This function uses `preprocess_weights_for_loading()` for converting nested\n layers.\n\n Args:\n weights: List of weights values (Numpy arrays).\n\n Returns:\n A list of weights values (Numpy arrays).\n \"\"\"\n return preprocess_weights_for_loading(\n layer.layer, weights, original_keras_version, original_backend)\n\n def convert_nested_model(weights):\n \"\"\"Converts layers nested in `Model` or `Sequential`.\n\n This function uses `preprocess_weights_for_loading()` for converting nested\n layers.\n\n Args:\n weights: List of weights values (Numpy arrays).\n\n Returns:\n A list of weights values (Numpy arrays).\n \"\"\"\n trainable_weights = weights[:len(layer.trainable_weights)]\n non_trainable_weights = weights[len(layer.trainable_weights):]\n\n new_trainable_weights = []\n new_non_trainable_weights = []\n\n for sublayer in layer.layers:\n num_trainable_weights = len(sublayer.trainable_weights)\n num_non_trainable_weights = len(sublayer.non_trainable_weights)\n if sublayer.weights:\n preprocessed = preprocess_weights_for_loading(\n layer=sublayer,\n weights=(trainable_weights[:num_trainable_weights] +\n non_trainable_weights[:num_non_trainable_weights]),\n original_keras_version=original_keras_version,\n original_backend=original_backend)\n new_trainable_weights.extend(preprocessed[:num_trainable_weights])\n new_non_trainable_weights.extend(preprocessed[num_trainable_weights:])\n\n trainable_weights = trainable_weights[num_trainable_weights:]\n non_trainable_weights = non_trainable_weights[\n num_non_trainable_weights:]\n new_trainable_weights += layer._trainable_weights\n new_non_trainable_weights += layer._non_trainable_weights\n return new_trainable_weights + new_non_trainable_weights\n\n # Convert layers nested in Bidirectional/Model/Sequential.\n # Both transformation should be ran for both Keras 1->2 conversion\n # and for conversion of cuDNN layers.\n if layer.__class__.__name__ == 'Bidirectional':\n weights = convert_nested_bidirectional(weights)\n if 
layer.__class__.__name__ == 'TimeDistributed':\n weights = convert_nested_time_distributed(weights)\n elif layer.__class__.__name__ in ['Model', 'Sequential', 'Functional']:\n weights = convert_nested_model(weights)\n\n if original_keras_version == '1':\n if layer.__class__.__name__ == 'TimeDistributed':\n weights = preprocess_weights_for_loading(\n layer.layer, weights, original_keras_version, original_backend)\n\n if layer.__class__.__name__ == 'Conv1D':\n shape = weights[0].shape\n # Handle Keras 1.1 format\n if shape[:2] != (layer.kernel_size[0], 1) or shape[3] != layer.filters:\n # Legacy shape:\n # (filters, input_dim, filter_length, 1)\n assert shape[0] == layer.filters and shape[2:] == (layer.kernel_size[0],\n 1)\n weights[0] = np.transpose(weights[0], (2, 3, 1, 0))\n weights[0] = weights[0][:, 0, :, :]\n\n if layer.__class__.__name__ == 'Conv2D':\n if layer.data_format == 'channels_first':\n # old: (filters, stack_size, kernel_rows, kernel_cols)\n # new: (kernel_rows, kernel_cols, stack_size, filters)\n weights[0] = np.transpose(weights[0], (2, 3, 1, 0))\n\n if layer.__class__.__name__ == 'Conv2DTranspose':\n if layer.data_format == 'channels_last':\n # old: (kernel_rows, kernel_cols, stack_size, filters)\n # new: (kernel_rows, kernel_cols, filters, stack_size)\n weights[0] = np.transpose(weights[0], (0, 1, 3, 2))\n if layer.data_format == 'channels_first':\n # old: (filters, stack_size, kernel_rows, kernel_cols)\n # new: (kernel_rows, kernel_cols, filters, stack_size)\n weights[0] = np.transpose(weights[0], (2, 3, 0, 1))\n\n if layer.__class__.__name__ == 'Conv3D':\n if layer.data_format == 'channels_first':\n # old: (filters, stack_size, ...)\n # new: (..., stack_size, filters)\n weights[0] = np.transpose(weights[0], (2, 3, 4, 1, 0))\n\n if layer.__class__.__name__ == 'GRU':\n if len(weights) == 9:\n kernel = np.concatenate([weights[0], weights[3], weights[6]], axis=-1)\n recurrent_kernel = np.concatenate(\n [weights[1], weights[4], weights[7]], axis=-1)\n bias = np.concatenate([weights[2], weights[5], weights[8]], axis=-1)\n weights = [kernel, recurrent_kernel, bias]\n\n if layer.__class__.__name__ == 'LSTM':\n if len(weights) == 12:\n # old: i, c, f, o\n # new: i, f, c, o\n kernel = np.concatenate(\n [weights[0], weights[6], weights[3], weights[9]], axis=-1)\n recurrent_kernel = np.concatenate(\n [weights[1], weights[7], weights[4], weights[10]], axis=-1)\n bias = np.concatenate(\n [weights[2], weights[8], weights[5], weights[11]], axis=-1)\n weights = [kernel, recurrent_kernel, bias]\n\n if layer.__class__.__name__ == 'ConvLSTM2D':\n if len(weights) == 12:\n kernel = np.concatenate(\n [weights[0], weights[6], weights[3], weights[9]], axis=-1)\n recurrent_kernel = np.concatenate(\n [weights[1], weights[7], weights[4], weights[10]], axis=-1)\n bias = np.concatenate(\n [weights[2], weights[8], weights[5], weights[11]], axis=-1)\n if layer.data_format == 'channels_first':\n # old: (filters, stack_size, kernel_rows, kernel_cols)\n # new: (kernel_rows, kernel_cols, stack_size, filters)\n kernel = np.transpose(kernel, (2, 3, 1, 0))\n recurrent_kernel = np.transpose(recurrent_kernel, (2, 3, 1, 0))\n weights = [kernel, recurrent_kernel, bias]\n\n conv_layers = ['Conv1D', 'Conv2D', 'Conv3D', 'Conv2DTranspose', 'ConvLSTM2D']\n if layer.__class__.__name__ in conv_layers:\n if backend.int_shape(layer.weights[0]) != weights[0].shape:\n weights[0] = np.transpose(weights[0], (3, 2, 0, 1))\n if layer.__class__.__name__ == 'ConvLSTM2D':\n weights[1] = np.transpose(weights[1], (3, 2, 0, 1))\n\n 
# convert cuDNN layers\n return _convert_rnn_weights(layer, weights)\n\n\ndef _convert_rnn_weights(layer, weights):\n \"\"\"Converts weights for RNN layers between native and cuDNN format.\n\n Input kernels for each gate are transposed and converted between Fortran\n and C layout, recurrent kernels are transposed. For LSTM biases are summed/\n split in half, for GRU biases are reshaped.\n\n Weights can be converted in both directions between `LSTM` and`CuDNNSLTM`\n and between `CuDNNGRU` and `GRU(reset_after=True)`. Default `GRU` is not\n compatible with `CuDNNGRU`.\n\n For missing biases in `LSTM`/`GRU` (`use_bias=False`) no conversion is made.\n\n Args:\n layer: Target layer instance.\n weights: List of source weights values (input kernels, recurrent kernels,\n [biases]) (Numpy arrays).\n\n Returns:\n A list of converted weights values (Numpy arrays).\n\n Raises:\n ValueError: for incompatible GRU layer/weights or incompatible biases\n \"\"\"\n\n def transform_kernels(kernels, func, n_gates):\n \"\"\"Transforms kernel for each gate separately using given function.\n\n Args:\n kernels: Stacked array of kernels for individual gates.\n func: Function applied to kernel of each gate.\n n_gates: Number of gates (4 for LSTM, 3 for GRU).\n\n Returns:\n Stacked array of transformed kernels.\n \"\"\"\n return np.hstack([func(k) for k in np.hsplit(kernels, n_gates)])\n\n def transpose_input(from_cudnn):\n \"\"\"Makes a function that transforms input kernels from/to cuDNN format.\n\n It keeps the shape, but changes between the layout (Fortran/C). Eg.:\n\n ```\n Keras cuDNN\n [[0, 1, 2], <---> [[0, 2, 4],\n [3, 4, 5]] [1, 3, 5]]\n ```\n\n It can be passed to `transform_kernels()`.\n\n Args:\n from_cudnn: `True` if source weights are in cuDNN format, `False` if\n they're in plain Keras format.\n\n Returns:\n Function that converts input kernel to the other format.\n \"\"\"\n order = 'F' if from_cudnn else 'C'\n\n def transform(kernel):\n return kernel.T.reshape(kernel.shape, order=order)\n\n return transform\n\n target_class = layer.__class__.__name__\n\n # convert the weights between CuDNNLSTM and LSTM\n if target_class in ['LSTM', 'CuDNNLSTM'] and len(weights) == 3:\n # determine if we're loading a CuDNNLSTM layer\n # from the number of bias weights:\n # CuDNNLSTM has (units * 8) weights; while LSTM has (units * 4)\n # if there's no bias weight in the file, skip this conversion\n units = weights[1].shape[0]\n bias_shape = weights[2].shape\n n_gates = 4\n\n if bias_shape == (2 * units * n_gates,):\n source = 'CuDNNLSTM'\n elif bias_shape == (units * n_gates,):\n source = 'LSTM'\n else:\n raise ValueError('Invalid bias shape: ' + str(bias_shape))\n\n def convert_lstm_weights(weights, from_cudnn=True):\n \"\"\"Converts the weights between CuDNNLSTM and LSTM.\n\n Args:\n weights: Original weights.\n from_cudnn: Indicates whether original weights are from cuDNN layer.\n\n Returns:\n Updated weights compatible with LSTM.\n \"\"\"\n\n # Transpose (and reshape) input and recurrent kernels\n kernels = transform_kernels(weights[0], transpose_input(from_cudnn),\n n_gates)\n recurrent_kernels = transform_kernels(weights[1], lambda k: k.T, n_gates)\n if from_cudnn:\n # merge input and recurrent biases into a single set\n biases = np.sum(np.split(weights[2], 2, axis=0), axis=0)\n else:\n # Split single set of biases evenly to two sets. 
The way of\n # splitting doesn't matter as long as the two sets sum is kept.\n biases = np.tile(0.5 * weights[2], 2)\n return [kernels, recurrent_kernels, biases]\n\n if source != target_class:\n weights = convert_lstm_weights(weights, from_cudnn=source == 'CuDNNLSTM')\n\n # convert the weights between CuDNNGRU and GRU(reset_after=True)\n if target_class in ['GRU', 'CuDNNGRU'] and len(weights) == 3:\n # We can determine the source of the weights from the shape of the bias.\n # If there is no bias we skip the conversion since\n # CuDNNGRU always has biases.\n\n units = weights[1].shape[0]\n bias_shape = weights[2].shape\n n_gates = 3\n\n def convert_gru_weights(weights, from_cudnn=True):\n \"\"\"Converts the weights between CuDNNGRU and GRU.\n\n Args:\n weights: Original weights.\n from_cudnn: Indicates whether original weights are from cuDNN layer.\n\n Returns:\n Updated weights compatible with GRU.\n \"\"\"\n\n kernels = transform_kernels(weights[0], transpose_input(from_cudnn),\n n_gates)\n recurrent_kernels = transform_kernels(weights[1], lambda k: k.T, n_gates)\n biases = np.array(weights[2]).reshape((2, -1) if from_cudnn else -1)\n return [kernels, recurrent_kernels, biases]\n\n if bias_shape == (2 * units * n_gates,):\n source = 'CuDNNGRU'\n elif bias_shape == (2, units * n_gates):\n source = 'GRU(reset_after=True)'\n elif bias_shape == (units * n_gates,):\n source = 'GRU(reset_after=False)'\n else:\n raise ValueError('Invalid bias shape: ' + str(bias_shape))\n\n if target_class == 'CuDNNGRU':\n target = 'CuDNNGRU'\n elif layer.reset_after:\n target = 'GRU(reset_after=True)'\n else:\n target = 'GRU(reset_after=False)'\n\n # only convert between different types\n if source != target:\n types = (source, target)\n if 'GRU(reset_after=False)' in types:\n raise ValueError('%s is not compatible with %s' % types)\n if source == 'CuDNNGRU':\n weights = convert_gru_weights(weights, from_cudnn=True)\n elif source == 'GRU(reset_after=True)':\n weights = convert_gru_weights(weights, from_cudnn=False)\n\n return weights\n\n\ndef save_optimizer_weights_to_hdf5_group(hdf5_group, optimizer):\n \"\"\"Saves optimizer weights of a optimizer to a HDF5 group.\n\n Args:\n hdf5_group: HDF5 group.\n optimizer: optimizer instance.\n \"\"\"\n\n symbolic_weights = getattr(optimizer, 'weights')\n if symbolic_weights:\n weights_group = hdf5_group.create_group('optimizer_weights')\n weight_names = [str(w.name).encode('utf8') for w in symbolic_weights]\n save_attributes_to_hdf5_group(weights_group, 'weight_names', weight_names)\n weight_values = backend.batch_get_value(symbolic_weights)\n for name, val in zip(weight_names, weight_values):\n param_dset = weights_group.create_dataset(\n name, val.shape, dtype=val.dtype)\n if not val.shape:\n # scalar\n param_dset[()] = val\n else:\n param_dset[:] = val\n\n\ndef load_optimizer_weights_from_hdf5_group(hdf5_group):\n \"\"\"Load optimizer weights from a HDF5 group.\n\n Args:\n hdf5_group: A pointer to a HDF5 group.\n\n Returns:\n data: List of optimizer weight names.\n \"\"\"\n weights_group = hdf5_group['optimizer_weights']\n optimizer_weight_names = load_attributes_from_hdf5_group(\n weights_group, 'weight_names')\n return [weights_group[weight_name] for weight_name in optimizer_weight_names]\n\n\ndef save_subset_weights_to_hdf5_group(f, weights):\n \"\"\"Save top-level weights of a model to a HDF5 group.\n\n Args:\n f: HDF5 group.\n weights: List of weight variables.\n \"\"\"\n weight_values = backend.batch_get_value(weights)\n weight_names = [w.name.encode('utf8') 
for w in weights]\n save_attributes_to_hdf5_group(f, 'weight_names', weight_names)\n for name, val in zip(weight_names, weight_values):\n param_dset = f.create_dataset(name, val.shape, dtype=val.dtype)\n if not val.shape:\n # scalar\n param_dset[()] = val\n else:\n param_dset[:] = val\n\n\ndef save_weights_to_hdf5_group(f, model):\n \"\"\"Saves the weights of a list of layers to a HDF5 group.\n\n Args:\n f: HDF5 group.\n model: Model instance.\n \"\"\"\n from keras import __version__ as keras_version # pylint: disable=g-import-not-at-top\n save_attributes_to_hdf5_group(\n f, 'layer_names', [layer.name.encode('utf8') for layer in model.layers])\n f.attrs['backend'] = backend.backend().encode('utf8')\n f.attrs['keras_version'] = str(keras_version).encode('utf8')\n\n # Sort model layers by layer name to ensure that group names are strictly\n # growing to avoid prefix issues.\n for layer in sorted(model.layers, key=lambda x: x.name):\n g = f.create_group(layer.name)\n weights = _legacy_weights(layer)\n save_subset_weights_to_hdf5_group(g, weights)\n weights = model._trainable_weights + model._non_trainable_weights\n g = f.create_group('top_level_model_weights')\n save_subset_weights_to_hdf5_group(g, weights)\n\n\ndef load_subset_weights_from_hdf5_group(f):\n \"\"\"Load layer weights of a model from hdf5.\n\n Args:\n f: A pointer to a HDF5 group.\n\n Returns:\n List of NumPy arrays of the weight values.\n\n Raises:\n ValueError: in case of mismatch between provided model\n and weights file.\n \"\"\"\n weight_names = load_attributes_from_hdf5_group(f, 'weight_names')\n return [np.asarray(f[weight_name]) for weight_name in weight_names]\n\n\ndef load_weights_from_hdf5_group(f, model):\n \"\"\"Implements topological (order-based) weight loading.\n\n Args:\n f: A pointer to a HDF5 group.\n model: Model instance.\n\n Raises:\n ValueError: in case of mismatch between provided layers\n and weights file.\n \"\"\"\n if 'keras_version' in f.attrs:\n original_keras_version = f.attrs['keras_version']\n if hasattr(original_keras_version, 'decode'):\n original_keras_version = original_keras_version.decode('utf8')\n else:\n original_keras_version = '1'\n if 'backend' in f.attrs:\n original_backend = f.attrs['backend']\n if hasattr(original_backend, 'decode'):\n original_backend = original_backend.decode('utf8')\n else:\n original_backend = None\n\n filtered_layers = []\n for layer in model.layers:\n weights = _legacy_weights(layer)\n if weights:\n filtered_layers.append(layer)\n\n layer_names = load_attributes_from_hdf5_group(f, 'layer_names')\n filtered_layer_names = []\n for name in layer_names:\n g = f[name]\n weight_names = load_attributes_from_hdf5_group(g, 'weight_names')\n if weight_names:\n filtered_layer_names.append(name)\n layer_names = filtered_layer_names\n if len(layer_names) != len(filtered_layers):\n raise ValueError(\n f'Layer count mismatch when loading weights from file. 
'\n f'Model expected {len(filtered_layers)} layers, found '\n f'{len(layer_names)} saved layers.')\n\n # We batch weight value assignments in a single backend call\n # which provides a speedup in TensorFlow.\n weight_value_tuples = []\n for k, name in enumerate(layer_names):\n g = f[name]\n layer = filtered_layers[k]\n symbolic_weights = _legacy_weights(layer)\n weight_values = load_subset_weights_from_hdf5_group(g)\n weight_values = preprocess_weights_for_loading(layer, weight_values,\n original_keras_version,\n original_backend)\n if len(weight_values) != len(symbolic_weights):\n raise ValueError(\n f'Weight count mismatch for layer #{k} (named {layer.name} in the '\n f'current model, {name} in the save file). '\n f'Layer expects {len(symbolic_weights)} weight(s). Received '\n f'{len(weight_values)} saved weight(s)')\n weight_value_tuples += zip(symbolic_weights, weight_values)\n\n if 'top_level_model_weights' in f:\n symbolic_weights = model._trainable_weights + model._non_trainable_weights\n weight_values = load_subset_weights_from_hdf5_group(\n f['top_level_model_weights'])\n if len(weight_values) != len(symbolic_weights):\n raise ValueError(\n f'Weight count mismatch for top-level weights when loading weights '\n f'from file. '\n f'Model expects {len(symbolic_weights)} top-level weight(s). '\n f'Received {len(weight_values)} saved top-level weight(s)')\n weight_value_tuples += zip(symbolic_weights, weight_values)\n backend.batch_set_value(weight_value_tuples)\n\n\ndef load_weights_from_hdf5_group_by_name(f, model, skip_mismatch=False):\n \"\"\"Implements name-based weight loading (instead of topological loading).\n\n Layers that have no matching name are skipped.\n\n Args:\n f: A pointer to a HDF5 group.\n model: Model instance.\n skip_mismatch: Boolean, whether to skip loading of layers\n where there is a mismatch in the number of weights,\n or a mismatch in the shape of the weights.\n\n Raises:\n ValueError: in case of mismatch between provided layers\n and weights file and skip_match=False.\n \"\"\"\n if 'keras_version' in f.attrs:\n original_keras_version = f.attrs['keras_version']\n if hasattr(original_keras_version, 'decode'):\n original_keras_version = original_keras_version.decode('utf8')\n else:\n original_keras_version = '1'\n if 'backend' in f.attrs:\n original_backend = f.attrs['backend']\n if hasattr(original_backend, 'decode'):\n original_backend = original_backend.decode('utf8')\n else:\n original_backend = None\n\n # New file format.\n layer_names = load_attributes_from_hdf5_group(f, 'layer_names')\n\n # Reverse index of layer name to list of layers with name.\n index = {}\n for layer in model.layers:\n if layer.name:\n index.setdefault(layer.name, []).append(layer)\n\n # We batch weight value assignments in a single backend call\n # which provides a speedup in TensorFlow.\n weight_value_tuples = []\n for k, name in enumerate(layer_names):\n g = f[name]\n weight_values = load_subset_weights_from_hdf5_group(g)\n for layer in index.get(name, []):\n symbolic_weights = _legacy_weights(layer)\n weight_values = preprocess_weights_for_loading(\n layer, weight_values, original_keras_version, original_backend)\n if len(weight_values) != len(symbolic_weights):\n if skip_mismatch:\n logging.warning(\n f'Skipping loading of weights for layer #{k} (named '\n f'{layer.name}) due to mismatch in number of weights. '\n f'Layer expects {len(symbolic_weights)} weight(s). 
Received '\n f'{len(weight_values)} saved weight(s)')\n continue\n raise ValueError(\n f'Weight count mismatch for layer #{k} (named {layer.name}). '\n f'Layer expects {len(symbolic_weights)} weight(s). Received '\n f'{len(weight_values)} saved weight(s)')\n # Set values.\n for i in range(len(weight_values)):\n expected_shape = backend.int_shape(symbolic_weights[i])\n received_shape = weight_values[i].shape\n if expected_shape != received_shape:\n if skip_mismatch:\n logging.warning(\n f'Skipping loading weights for layer #{k} (named '\n f'{layer.name}) due to mismatch in shape for weight '\n f'{symbolic_weights[i].name}. '\n f'Weight expects shape {expected_shape}. Received saved weight '\n f'with shape {received_shape}')\n continue\n raise ValueError(\n f'Shape mismatch in layer #{k} (named {layer.name}) for weight '\n f'{symbolic_weights[i].name}. '\n f'Weight expects shape {expected_shape}. Received saved weight '\n f'with shape {received_shape}')\n else:\n weight_value_tuples.append((symbolic_weights[i], weight_values[i]))\n\n if 'top_level_model_weights' in f:\n symbolic_weights = model._trainable_weights + model._non_trainable_weights\n weight_values = load_subset_weights_from_hdf5_group(\n f['top_level_model_weights'])\n\n if len(weight_values) != len(symbolic_weights):\n if skip_mismatch:\n logging.warning(\n f'Skipping loading top-level weights for model due to mismatch '\n f'in number of weights. '\n f'Model expects {len(symbolic_weights)} top-level weight(s). '\n f'Received {len(weight_values)} saved top-level weight(s)')\n else:\n raise ValueError(\n f'Weight count mismatch for top-level weights of model. '\n f'Model expects {len(symbolic_weights)} top-level weight(s). '\n f'Received {len(weight_values)} saved top-level weight(s)')\n else:\n for i in range(len(weight_values)):\n expected_shape = backend.int_shape(symbolic_weights[i])\n received_shape = weight_values[i].shape\n if expected_shape != received_shape:\n if skip_mismatch:\n logging.warning(\n f'Skipping loading top-level weight for model due to '\n f'mismatch in shape for weight {symbolic_weights[i].name}. '\n f'Weight expects shape {expected_shape}. Received saved weight '\n f'with shape {received_shape}')\n else:\n raise ValueError(\n f'Shape mismatch in model for top-level weight '\n f'{symbolic_weights[i].name}. '\n f'Weight expects shape {expected_shape}. 
Received saved weight '\n f'with shape {received_shape}')\n else:\n weight_value_tuples.append((symbolic_weights[i], weight_values[i]))\n\n backend.batch_set_value(weight_value_tuples)\n\n\ndef save_attributes_to_hdf5_group(group, name, data):\n \"\"\"Saves attributes (data) of the specified name into the HDF5 group.\n\n This method deals with an inherent problem of HDF5 file which is not\n able to store data larger than HDF5_OBJECT_HEADER_LIMIT bytes.\n\n Args:\n group: A pointer to a HDF5 group.\n name: A name of the attributes to save.\n data: Attributes data to store.\n\n Raises:\n RuntimeError: If any single attribute is too large to be saved.\n \"\"\"\n # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`\n # because in that case even chunking the array would not make the saving\n # possible.\n bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]\n\n # Expecting this to never be true.\n if bad_attributes:\n raise RuntimeError(\n 'The following attributes cannot be saved to HDF5 file because they '\n f'are larger than {HDF5_OBJECT_HEADER_LIMIT} bytes: {bad_attributes}')\n\n data_npy = np.asarray(data)\n\n num_chunks = 1\n chunked_data = np.array_split(data_npy, num_chunks)\n\n # This will never loop forever thanks to the test above.\n while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):\n num_chunks += 1\n chunked_data = np.array_split(data_npy, num_chunks)\n\n if num_chunks > 1:\n for chunk_id, chunk_data in enumerate(chunked_data):\n group.attrs['%s%d' % (name, chunk_id)] = chunk_data\n else:\n group.attrs[name] = data\n\n\ndef load_attributes_from_hdf5_group(group, name):\n \"\"\"Loads attributes of the specified name from the HDF5 group.\n\n This method deals with an inherent problem\n of HDF5 file which is not able to store\n data larger than HDF5_OBJECT_HEADER_LIMIT bytes.\n\n Args:\n group: A pointer to a HDF5 group.\n name: A name of the attributes to load.\n\n Returns:\n data: Attributes data.\n \"\"\"\n if name in group.attrs:\n data = [\n n.decode('utf8') if hasattr(n, 'decode') else n\n for n in group.attrs[name]\n ]\n else:\n data = []\n chunk_id = 0\n while '%s%d' % (name, chunk_id) in group.attrs:\n data.extend([\n n.decode('utf8') if hasattr(n, 'decode') else n\n for n in group.attrs['%s%d' % (name, chunk_id)]\n ])\n chunk_id += 1\n return data\n\n\ndef _legacy_weights(layer):\n \"\"\"DO NOT USE.\n\n For legacy reason, the layer.weights was in the order of\n [self.trainable_weights + self.non_trainable_weights], and this order was\n used for preserving the weights in h5 format. The new order of layer.weights\n are the same as layer.get_weights() which is more intuitive for user. To\n keep supporting the existing saved h5 file, this method should be used to\n save/load weights. In future version, we will delete this method and\n introduce a breaking change for h5 and stay with the new order for weights.\n\n Args:\n layer: a `tf.keras.Model` or `tf.keras.layers.Layer` instance.\n\n Returns:\n A list of variables with the order of trainable_weights, followed by\n non_trainable_weights.\n \"\"\"\n weights = layer.trainable_weights + layer.non_trainable_weights\n if any(not isinstance(w, tf.Variable) for w in weights):\n raise NotImplementedError(\n f'Save or restore weights that is not an instance of `tf.Variable` is '\n f'not supported in h5, use `save_format=\\'tf\\'` instead. Received a '\n f'model or layer {layer.__class__.__name__} with weights {weights}')\n return weights\n" ]
[ [ "tensorflow.compat.v2.__internal__.tracking.wrap" ], [ "tensorflow.python.platform.tf_logging.warning", "numpy.split", "numpy.asarray", "numpy.tile", "tensorflow.compat.v2.io.gfile.makedirs", "numpy.hsplit", "numpy.concatenate", "numpy.transpose", "numpy.array", "numpy.array_split" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Pengchengpcx/Neighbor-Sampling-GCN
[ "4b47385bdbfeb5957a56b05c441482e701dd10de" ]
[ "build/lib/pygcn/utils.py" ]
[ "import numpy as np\nimport scipy.sparse as sp\nimport torch\n\n\ndef encode_onehot(labels):\n classes = set(labels)\n classes_dict = {c: np.identity(len(classes))[i, :] for i, c in\n enumerate(classes)}\n labels_onehot = np.array(list(map(classes_dict.get, labels)),\n dtype=np.int32)\n return labels_onehot\n\n\ndef load_data(path=\"../data/cora/\", dataset=\"cora\"):\n \"\"\"Load citation network dataset (cora only for now)\"\"\"\n print('Loading {} dataset...'.format(dataset))\n\n idx_features_labels = np.genfromtxt(\"{}{}.content\".format(path, dataset),\n dtype=np.dtype(str))\n features = sp.csr_matrix(idx_features_labels[:, 1:-1], dtype=np.float32)\n labels = encode_onehot(idx_features_labels[:, -1])\n\n # build graph\n idx = np.array(idx_features_labels[:, 0], dtype=np.int32)\n idx_map = {j: i for i, j in enumerate(idx)}\n edges_unordered = np.genfromtxt(\"{}{}.cites\".format(path, dataset),\n dtype=np.int32)\n edges = np.array(list(map(idx_map.get, edges_unordered.flatten())),\n dtype=np.int32).reshape(edges_unordered.shape)\n adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),\n shape=(labels.shape[0], labels.shape[0]),\n dtype=np.float32)\n\n # build symmetric adjacency matrix\n adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)\n\n features = normalize(features)\n adj = normalize(adj + sp.eye(adj.shape[0]))\n\n idx_train = range(140)\n idx_val = range(200, 500)\n idx_test = range(500, 1500)\n\n features = torch.FloatTensor(np.array(features.todense()))\n labels = torch.LongTensor(np.where(labels)[1])\n adj = sparse_mx_to_torch_sparse_tensor(adj)\n\n idx_train = torch.LongTensor(idx_train)\n idx_val = torch.LongTensor(idx_val)\n idx_test = torch.LongTensor(idx_test)\n\n return adj, features, labels, idx_train, idx_val, idx_test\n\n\ndef normalize(mx):\n \"\"\"Row-normalize sparse matrix\"\"\"\n rowsum = np.array(mx.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n mx = r_mat_inv.dot(mx)\n return mx\n\n\ndef accuracy(output, labels):\n preds = output.max(1)[1].type_as(labels)\n correct = preds.eq(labels).double()\n correct = correct.sum()\n return correct / len(labels)\n\n\ndef sparse_mx_to_torch_sparse_tensor(sparse_mx):\n \"\"\"Convert a scipy sparse matrix to a torch sparse tensor.\"\"\"\n sparse_mx = sparse_mx.tocoo().astype(np.float32)\n indices = torch.from_numpy(\n np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))\n values = torch.from_numpy(sparse_mx.data)\n shape = torch.Size(sparse_mx.shape)\n return torch.sparse.FloatTensor(indices, values, shape)\n\ndef sub_graph(adj, num):\n '''\n Monte carlo sample a number of neighbors for each node given the adjacent matrix\n adj: normalized and processed graph adjacent matrix\n num: the number of samples for each neighbor\n '''\n nodes = adj.shape[0]\n neighbor_number = torch.sum(adj>0,dim=1).reshape(node,1)/num\n sub_graph = torch.randint(0,nodes, (nodes,num))\n sub_graph = sub_graph.reshape(-1).cpu().tolist()\n sub_graph = list(set(sub_graph))\n mask = torch.zeros(nodes,nodes)\n mask[sub_graph,sub_graph] = 1\n \n return adj*mask*neighbor_number\n\n \n\n" ]
[ [ "torch.LongTensor", "torch.Size", "torch.randint", "torch.zeros", "scipy.sparse.eye", "numpy.power", "scipy.sparse.diags", "torch.from_numpy", "scipy.sparse.csr_matrix", "numpy.dtype", "numpy.ones", "torch.sum", "torch.sparse.FloatTensor", "numpy.array", "numpy.where", "numpy.isinf", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
EricLina/attn2d
[ "12c3a53887c985ae24199ecef2f7b2335fe214c6", "12c3a53887c985ae24199ecef2f7b2335fe214c6", "12c3a53887c985ae24199ecef2f7b2335fe214c6" ]
[ "examples/pervasive/modules/archive/expanding_resnet.py", "examples/pervasive/modules/archive/resnet_addup3.py", "examples/pervasive/modules/archive/densenet_bn.py" ]
[ "# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the LICENSE file in\n# the root directory of this source tree. An additional grant of patent rights\n# can be found in the PATENTS file in the same directory.\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n# import torch.utils.checkpoint as cp\n\nfrom fairseq.modules import (\n MaskedConvolution, MultiheadMaskedConvolution\n)\n\n\nclass ExpandingResNet(nn.Module):\n \"\"\" A network of residual convolutional layers\"\"\"\n\n def __init__(self, num_init_features, args):\n super().__init__()\n num_layers = args.num_layers\n num_features = num_init_features\n self.reduce_channels = Linear(num_features, num_features // args.divide_channels) if args.divide_channels > 1 else None\n num_features = num_features // args.divide_channels\n self.output_channels = num_features\n self.add_up_scale = 1 / (num_layers + 1)\n\n self.residual_blocks = nn.ModuleList([])\n for i in range(num_layers):\n kernel_size = 2 * (i + 1) + 1\n print('Layer ', i, kernel_size)\n self.residual_blocks.append(_ResLayer(num_features, kernel_size, args))\n \n def forward(self, x, \n encoder_mask=None,\n decoder_mask=None,\n incremental_state=None):\n \"\"\"\n Input : N, Tt, Ts, C\n Output : N, Tt, Ts, C\n \"\"\"\n if self.reduce_channels is not None:\n x = self.reduce_channels(x)\n add_up = self.add_up_scale * x\n for layer in self.residual_blocks:\n x = layer(x,\n encoder_mask=encoder_mask,\n decoder_mask=decoder_mask,\n incremental_state=incremental_state)\n add_up += self.add_up_scale * x\n return add_up\n\n\nclass _ResLayer(nn.Module):\n \"\"\" Single residual layer\n\n num_input_features - number of input channels to the layer\n kernel_size - size of masked convolution, k x (k // 2)\n drop_rate - dropout rate\n \"\"\"\n\n def __init__(self, num_features, kernel_size, args):\n super().__init__()\n self.drop_rate = args.convolution_dropout\n ffn_dim = args.ffn_dim\n mid_features = args.reduce_dim\n stride = args.conv_stride # source dimension stride\n dilsrc = args.source_dilation\n diltrg = args.target_dilation\n resolution = args.maintain_resolution\n if resolution:\n if not stride == 1:\n raise ValueError('Could not maintain the resolution with stride=%d' % stride)\n\n # choose the padding accordingly:\n padding_trg = diltrg * (kernel_size - 1) // 2\n padding_src = dilsrc * (kernel_size - 1) // 2\n padding = (padding_trg, padding_src)\n else:\n # must maintain the target resolution:\n padding = (diltrg * (kernel_size - 1) // 2, 0)\n\n # Reduce dim should be dividible by groups\n self.conv1 = nn.Conv2d(num_features,\n mid_features,\n kernel_size=1,\n stride=1,\n bias=False)\n\n self.mconv2 = MaskedConvolution(\n mid_features, num_features,\n kernel_size, args,\n padding=padding,\n )\n self.fc1 = Linear(num_features, ffn_dim)\n self.fc2 = Linear(ffn_dim, num_features)\n self.scale = 0.5 ** .5\n\n def forward(self, x, \n encoder_mask=None,\n decoder_mask=None,\n incremental_state=None):\n residual = x\n x = x.permute(0, 3, 1, 2)\n x = self.conv1(x)\n # x = F.relu(x)\n x = self.mconv2(x, incremental_state)\n if self.training:\n if encoder_mask is not None:\n x = x.masked_fill(encoder_mask.unsqueeze(1).unsqueeze(1), 0)\n if decoder_mask is not None:\n x = x.masked_fill(decoder_mask.unsqueeze(1).unsqueeze(-1), 0)\n\n if self.drop_rate:\n x = F.dropout(x, p=self.drop_rate, training=self.training)\n x = x.permute(0, 2, 3, 1)\n x = self.scale * (x + residual) # N, C, Tt, 
Ts\n # FFN:\n residual = x\n x = self.fc1(x)\n x = F.relu(x)\n x = self.fc2(x)\n if self.drop_rate:\n x = F.dropout(x, p=self.drop_rate, training=self.training)\n x = self.scale * (x + residual)\n return x\n\n\ndef Linear(in_features, out_features, bias=True):\n m = nn.Linear(in_features, out_features, bias)\n nn.init.xavier_uniform_(m.weight)\n if bias:\n nn.init.constant_(m.bias, 0.)\n return m\n\n", "# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the LICENSE file in\n# the root directory of this source tree. An additional grant of patent rights\n# can be found in the PATENTS file in the same directory.\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n# import torch.utils.checkpoint as cp\n\nfrom fairseq.modules import (\n MaskedConvolution, MultiheadMaskedConvolution\n)\n\n\nclass ResNetAddUp3(nn.Module):\n \"\"\" A network of residual convolutional layers\"\"\"\n\n def __init__(self, num_init_features, args):\n super().__init__()\n num_layers = args.num_layers\n kernel_size = args.kernel_size\n num_features = num_init_features\n self.reduce_channels = Linear(num_features, num_features // args.divide_channels) if args.divide_channels > 1 else None\n num_features = num_features // args.divide_channels\n self.output_channels = num_features\n layer_type = args.layer_type\n self.add_up_scale = 1 / (num_layers + 1)\n\n self.residual_blocks = nn.ModuleList([])\n for _ in range(num_layers):\n self.residual_blocks.append(_ResLayer(num_features, kernel_size, args))\n \n def forward(self, x,\n encoder_mask=None,\n decoder_mask=None,\n incremental_state=None\n ):\n \"\"\"\n Input : N, Tt, Ts, C\n Output : N, Tt, Ts, C\n \"\"\"\n if self.reduce_channels is not None:\n x = self.reduce_channels(x)\n add_up = self.add_up_scale * x\n for layer in self.residual_blocks:\n x = layer(x,\n encoder_mask=encoder_mask,\n decoder_mask=decoder_mask,\n incremental_state=incremental_state\n )\n add_up += self.add_up_scale * x\n return add_up\n\n\nclass _ResLayer(nn.Module):\n \"\"\" Single residual layer\n\n num_input_features - number of input channels to the layer\n kernel_size - size of masked convolution, k x (k // 2)\n drop_rate - dropout rate\n \"\"\"\n\n def __init__(self, num_features, kernel_size, args):\n super().__init__()\n self.drop_rate = args.convolution_dropout\n ffn_dim = args.ffn_dim\n mid_features = args.reduce_dim\n stride = args.conv_stride # source dimension stride\n dilsrc = args.source_dilation\n diltrg = args.target_dilation\n resolution = args.maintain_resolution\n if resolution:\n if not stride == 1:\n raise ValueError('Could not maintain the resolution with stride=%d' % stride)\n\n # choose the padding accordingly:\n padding_trg = diltrg * (kernel_size - 1) // 2\n padding_src = dilsrc * (kernel_size - 1) // 2\n padding = (padding_trg, padding_src)\n else:\n # must maintain the target resolution:\n padding = (diltrg * (kernel_size - 1) // 2, 0)\n\n # Reduce dim should be dividible by groups\n self.conv1 = nn.Conv2d(num_features,\n mid_features,\n kernel_size=1,\n stride=1,\n bias=False)\n\n self.mconv2 = MaskedConvolution(\n mid_features, num_features,\n kernel_size, args,\n padding=padding,\n )\n self.ln1 = nn.LayerNorm(num_features)\n self.fc1 = Linear(num_features, ffn_dim)\n self.fc2 = Linear(ffn_dim, num_features)\n self.ln2 = nn.LayerNorm(num_features)\n self.scale = 0.5 ** .5\n\n def forward(self, x, \n encoder_mask=None,\n decoder_mask=None,\n incremental_state=None\n 
):\n residual = x\n x = x.permute(0, 3, 1, 2)\n x = self.conv1(x)\n # x = F.relu(x)\n x = self.mconv2(x, incremental_state)\n if self.training:\n if encoder_mask is not None:\n x = x.masked_fill(encoder_mask.unsqueeze(1).unsqueeze(1), 0)\n if decoder_mask is not None:\n x = x.masked_fill(decoder_mask.unsqueeze(1).unsqueeze(-1), 0)\n\n if self.drop_rate:\n x = F.dropout(x, p=self.drop_rate, training=self.training)\n x = x.permute(0, 2, 3, 1)\n x = self.scale * (x + residual) # N, C, Tt, Ts\n x = self.ln1(x)\n # FFN:\n residual = x\n x = self.fc1(x)\n x = F.relu(x)\n x = self.fc2(x)\n if self.drop_rate:\n x = F.dropout(x, p=self.drop_rate, training=self.training)\n x = self.scale * (x + residual)\n x = self.ln2(x)\n return x\n\n\ndef Linear(in_features, out_features, bias=True):\n m = nn.Linear(in_features, out_features, bias)\n nn.init.xavier_uniform_(m.weight)\n if bias:\n nn.init.constant_(m.bias, 0.)\n return m\n\n", "# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the LICENSE file in\n# the root directory of this source tree. An additional grant of patent rights\n# can be found in the PATENTS file in the same directory.\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.checkpoint as cp\n\nfrom fairseq.modules import (\n MaskedConvolution\n)\n\n\nclass DenseNetBN(nn.Module):\n \"\"\" Single block DenseNet with checkpointing\"\"\"\n\n def __init__(self, num_init_features, args):\n super().__init__()\n divide_channels = args.divide_channels\n num_layers = args.num_layers\n growth_rate = args.growth_rate\n num_features = num_init_features\n self.reduce_channels = Linear(\n num_features,\n num_features // args.divide_channels\n ) if args.divide_channels > 1 else None\n num_features = num_features // args.divide_channels\n\n self.dense_layers = nn.ModuleList([])\n\n for _ in range(num_layers):\n self.dense_layers.append(_DenseLayer(num_features, args))\n num_features += growth_rate\n self.output_channels = num_features\n\n def forward(self, x, decoder_mask=None, encoder_mask=None, incremental_state=None):\n \"\"\"\n Input : B, Tt, Ts, C\n Output : B, Tt, Ts, C\n \"\"\"\n if self.reduce_channels is not None:\n x = self.reduce_channels(x)\n # B,Tt,Ts,C >> B,C,Tt,Ts\n x = x.permute(0, 3, 1, 2)\n\n features = [x]\n for i, layer in enumerate(self.dense_layers):\n x = layer(features, \n decoder_mask=decoder_mask,\n encoder_mask=encoder_mask,\n incremental_state=incremental_state)\n features.append(x)\n\n x = torch.cat(features, 1)\n\n # Back to the original shape B, Tt,Ts,C\n x = x.permute(0, 2, 3, 1)\n return x\n\n\nclass _DenseLayer(nn.Module):\n def __init__(self, num_input_features, args):\n super().__init__()\n self.memory_efficient = args.memory_efficient\n self.drop_rate = args.convolution_dropout\n bn_size = args.bn_size\n growth_rate = args.growth_rate\n inter_features = bn_size * growth_rate\n kernel_size = args.kernel_size\n\n self.norm1 = PervasiveBatchNorm(num_input_features)\n self.conv1 = nn.Conv2d(num_input_features,\n inter_features,\n kernel_size=1,\n stride=1,\n bias=False\n )\n self.norm2 = PervasiveBatchNorm(inter_features)\n dilsrc = args.source_dilation\n diltrg = args.target_dilation\n padding_trg = diltrg * (kernel_size - 1) // 2\n padding_src = dilsrc * (kernel_size - 1) // 2\n padding = (padding_trg, padding_src)\n\n self.mconv2 = MaskedConvolution(\n inter_features, growth_rate,\n kernel_size, args,\n padding=padding,\n )\n\n def 
bottleneck_function(self, *inputs):\n print('Norm1>>>')\n x = self.norm1(torch.cat(inputs, 1))\n x = F.relu(x)\n x = self.conv1(x)\n return x\n\n def forward(self, prev_features, encoder_mask=None, decoder_mask=None, incremental_state=None):\n \"\"\"\n Memory efficient forward pass with checkpointing\n Each DenseLayer splits its forward into:\n - bottleneck_function \n - therest_function\n Prev_features as list of features in (B, C, Tt, Ts) \n encoder_padding_mask (B, Ts)\n Returns the new features alone (B, g, Tt, Ts)\n \"\"\"\n if self.memory_efficient and any(\n prev_feature.requires_grad \n for prev_feature in prev_features\n ):\n # Does not keep intermediate values,\n # but recompute them in the backward pass:\n # tradeoff btw memory & compute\n x = cp.checkpoint(\n self.bottleneck_function,\n *prev_features\n )\n else:\n x = self.bottleneck_function(*prev_features)\n\n print('Norm2>>')\n x = self.norm2(x)\n x = F.relu(x)\n x = self.mconv2(x, incremental_state)\n if encoder_mask is not None:\n x = x.masked_fill(encoder_mask.unsqueeze(1).unsqueeze(1), 0)\n if decoder_mask is not None:\n x = x.masked_fill(decoder_mask.unsqueeze(1).unsqueeze(-1), 0)\n\n if self.drop_rate:\n x = F.dropout(x, p=self.drop_rate, training=self.training)\n return x\n\n\ndef Linear(in_features, out_features, bias=True):\n m = nn.Linear(in_features, out_features, bias)\n nn.init.xavier_uniform_(m.weight)\n if bias:\n nn.init.constant_(m.bias, 0.)\n return m\n\n\nclass PervasiveBatchNorm(nn.Module):\n def __init__(self, num_features, track_running_stats=True, eps=1e-5, momentum=None):\n super().__init__()\n self.eps = eps\n self.momentum = momentum\n self.gamma = nn.Parameter(torch.ones(1, num_features, 1, 1))\n self.beta = nn.Parameter(torch.zeros(1, num_features, 1, 1))\n self.track_running_stats = track_running_stats\n if self.track_running_stats:\n self.register_buffer('running_mean', torch.zeros(1, num_features, 1, 1))\n self.register_buffer('running_std', torch.ones(1, num_features, 1, 1))\n self.register_buffer('num_batches_tracked', torch.tensor(0, dtype=torch.long))\n\n def reset_running_stats(self):\n if self.track_running_stats:\n self.running_mean.zero_()\n self.running_std.fill_(1)\n self.num_batches_tracked.zero_()\n\n def forward(self, x):\n \n if self.momentum is None: \n maf = 0.0\n else:\n maf = self.momentum\n\n if self.track_running_stats:\n if self.training:\n self.num_batches_tracked += 1\n # Current mean and std\n mean = x.mean(0, keepdim=True).mean(-1, keepdim=True) # 1,C,T,1\n std = x.std(0, keepdim=True).std(-1, keepdim=True)\n\n # moving average factor\n if self.momentum is None:\n maf = 1.0 / float(self.num_batches_tracked)\n else:\n maf = self.momentum\n use_mean = (1. - maf) * self.running_mean + maf * mean\n use_std = (1. - maf) * self.running_std + maf * std\n\n print('MAF:', maf)\n # Average across steps then update the moving stats\n self.running_mean = (1. - maf) * self.running_mean + maf * mean.mean(2, keepdim=True)\n self.running_std = (1. - maf) * self.running_std + maf * std.mean(2, keepdim=True)\n\n else:\n # Current mean and std\n use_mean = self.running_mean\n use_std = self.running_std\n\n else:\n use_mean = (1. - maf) * self.running_mean + maf * mean\n use_std = (1. - maf) * self.running_std + maf * std\n\n # print('Input', x)\n # print('Using mean:', use_mean)\n # print('Using std:', use_std)\n x = self.gamma * (x - use_mean) / (use_std + self.eps) + self.beta \n # print('Returning:', x)\n return x\n\n" ]
[ [ "torch.nn.functional.dropout", "torch.nn.init.constant_", "torch.nn.ModuleList", "torch.nn.Conv2d", "torch.nn.Linear", "torch.nn.functional.relu", "torch.nn.init.xavier_uniform_" ], [ "torch.nn.functional.dropout", "torch.nn.init.constant_", "torch.nn.ModuleList", "torch.nn.Conv2d", "torch.nn.LayerNorm", "torch.nn.Linear", "torch.nn.functional.relu", "torch.nn.init.xavier_uniform_" ], [ "torch.ones", "torch.cat", "torch.nn.init.constant_", "torch.nn.functional.dropout", "torch.nn.ModuleList", "torch.nn.Conv2d", "torch.zeros", "torch.tensor", "torch.nn.Linear", "torch.nn.functional.relu", "torch.utils.checkpoint.checkpoint", "torch.nn.init.xavier_uniform_" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
watsonjj/gammapy
[ "8d2498c8f63f73d1fbe4ba81ab02d9e72552df67", "8d2498c8f63f73d1fbe4ba81ab02d9e72552df67", "8d2498c8f63f73d1fbe4ba81ab02d9e72552df67", "8d2498c8f63f73d1fbe4ba81ab02d9e72552df67", "8d2498c8f63f73d1fbe4ba81ab02d9e72552df67", "8d2498c8f63f73d1fbe4ba81ab02d9e72552df67", "8d2498c8f63f73d1fbe4ba81ab02d9e72552df67", "8d2498c8f63f73d1fbe4ba81ab02d9e72552df67", "8d2498c8f63f73d1fbe4ba81ab02d9e72552df67", "8d2498c8f63f73d1fbe4ba81ab02d9e72552df67" ]
[ "gammapy/utils/fitting/tests/test_iminuit.py", "gammapy/catalog/tests/test_hess.py", "gammapy/utils/tests/test_time.py", "gammapy/spectrum/tests/test_fit.py", "gammapy/data/tests/test_gti.py", "gammapy/image/measure.py", "gammapy/data/tests/test_observers.py", "gammapy/irf/irf_reduce.py", "gammapy/data/pointing.py", "gammapy/data/tests/test_filters.py" ]
[ "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport pytest\nfrom numpy.testing import assert_allclose\nfrom .. import Parameter, Parameters, optimize_iminuit\n\npytest.importorskip(\"iminuit\")\n\n\ndef fcn(parameters):\n x = parameters[\"x\"].value\n y = parameters[\"y\"].value\n z = parameters[\"z\"].value\n x_opt, y_opt, z_opt = 2, 3e5, 4e-5\n x_err, y_err, z_err = 0.2, 3e4, 4e-6\n return ((x - x_opt) / x_err) ** 2 + ((y - y_opt) / y_err) ** 2 + ((z - z_opt) / z_err) ** 2\n\n\[email protected]()\ndef pars():\n x = Parameter(\"x\", 2.1)\n y = Parameter(\"y\", 3.1, scale=1e5)\n z = Parameter(\"z\", 4.1, scale=1e-5)\n return Parameters([x, y, z])\n\n\ndef test_iminuit_basic(pars):\n factors, info, minuit = optimize_iminuit(function=fcn, parameters=pars)\n\n assert info[\"success\"]\n assert_allclose(fcn(pars), 0, atol=1e-5)\n\n # Check the result in parameters is OK\n assert_allclose(pars[\"x\"].value, 2, rtol=1e-3)\n assert_allclose(pars[\"y\"].value, 3e5, rtol=1e-3)\n # Precision of estimate on \"z\" is very poor (0.040488). Why is it so bad?\n assert_allclose(pars[\"z\"].value, 4e-5, rtol=2e-2)\n\n # Check that minuit sees the parameter factors correctly\n assert_allclose(factors, [2, 3, 4], rtol=1e-3)\n assert_allclose(minuit.values[\"par_000_x\"], 2, rtol=1e-3)\n assert_allclose(minuit.values[\"par_001_y\"], 3, rtol=1e-3)\n assert_allclose(minuit.values[\"par_002_z\"], 4, rtol=1e-3)\n\n\ndef test_iminuit_frozen(pars):\n pars[\"y\"].frozen = True\n\n factors, info, minuit = optimize_iminuit(function=fcn, parameters=pars)\n\n assert info[\"success\"]\n\n assert_allclose(pars[\"x\"].value, 2, rtol=1e-4)\n assert_allclose(pars[\"y\"].value, 3.1e5)\n assert_allclose(pars[\"z\"].value, 4.e-5, rtol=1e-4)\n assert_allclose(fcn(pars), 0.111112, rtol=1e-5)\n\n assert minuit.list_of_fixed_param() == [\"par_001_y\"]\n\n\ndef test_iminuit_limits(pars):\n pars[\"y\"].min = 301000\n\n factors, info, minuit = optimize_iminuit(function=fcn, parameters=pars)\n\n assert info[\"success\"]\n\n # Check the result in parameters is OK\n assert_allclose(pars[\"x\"].value, 2, rtol=1e-2)\n assert_allclose(pars[\"y\"].value, 301000, rtol=1e-3)\n\n # Check that minuit sees the limit factors correctly\n states = minuit.get_param_states()\n assert not states[0][\"has_limits\"]\n\n y = states[1]\n assert y[\"has_limits\"]\n assert_allclose(y[\"lower_limit\"], 3.01)\n\n # The next assert can be added when we no longer test on iminuit 1.2\n # See https://github.com/gammapy/gammapy/pull/1771\n # assert states[1][\"upper_limit\"] is None\n", "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nfrom collections import Counter\nimport pytest\nimport numpy as np\nfrom numpy.testing import assert_allclose\nfrom astropy import units as u\nfrom astropy.coordinates import SkyCoord, Angle\nfrom astropy.table import Table\nfrom ...utils.testing import assert_quantity_allclose\nfrom ...utils.testing import requires_data, requires_dependency\nfrom ...spectrum.models import PowerLaw, ExponentialCutoffPowerLaw\nfrom ..hess import SourceCatalogHGPS, SourceCatalogLargeScaleHGPS\n\n\[email protected](scope=\"session\")\ndef cat():\n return SourceCatalogHGPS(\"$GAMMAPY_DATA/catalogs/hgps_catalog_v1.fits.gz\")\n\n\n@requires_data(\"gammapy-data\")\nclass TestSourceCatalogHGPS:\n @staticmethod\n def test_source_table(cat):\n assert cat.name == \"hgps\"\n assert len(cat.table) == 78\n\n @staticmethod\n def test_table_components(cat):\n assert len(cat.table_components) == 98\n\n @staticmethod\n def 
test_table_associations(cat):\n assert len(cat.table_associations) == 223\n\n @staticmethod\n def test_table_identifications(cat):\n assert len(cat.table_identifications) == 31\n\n @staticmethod\n def test_gaussian_component(cat):\n # Row index starts at 0, component numbers at 1\n # Thus we expect `HGPSC 084` at row 83\n c = cat.gaussian_component(83)\n assert c.name == \"HGPSC 084\"\n\n @staticmethod\n def test_large_scale_component(cat):\n assert isinstance(cat.large_scale_component, SourceCatalogLargeScaleHGPS)\n\n\n@requires_data(\"gammapy-data\")\nclass TestSourceCatalogObjectHGPS:\n @pytest.fixture(scope=\"class\")\n def source(self, cat):\n return cat[\"HESS J1843-033\"]\n\n @staticmethod\n @pytest.mark.slow\n def test_all_sources(cat):\n \"\"\"Check that properties and methods work for all sources,\n i.e. don't raise an error.\"\"\"\n for source in cat:\n str(source)\n source.energy_range\n source.spectral_model_type\n source.spectral_model()\n source.spatial_model_type\n source.is_pointlike\n source.sky_model()\n source.flux_points\n\n @staticmethod\n def test_basics(source):\n assert source.name == \"HESS J1843-033\"\n assert source.index == 64\n data = source.data\n assert data[\"Source_Class\"] == \"Unid\"\n assert \"SourceCatalogObjectHGPS\" in repr(source)\n\n ss = str(source)\n assert \"Source name : HESS J1843-033\" in ss\n assert \"Component HGPSC 083:\" in ss\n\n @staticmethod\n def test_str(cat):\n source = cat[\"HESS J1930+188\"]\n assert source.data[\"Spatial_Model\"] == \"Gaussian\"\n assert \"Spatial components : HGPSC 097\" in str(source)\n\n source = cat[\"HESS J1825-137\"]\n assert source.data[\"Spatial_Model\"] == \"3-Gaussian\"\n assert \"Spatial components : HGPSC 065, HGPSC 066, HGPSC 067\" in str(source)\n\n source = cat[\"HESS J1713-397\"]\n assert source.data[\"Spatial_Model\"] == \"Shell\"\n assert \"Source name : HESS J1713-397\" in str(source)\n\n @staticmethod\n def test_components(source):\n components = source.components\n assert len(components) == 2\n c = components[1]\n assert c.name == \"HGPSC 084\"\n\n @staticmethod\n def test_energy_range(source):\n energy_range = source.energy_range\n assert energy_range.unit == \"TeV\"\n assert_allclose(energy_range.value, [0.21544346, 61.89658356])\n\n @staticmethod\n def test_spectral_model_type(cat):\n spec_types = Counter([_.spectral_model_type for _ in cat])\n assert spec_types == {\"pl\": 66, \"ecpl\": 12}\n\n @staticmethod\n @requires_dependency(\"uncertainties\")\n def test_spectral_model_pl(cat):\n source = cat[\"HESS J1843-033\"]\n\n model = source.spectral_model()\n\n assert isinstance(model, PowerLaw)\n pars = model.parameters\n assert_allclose(pars[\"amplitude\"].value, 9.140179932365378e-13)\n assert_allclose(pars[\"index\"].value, 2.1513476371765137)\n assert_allclose(pars[\"reference\"].value, 1.867810606956482)\n\n val, err = model.integral_error(1 * u.TeV, 1e5 * u.TeV).value\n assert_allclose(val, source.data[\"Flux_Spec_Int_1TeV\"].value, rtol=0.01)\n assert_allclose(err, source.data[\"Flux_Spec_Int_1TeV_Err\"].value, rtol=0.01)\n\n @staticmethod\n @requires_dependency(\"uncertainties\")\n def test_spectral_model_ecpl(cat):\n source = cat[\"HESS J0835-455\"]\n\n model = source.spectral_model()\n assert isinstance(model, ExponentialCutoffPowerLaw)\n\n pars = model.parameters\n assert_allclose(pars[\"amplitude\"].value, 6.408420542586617e-12)\n assert_allclose(pars[\"index\"].value, 1.3543991614920847)\n assert_allclose(pars[\"reference\"].value, 1.696938754239)\n 
assert_allclose(pars[\"lambda_\"].value, 0.081517637)\n\n val, err = model.integral_error(1 * u.TeV, 1e5 * u.TeV).value\n assert_allclose(val, source.data[\"Flux_Spec_Int_1TeV\"].value, rtol=0.01)\n assert_allclose(err, source.data[\"Flux_Spec_Int_1TeV_Err\"].value, rtol=0.01)\n\n model = source.spectral_model(\"pl\")\n assert isinstance(model, PowerLaw)\n\n pars = model.parameters\n assert_allclose(pars[\"amplitude\"].value, 1.833056926733856e-12)\n assert_allclose(pars[\"index\"].value, 1.8913707)\n assert_allclose(pars[\"reference\"].value, 3.0176312923431396)\n\n val, err = model.integral_error(1 * u.TeV, 1e5 * u.TeV).value\n assert_allclose(val, source.data[\"Flux_Spec_PL_Int_1TeV\"].value, rtol=0.01)\n assert_allclose(err, source.data[\"Flux_Spec_PL_Int_1TeV_Err\"].value, rtol=0.01)\n\n @staticmethod\n def test_spatial_model_type(cat):\n morph_types = Counter([_.spatial_model_type for _ in cat])\n assert morph_types == {\n \"gaussian\": 52,\n \"2-gaussian\": 8,\n \"shell\": 7,\n \"point-like\": 6,\n \"3-gaussian\": 5,\n }\n\n @staticmethod\n def test_sky_model_point(cat):\n model = cat[\"HESS J1826-148\"].sky_model()\n p = model.parameters\n assert_allclose(p[\"amplitude\"].value, 9.815771242691063e-13)\n assert_allclose(p[\"lon_0\"].value, 16.882482528686523)\n assert_allclose(p[\"lat_0\"].value, -1.2889292240142822)\n\n @staticmethod\n def test_sky_model_gaussian(cat):\n model = cat[\"HESS J1119-614\"].sky_model()\n p = model.parameters\n assert_allclose(p[\"amplitude\"].value, 7.959899015960725e-13)\n assert_allclose(p[\"lon_0\"].value, 292.1280822753906)\n assert_allclose(p[\"lat_0\"].value, -0.5332353711128235)\n assert_allclose(p[\"sigma\"].value, 0.09785966575145721)\n\n @staticmethod\n def test_sky_model_gaussian2(cat):\n model = cat[\"HESS J1843-033\"].sky_model()\n\n p = model.skymodels[0].parameters\n assert_allclose(p[\"amplitude\"].value, 4.259815e-13, rtol=1e-5)\n assert_allclose(p[\"lon_0\"].value, 29.047216415405273)\n assert_allclose(p[\"lat_0\"].value, 0.24389676749706268)\n assert_allclose(p[\"sigma\"].value, 0.12499100714921951)\n\n p = model.skymodels[1].parameters\n assert_allclose(p[\"amplitude\"].value, 4.880365e-13, rtol=1e-5)\n assert_allclose(p[\"lon_0\"].value, 28.77037811279297)\n assert_allclose(p[\"lat_0\"].value, -0.0727819949388504)\n assert_allclose(p[\"sigma\"].value, 0.2294706553220749)\n\n @staticmethod\n def test_sky_model_gaussian3(cat):\n model = cat[\"HESS J1825-137\"].sky_model()\n\n p = model.skymodels[0].parameters\n assert_allclose(p[\"amplitude\"].value, 1.8952104218765842e-11)\n assert_allclose(p[\"lon_0\"].value, 16.988601684570312)\n assert_allclose(p[\"lat_0\"].value, -0.4913068115711212)\n assert_allclose(p[\"sigma\"].value, 0.47650089859962463)\n\n p = model.skymodels[1].parameters\n assert_allclose(p[\"amplitude\"].value, 4.4639763971527836e-11)\n assert_allclose(p[\"lon_0\"].value, 17.71169090270996)\n assert_allclose(p[\"lat_0\"].value, -0.6598004102706909)\n assert_allclose(p[\"sigma\"].value, 0.3910967707633972)\n\n p = model.skymodels[2].parameters\n assert_allclose(p[\"amplitude\"].value, 5.870712920658374e-12)\n assert_allclose(p[\"lon_0\"].value, 17.840524673461914)\n assert_allclose(p[\"lat_0\"].value, -0.7057178020477295)\n assert_allclose(p[\"sigma\"].value, 0.10932201147079468)\n\n @staticmethod\n def test_sky_model_gaussian_extern(cat):\n # special test for the only extern source with a gaussian morphology\n model = cat[\"HESS J1801-233\"].sky_model()\n p = model.parameters\n assert_allclose(p[\"amplitude\"].value, 
7.499999970031479e-13)\n assert_allclose(p[\"lon_0\"].value, 6.656888961791992)\n assert_allclose(p[\"lat_0\"].value, -0.267688125371933)\n assert_allclose(p[\"sigma\"].value, 0.17)\n\n @staticmethod\n def test_sky_model_shell(cat):\n model = cat[\"Vela Junior\"].sky_model()\n p = model.parameters\n assert_allclose(p[\"amplitude\"].value, 3.2163001428830995e-11)\n assert_allclose(p[\"lon_0\"].value, 266.2873840332031)\n assert_allclose(p[\"lat_0\"].value, -1.243260383605957)\n assert_allclose(p[\"radius\"].value, 0.95)\n assert_allclose(p[\"width\"].value, 0.05)\n\n\n@requires_data(\"gammapy-data\")\nclass TestSourceCatalogObjectHGPSComponent:\n @pytest.fixture(scope=\"class\")\n def component(self, cat):\n return cat.gaussian_component(83)\n\n @staticmethod\n def test_repr(component):\n assert \"SourceCatalogObjectHGPSComponent\" in repr(component)\n\n @staticmethod\n def test_str(component):\n assert \"Component HGPSC 084\" in str(component)\n\n @staticmethod\n def test_name(component):\n assert component.name == \"HGPSC 084\"\n\n @staticmethod\n def test_index(component):\n assert component.index == 83\n\n @staticmethod\n def test_spatial_model(component):\n model = component.spatial_model\n p = model.parameters\n assert_allclose(p[\"lon_0\"].value, 28.77037811279297)\n assert_allclose(p.error(\"lon_0\"), 0.058748625218868256)\n assert_allclose(p[\"lat_0\"].value, -0.0727819949388504)\n assert_allclose(p.error(\"lat_0\"), 0.06880396604537964)\n assert_allclose(p[\"sigma\"].value, 0.2294706553220749)\n assert_allclose(p.error(\"sigma\"), 0.04618723690509796)\n\n\nclass TestSourceCatalogLargeScaleHGPS:\n def setup(self):\n table = Table()\n table[\"GLON\"] = [-30, -10, 10, 20] * u.deg\n table[\"Surface_Brightness\"] = [0, 1, 10, 0] * u.Unit(\"cm-2 s-1 sr-1\")\n table[\"GLAT\"] = [-1, 0, 1, 0] * u.deg\n table[\"Width\"] = [0.4, 0.5, 0.3, 1.0] * u.deg\n self.table = table\n self.model = SourceCatalogLargeScaleHGPS(table)\n\n def test_evaluate(self):\n x = np.linspace(-100, 20, 5)\n y = np.linspace(-2, 2, 7)\n x, y = np.meshgrid(x, y)\n coords = SkyCoord(x, y, unit=\"deg\", frame=\"galactic\")\n image = self.model.evaluate(coords)\n desired = 1.223962643740966 * u.Unit(\"cm-2 s-1 sr-1\")\n assert_quantity_allclose(image.sum(), desired)\n\n def test_parvals(self):\n glon = Angle(10, unit=\"deg\")\n assert_quantity_allclose(\n self.model.peak_brightness(glon), 10 * u.Unit(\"cm-2 s-1 sr-1\")\n )\n assert_quantity_allclose(self.model.peak_latitude(glon), 1 * u.deg)\n assert_quantity_allclose(self.model.width(glon), 0.3 * u.deg)\n", "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nfrom numpy.testing import assert_allclose\nfrom astropy.time import TimeDelta, Time\nfrom ..time import (\n time_ref_from_dict,\n time_ref_to_dict,\n time_relative_to_ref,\n absolute_time,\n)\n\n\ndef test_time_ref_from_dict():\n d = dict(MJDREFI=51910, MJDREFF=0.00074287036841269583)\n mjd = d[\"MJDREFF\"] + d[\"MJDREFI\"]\n\n time = time_ref_from_dict(d)\n assert time.format == \"mjd\"\n assert time.scale == \"tt\"\n assert_allclose(time.mjd, mjd)\n\n\ndef test_time_ref_to_dict():\n time = Time(\"2001-01-01T00:00:00\")\n\n d = time_ref_to_dict(time)\n\n assert set(d) == {\"MJDREFI\", \"MJDREFF\", \"TIMESYS\"}\n assert d[\"MJDREFI\"] == 51910\n assert_allclose(d[\"MJDREFF\"], 0.00074287036841269583)\n assert d[\"TIMESYS\"] == \"tt\"\n\n\ndef test_time_relative_to_ref():\n time_ref_dict = dict(MJDREFI=500, MJDREFF=0.5)\n time_ref = time_ref_from_dict(time_ref_dict)\n delta_time_1sec = TimeDelta(1.0, 
format=\"sec\")\n time = time_ref + delta_time_1sec\n\n delta_time = time_relative_to_ref(time, time_ref_dict)\n\n assert_allclose(delta_time.sec, delta_time_1sec.sec)\n\n\ndef test_absolute_time():\n time_ref_dict = dict(MJDREFI=51000, MJDREFF=0.5)\n time_ref = time_ref_from_dict(time_ref_dict)\n delta_time_1sec = TimeDelta(1.0, format=\"sec\")\n time = time_ref + delta_time_1sec\n\n abs_time = absolute_time(delta_time_1sec, time_ref_dict)\n\n assert abs_time.value == time.utc.isot\n", "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport astropy.units as u\nimport numpy as np\nfrom numpy.testing import assert_allclose\nfrom ...utils.testing import requires_data, requires_dependency\nfrom ...utils.random import get_random_state\nfrom ...irf import EffectiveAreaTable\nfrom ...spectrum import (\n PHACountsSpectrum,\n SpectrumObservationList,\n SpectrumObservation,\n SpectrumFit,\n SpectrumFitResult,\n models,\n)\n\n\n@requires_dependency(\"sherpa\")\nclass TestFit:\n \"\"\"Test fit on counts spectra without any IRFs\"\"\"\n\n def setup(self):\n self.nbins = 30\n binning = np.logspace(-1, 1, self.nbins + 1) * u.TeV\n self.source_model = models.PowerLaw(\n index=2, amplitude=1e5 / u.TeV, reference=0.1 * u.TeV\n )\n self.bkg_model = models.PowerLaw(\n index=3, amplitude=1e4 / u.TeV, reference=0.1 * u.TeV\n )\n\n self.alpha = 0.1\n random_state = get_random_state(23)\n npred = self.source_model.integral(binning[:-1], binning[1:])\n source_counts = random_state.poisson(npred)\n self.src = PHACountsSpectrum(\n energy_lo=binning[:-1],\n energy_hi=binning[1:],\n data=source_counts,\n backscal=1,\n )\n # Currently it's necessary to specify a lifetime\n self.src.livetime = 1 * u.s\n\n npred_bkg = self.bkg_model.integral(binning[:-1], binning[1:])\n\n bkg_counts = random_state.poisson(npred_bkg)\n off_counts = random_state.poisson(npred_bkg * 1.0 / self.alpha)\n self.bkg = PHACountsSpectrum(\n energy_lo=binning[:-1], energy_hi=binning[1:], data=bkg_counts\n )\n self.off = PHACountsSpectrum(\n energy_lo=binning[:-1],\n energy_hi=binning[1:],\n data=off_counts,\n backscal=1.0 / self.alpha,\n )\n\n def test_cash(self):\n \"\"\"Simple CASH fit to the on vector\"\"\"\n obs = SpectrumObservation(on_vector=self.src)\n obs_list = SpectrumObservationList([obs])\n\n fit = SpectrumFit(\n obs_list=obs_list,\n model=self.source_model,\n stat=\"cash\",\n forward_folded=False,\n )\n assert \"Spectrum\" in str(fit)\n\n fit.predict_counts()\n assert_allclose(fit.predicted_counts[0][5], 660.5171, rtol=1e-5)\n\n fit.calc_statval()\n assert_allclose(np.sum(fit.statval[0]), -107346.5291, rtol=1e-5)\n\n self.source_model.parameters[\"index\"].value = 1.12\n fit.run()\n # These values are check with sherpa fits, do not change\n pars = fit.result[0].model.parameters\n assert_allclose(pars[\"index\"].value, 1.995525, rtol=1e-3)\n assert_allclose(pars[\"amplitude\"].value, 100245.9, rtol=1e-3)\n\n def test_wstat(self):\n \"\"\"WStat with on source and background spectrum\"\"\"\n on_vector = self.src.copy()\n on_vector.data.data += self.bkg.data.data\n obs = SpectrumObservation(on_vector=on_vector, off_vector=self.off)\n obs_list = SpectrumObservationList([obs])\n\n self.source_model.parameters.index = 1.12\n fit = SpectrumFit(\n obs_list=obs_list,\n model=self.source_model,\n stat=\"wstat\",\n forward_folded=False,\n )\n fit.run()\n pars = fit.result[0].model.parameters\n assert_allclose(pars[\"index\"].value, 1.997342, rtol=1e-3)\n assert_allclose(pars[\"amplitude\"].value, 100245.187067, rtol=1e-3)\n 
assert_allclose(fit.result[0].statval, 30.022316, rtol=1e-3)\n\n def test_joint(self):\n \"\"\"Test joint fit for obs with different energy binning\"\"\"\n obs1 = SpectrumObservation(on_vector=self.src)\n src_rebinned = self.src.rebin(2)\n obs2 = SpectrumObservation(on_vector=src_rebinned)\n fit = SpectrumFit(\n obs_list=[obs1, obs2],\n stat=\"cash\",\n model=self.source_model,\n forward_folded=False,\n )\n fit.run()\n pars = fit.result[0].model.parameters\n assert_allclose(pars[\"index\"].value, 1.996456, rtol=1e-3)\n\n def test_fit_range(self):\n \"\"\"Test fit range without complication of thresholds\"\"\"\n obs = SpectrumObservation(on_vector=self.src)\n obs_list = SpectrumObservationList([obs])\n\n fit = SpectrumFit(\n obs_list=obs_list, model=self.source_model, stat=None, forward_folded=False\n )\n assert np.sum(fit._bins_in_fit_range[0]) == self.nbins\n assert_allclose(fit.true_fit_range[0][-1].value, 10)\n assert_allclose(fit.true_fit_range[0][0].value, 0.1)\n\n fit.fit_range = [200, 600] * u.GeV\n assert np.sum(fit._bins_in_fit_range[0]) == 6\n assert_allclose(fit.true_fit_range[0][0].value, 0.21544347, rtol=1e-5)\n assert_allclose(fit.true_fit_range[0][-1].value, 0.54117, rtol=1e-5)\n\n fit.fit_range = [0.11659144 + 1.0e-5, 1.0 - 1.0e-5] * u.TeV\n assert np.sum(fit._bins_in_fit_range[0]) == 14\n\n # Check different fit ranges for different observations\n on_vector2 = self.src.copy()\n obs2 = SpectrumObservation(on_vector=on_vector2)\n obs2.lo_threshold = 5 * u.TeV\n obs_list.append(obs2)\n fit = SpectrumFit(\n obs_list=obs_list, model=self.source_model, stat=None, forward_folded=False\n )\n fit.fit_range = [2, 8] * u.TeV\n assert_allclose(fit.true_fit_range[0][0].value, 2.15443, rtol=1e-3)\n assert_allclose(fit.true_fit_range[1][0].value, 5.41169, rtol=1e-3)\n\n def test_likelihood_profile(self):\n obs = SpectrumObservation(on_vector=self.src)\n fit = SpectrumFit(\n obs_list=obs, stat=\"cash\", model=self.source_model, forward_folded=False\n )\n result = fit.run()\n true_idx = result.parameters[\"index\"].value\n values = np.linspace(0.95 * true_idx, 1.05 * true_idx, 100)\n profile = fit.likelihood_profile(\"index\", values=values)\n actual = values[np.argmin(profile[\"likelihood\"])]\n assert_allclose(actual, true_idx, rtol=0.01)\n\n\n@requires_dependency(\"sherpa\")\n@requires_data(\"gammapy-data\")\nclass TestSpectralFit:\n \"\"\"Test fit in astrophysical scenario\"\"\"\n\n def setup(self):\n path = \"$GAMMAPY_DATA/joint-crab/spectra/hess/\"\n obs1 = SpectrumObservation.read(path + \"pha_obs23523.fits\")\n obs2 = SpectrumObservation.read(path + \"pha_obs23592.fits\")\n self.obs_list = SpectrumObservationList([obs1, obs2])\n\n self.pwl = models.PowerLaw(\n index=2, amplitude=1e-12 * u.Unit(\"cm-2 s-1 TeV-1\"), reference=1 * u.TeV\n )\n\n self.ecpl = models.ExponentialCutoffPowerLaw(\n index=2,\n amplitude=1e-12 * u.Unit(\"cm-2 s-1 TeV-1\"),\n reference=1 * u.TeV,\n lambda_=0.1 / u.TeV,\n )\n\n # Example fit for one observation\n self.fit = SpectrumFit(self.obs_list[0], self.pwl)\n\n @requires_dependency(\"iminuit\")\n def test_basic_results(self):\n self.fit.run()\n result = self.fit.result[0]\n pars = result.model.parameters\n\n assert self.pwl is self.fit._model\n assert self.fit._model is result.model\n assert self.pwl is result.model\n\n assert_allclose(result.statval, 38.343, rtol=1e-3)\n assert_allclose(pars[\"index\"].value, 2.817, rtol=1e-3)\n assert pars[\"amplitude\"].unit == \"cm-2 s-1 TeV-1\"\n assert_allclose(pars[\"amplitude\"].value, 5.142e-11, rtol=1e-3)\n 
assert_allclose(result.npred[60], 0.6102, rtol=1e-3)\n self.fit.result[0].to_table()\n\n def test_basic_errors(self):\n self.fit.run()\n result = self.fit.result[0]\n pars = result.model.parameters\n assert_allclose(pars.error(\"index\"), 0.1496, rtol=1e-3)\n assert_allclose(pars.error(\"amplitude\"), 6.423e-12, rtol=1e-3)\n self.fit.result[0].to_table()\n\n def test_compound(self):\n model = self.pwl * 2\n fit = SpectrumFit(self.obs_list[0], model)\n fit.run()\n result = fit.result[0]\n pars = result.model.parameters\n assert_allclose(pars[\"index\"].value, 2.8166, rtol=1e-3)\n p = pars[\"amplitude\"]\n assert p.unit == \"cm-2 s-1 TeV-1\"\n assert_allclose(p.value, 5.0714e-12, rtol=1e-3)\n\n def test_npred(self):\n self.fit.run()\n actual = (\n self.fit.obs_list[0]\n .predicted_counts(self.fit.result[0].model)\n .data.data.value\n )\n desired = self.fit.result[0].npred\n assert_allclose(actual, desired)\n\n def test_stats(self):\n self.fit.run()\n stats = self.fit.result[0].stat_per_bin\n actual = np.sum(stats)\n desired = self.fit.result[0].statval\n assert_allclose(actual, desired)\n\n def test_fit_range(self):\n # Fit range not restriced fit range should be the thresholds\n obs = self.fit.obs_list[0]\n desired = obs.on_vector.lo_threshold\n actual = self.fit.true_fit_range[0][0]\n assert actual.unit == \"keV\"\n assert_allclose(actual.value, desired.value)\n\n # Restrict fit range\n fit_range = [4, 20] * u.TeV\n self.fit.fit_range = fit_range\n\n range_bin = obs.on_vector.energy.find_node(fit_range[1])\n desired = obs.on_vector.energy.lo[range_bin]\n actual = self.fit.true_fit_range[0][1]\n assert_allclose(actual.value, desired.value)\n\n # Make sure fit range is not extended below threshold\n fit_range = [0.001, 10] * u.TeV\n self.fit.fit_range = fit_range\n desired = obs.on_vector.lo_threshold\n actual = self.fit.true_fit_range[0][0]\n assert_allclose(actual.value, desired.value)\n\n def test_no_edisp(self):\n obs = self.obs_list[0]\n # Bring aeff in RECO space\n data = obs.aeff.data.evaluate(energy=obs.on_vector.energy.nodes)\n obs.aeff = EffectiveAreaTable(\n data=data,\n energy_lo=obs.on_vector.energy.lo,\n energy_hi=obs.on_vector.energy.hi,\n )\n obs.edisp = None\n fit = SpectrumFit(obs_list=obs, model=self.pwl)\n fit.run()\n assert_allclose(\n fit.result[0].model.parameters[\"index\"].value, 2.7961, atol=0.02\n )\n\n def test_ecpl_fit(self):\n fit = SpectrumFit(self.obs_list[0], self.ecpl)\n fit.run()\n actual = fit.result[0].model.parameters[\"lambda_\"].quantity\n assert actual.unit == \"TeV-1\"\n assert_allclose(actual.value, 0.145215, rtol=1e-2)\n\n def test_joint_fit(self):\n fit = SpectrumFit(self.obs_list, self.pwl)\n fit.run()\n actual = fit.result[0].model.parameters[\"index\"].value\n assert_allclose(actual, 2.7806, rtol=1e-3)\n\n actual = fit.result[0].model.parameters[\"amplitude\"].quantity\n assert actual.unit == \"cm-2 s-1 TeV-1\"\n assert_allclose(actual.value, 5.200e-11, rtol=1e-3)\n\n def test_stacked_fit(self):\n stacked_obs = self.obs_list.stack()\n obs_list = SpectrumObservationList([stacked_obs])\n fit = SpectrumFit(obs_list, self.pwl)\n fit.run()\n pars = fit.result[0].model.parameters\n assert_allclose(pars[\"index\"].value, 2.7767, rtol=1e-3)\n assert u.Unit(pars[\"amplitude\"].unit) == \"cm-2 s-1 TeV-1\"\n assert_allclose(pars[\"amplitude\"].value, 5.191e-11, rtol=1e-3)\n\n def test_run(self, tmpdir):\n fit = SpectrumFit(self.obs_list, self.pwl)\n fit.run()\n\n result = fit.result[0]\n modelname = result.model.__class__.__name__\n filename = tmpdir / 
\"fit_result_{}.yaml\".format(modelname)\n result.to_yaml(filename)\n\n read_result = SpectrumFitResult.from_yaml(tmpdir / \"fit_result_PowerLaw.yaml\")\n\n desired = fit.result[0].model.evaluate_error(1 * u.TeV)\n actual = read_result.model.evaluate_error(1 * u.TeV)\n assert actual.unit == desired.unit\n assert_allclose(actual.value, desired.value)\n\n def test_sherpa_fit(self, tmpdir):\n # this is to make sure that the written PHA files work with sherpa\n import sherpa.astro.ui as sau\n from sherpa.models import PowLaw1D\n\n # TODO: this works a little bit, but some info and warnings\n # from Sherpa remain. Not sure what to do, OK as-is for now.\n import logging\n\n logging.getLogger(\"sherpa\").setLevel(\"ERROR\")\n\n self.obs_list.write(tmpdir, use_sherpa=True)\n filename = tmpdir / \"pha_obs23523.fits\"\n sau.load_pha(str(filename))\n sau.set_stat(\"wstat\")\n model = PowLaw1D(\"powlaw1d.default\")\n model.ref = 1e9\n model.ampl = 1\n model.gamma = 2\n sau.set_model(model * 1e-20)\n sau.fit()\n assert_allclose(model.pars[0].val, 2.732, rtol=1e-3)\n assert_allclose(model.pars[2].val, 4.647, rtol=1e-3)\n", "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport pytest\nfrom numpy.testing import assert_allclose\nfrom astropy.time import Time\nfrom ...utils.testing import requires_data, assert_time_allclose\nfrom ...data import GTI\n\n\n@requires_data(\"gammapy-data\")\ndef test_gti_hess():\n filename = \"$GAMMAPY_DATA/tests/unbundled/hess/run_0023037_hard_eventlist.fits.gz\"\n gti = GTI.read(filename)\n assert \"GTI\" in str(gti)\n assert len(gti.table) == 1\n\n assert gti.time_delta[0].unit == \"s\"\n assert_allclose(gti.time_delta[0].value, 1568.00000)\n assert_allclose(gti.time_sum.value, 1568.00000)\n\n expected = Time(53292.00592592593, format=\"mjd\", scale=\"tt\")\n assert_time_allclose(gti.time_start[0], expected)\n\n expected = Time(53292.02407407408, format=\"mjd\", scale=\"tt\")\n assert_time_allclose(gti.time_stop[0], expected)\n\n\n@requires_data(\"gammapy-data\")\ndef test_gti_fermi():\n filename = \"$GAMMAPY_DATA/fermi-3fhl-gc/fermi-3fhl-gc-events.fits.gz\"\n gti = GTI.read(filename)\n assert \"GTI\" in str(gti)\n assert len(gti.table) == 39042\n\n assert gti.time_delta[0].unit == \"s\"\n assert_allclose(gti.time_delta[0].value, 651.598893)\n assert_allclose(gti.time_sum.value, 1.831396e08)\n\n expected = Time(54682.65603794185, format=\"mjd\", scale=\"tt\")\n assert_time_allclose(gti.time_start[0], expected)\n\n expected = Time(54682.66357959571, format=\"mjd\", scale=\"tt\")\n assert_time_allclose(gti.time_stop[0], expected)\n\n\n@requires_data(\"gammapy-data\")\[email protected](\n \"time_interval, expected_length, expected_times\",\n [\n (\n Time(\n [\"2008-08-04T16:21:00\", \"2008-08-04T19:10:00\"],\n format=\"isot\",\n scale=\"tt\",\n ),\n 2,\n Time(\n [\"2008-08-04T16:21:00\", \"2008-08-04T19:10:00\"],\n format=\"isot\",\n scale=\"tt\",\n ),\n ),\n (\n Time([54682.68125, 54682.79861111], format=\"mjd\", scale=\"tt\"),\n 2,\n Time([54682.68125, 54682.79861111], format=\"mjd\", scale=\"tt\"),\n ),\n (\n Time([10.0, 100000.0], format=\"mjd\", scale=\"tt\"),\n 39042,\n Time([54682.65603794185, 57236.96833546296], format=\"mjd\", scale=\"tt\"),\n ),\n (Time([10.0, 20.0], format=\"mjd\", scale=\"tt\"), 0, None),\n ],\n)\ndef test_select_time(time_interval, expected_length, expected_times):\n filename = \"$GAMMAPY_DATA/fermi-3fhl-gc/fermi-3fhl-gc-events.fits.gz\"\n gti = GTI.read(filename)\n\n gti_selected = gti.select_time(time_interval)\n\n assert 
len(gti_selected.table) == expected_length\n\n if expected_length != 0:\n expected_times.format = \"mjd\"\n assert_time_allclose(gti_selected.time_start[0], expected_times[0])\n assert_time_allclose(gti_selected.time_stop[-1], expected_times[1])\n", "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport numpy as np\nfrom scipy.optimize import brentq\nfrom astropy.units import Quantity\n\n\n__all__ = [\n \"measure_containment_fraction\",\n \"measure_containment_radius\",\n \"measure_image_moments\",\n \"measure_containment\",\n \"measure_curve_of_growth\",\n]\n\n\ndef measure_image_moments(image):\n \"\"\"\n Compute 0th, 1st and 2nd moments of an image.\n\n NaN values are ignored in the computation.\n\n Parameters\n ----------\n image : `gammapy.maps.Map`\n Image to measure on.\n\n Returns\n -------\n image moments : list\n List of image moments:\n [A, x_cms, y_cms, x_sigma, y_sigma, sqrt(x_sigma * y_sigma)]\n \"\"\"\n data = image.quantity\n\n coords = image.geom.get_coord().skycoord\n x, y = coords.data.lon.wrap_at(\"180d\"), coords.data.lat\n\n A = data[np.isfinite(data)].sum()\n\n # Center of mass\n x_cms = (x * data)[np.isfinite(data)].sum() / A\n y_cms = (y * data)[np.isfinite(data)].sum() / A\n\n # Second moments\n x_var = ((x - x_cms) ** 2 * data)[np.isfinite(data)].sum() / A\n y_var = ((y - y_cms) ** 2 * data)[np.isfinite(data)].sum() / A\n x_sigma = np.sqrt(x_var)\n y_sigma = np.sqrt(y_var)\n\n return A, x_cms, y_cms, x_sigma, y_sigma, np.sqrt(x_sigma * y_sigma)\n\n\ndef measure_containment(image, position, radius):\n \"\"\"\n Measure containment in a given circle around the source position.\n\n Parameters\n ----------\n image :`gammapy.maps.Map`\n Image to measure on.\n position : `~astropy.coordinates.SkyCoord`\n Source position on the sky.\n radius : float\n Radius of the region to measure the containment in.\n \"\"\"\n coords = image.geom.get_coord()\n separation = coords.skycoord.separation(position)\n return measure_containment_fraction(image.quantity, radius, separation)\n\n\ndef measure_containment_radius(image, position, containment_fraction=0.8):\n \"\"\"\n Measure containment radius of a source.\n\n Uses `scipy.optimize.brentq`.\n\n Parameters\n ----------\n image :`gammapy.maps.Map`\n Image to measure on.\n position : `~astropy.coordinates.SkyCoord`\n Source position on the sky.\n containment_fraction : float (default 0.8)\n Containment fraction\n\n Returns\n -------\n containment_radius :\n Containment radius (pix)\n \"\"\"\n data = image.quantity\n coords = image.geom.get_coord()\n separation = coords.skycoord.separation(position)\n\n # Normalize image\n data = data / data[np.isfinite(data)].sum()\n\n def func(r):\n return (\n measure_containment_fraction(data, r, separation.value)\n - containment_fraction\n )\n\n containment_radius = brentq(func, a=0, b=separation.max().value)\n return Quantity(containment_radius, separation.unit)\n\n\ndef measure_containment_fraction(data, radius, separation):\n \"\"\"Measure containment fraction.\n\n Parameters\n ----------\n data :`~astropy.unit.Quantity`\n Image to measure on.\n radius : `~astropy.units.Quantity`\n Containment radius.\n separation : `~astropy.coordinates.Angle`\n Separation from the source position array.\n\n Returns\n -------\n containment_fraction : float\n Containment fraction\n \"\"\"\n # Set up indices and containment mask\n containment_mask = separation < radius\n mask = np.isfinite(data) & containment_mask\n containment_fraction = data[mask].sum()\n return containment_fraction\n\n\ndef 
measure_curve_of_growth(image, position, radius_max=None, radius_n=10):\n \"\"\"\n Measure the curve of growth for a given source position.\n\n The curve of growth is determined by measuring the flux in a circle around\n the source and radius of this circle is increased\n\n Parameters\n ----------\n image : `astropy.io.fits.ImageHDU`\n Image to measure on.\n position : `~astropy.coordinates.SkyCoord`\n Source position on the sky.\n radius_max : `~astropy.units.Quantity`\n Maximal radius, up to which the containment is measured (default 0.2 deg).\n radius_n : int\n Number of radius steps.\n\n Returns\n -------\n radii : `~astropy.units.Quantity`\n Radii where the containment was measured.\n containment : `~astropy.units.Quantity`\n Corresponding contained flux.\n \"\"\"\n radius_max = radius_max if radius_max is not None else Quantity(0.2, \"deg\")\n containment = []\n radii = Quantity(np.linspace(0, radius_max.value, radius_n), radius_max.unit)\n for radius in radii:\n containment.append(measure_containment(image, position, radius))\n return radii, Quantity(containment)\n", "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nfrom numpy.testing import assert_allclose\nfrom astropy.coordinates import Angle\nfrom ...data import observatory_locations\n\n\ndef test_observatory_locations():\n # Check one example\n location = observatory_locations[\"hess\"]\n assert_allclose(location.lon.deg, Angle(\"16d30m00.8s\").deg)\n assert_allclose(location.lat.deg, Angle(\"-23d16m18.4s\").deg)\n assert_allclose(location.height.value, 1835)\n assert str(location.height.unit) == \"m\"\n", "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport logging\nimport numpy as np\nimport astropy.units as u\nfrom ..utils.energy import Energy\nfrom . 
import PSF3D, EnergyDependentTablePSF, IRFStacker, EffectiveAreaTable\n\n__all__ = [\"make_psf\", \"make_mean_psf\", \"make_mean_edisp\", \"apply_containment_fraction\"]\n\nlog = logging.getLogger(__name__)\n\n\ndef make_psf(observation, position, energy=None, rad=None):\n \"\"\"Make energy-dependent PSF for a given source position.\n\n Parameters\n ----------\n observation : `~gammapy.data.DataStoreObservation`\n Observation for which to compute the PSF\n position : `~astropy.coordinates.SkyCoord`\n Position at which to compute the PSF\n energy : `~astropy.units.Quantity`\n 1-dim energy array for the output PSF.\n If none is given, the energy array of the PSF from the observation is used.\n rad : `~astropy.coordinates.Angle`\n 1-dim offset wrt source position array for the output PSF.\n If none is given, the offset array of the PSF from the observation is used.\n\n Returns\n -------\n psf : `~gammapy.irf.EnergyDependentTablePSF`\n Energy dependent psf table\n \"\"\"\n offset = position.separation(observation.pointing_radec)\n\n if energy is None:\n energy = observation.psf.to_energy_dependent_table_psf(theta=offset).energy\n\n if rad is None:\n rad = observation.psf.to_energy_dependent_table_psf(theta=offset).rad\n\n psf_value = observation.psf.to_energy_dependent_table_psf(\n theta=offset, rad=rad\n ).evaluate(energy)\n\n arf = observation.aeff.data.evaluate(offset=offset, energy=energy)\n exposure = arf * observation.observation_live_time_duration\n\n psf = EnergyDependentTablePSF(\n energy=energy, rad=rad, exposure=exposure, psf_value=psf_value\n )\n return psf\n\n\ndef make_mean_psf(observations, position, energy=None, rad=None):\n \"\"\"Compute mean energy-dependent PSF.\n\n Parameters\n ----------\n observations : `~gammapy.data.Observations`\n Observations for which to compute the PSF\n position : `~astropy.coordinates.SkyCoord`\n Position at which to compute the PSF\n energy : `~astropy.units.Quantity`\n 1-dim energy array for the output PSF.\n If none is given, the energy array of the PSF from the first\n observation is used.\n rad : `~astropy.coordinates.Angle`\n 1-dim offset wrt source position array for the output PSF.\n If none is given, the energy array of the PSF from the first\n observation is used.\n\n Returns\n -------\n psf : `~gammapy.irf.EnergyDependentTablePSF`\n Mean PSF\n \"\"\"\n for idx, observation in enumerate(observations):\n psf = make_psf(observation, position, energy, rad)\n if idx == 0:\n stacked_psf = psf\n else:\n stacked_psf = stacked_psf.stack(psf)\n return stacked_psf\n\n\ndef make_mean_edisp(\n observations,\n position,\n e_true,\n e_reco,\n low_reco_threshold=Energy(0.002, \"TeV\"),\n high_reco_threshold=Energy(150, \"TeV\"),\n):\n \"\"\"Compute mean energy dispersion.\n\n Compute the mean edisp of a set of observations j at a given position\n\n The stacking is implemented in :func:`~gammapy.irf.IRFStacker.stack_edisp`\n\n Parameters\n ----------\n observations : `~gammapy.data.Observations`\n Observations for which to compute the EDISP\n position : `~astropy.coordinates.SkyCoord`\n Position at which to compute the EDISP\n e_true : `~gammapy.utils.energy.EnergyBounds`\n True energy axis\n e_reco : `~gammapy.utils.energy.EnergyBounds`\n Reconstructed energy axis\n low_reco_threshold : `~gammapy.utils.energy.Energy`\n low energy threshold in reco energy, default 0.002 TeV\n high_reco_threshold : `~gammapy.utils.energy.Energy`\n high energy threshold in reco energy , default 150 TeV\n\n Returns\n -------\n stacked_edisp : 
`~gammapy.irf.EnergyDispersion`\n Stacked EDISP for a set of observation\n \"\"\"\n list_aeff = []\n list_edisp = []\n list_livetime = []\n list_low_threshold = [low_reco_threshold] * len(observations)\n list_high_threshold = [high_reco_threshold] * len(observations)\n\n for obs in observations:\n offset = position.separation(obs.pointing_radec)\n list_aeff.append(obs.aeff.to_effective_area_table(offset, energy=e_true))\n list_edisp.append(\n obs.edisp.to_energy_dispersion(offset, e_reco=e_reco, e_true=e_true)\n )\n list_livetime.append(obs.observation_live_time_duration)\n\n irf_stack = IRFStacker(\n list_aeff=list_aeff,\n list_edisp=list_edisp,\n list_livetime=list_livetime,\n list_low_threshold=list_low_threshold,\n list_high_threshold=list_high_threshold,\n )\n irf_stack.stack_edisp()\n\n return irf_stack.stacked_edisp\n\n\ndef apply_containment_fraction(aeff, psf, radius):\n \"\"\" Estimate PSF containment inside a given radius and correct effective area for leaking flux.\n\n The PSF and effective area must have the same binning in energy.\n\n Parameters\n ----------\n aeff : `~gammapy.irf.EffectiveAreaTable`\n the input 1D effective area\n psf : `~gammapy.irf.EnergyDependentTablePSF`\n the input 1D PSF\n radius : `~astropy.coordinates.Angle`\n the maximum angle\n\n Returns\n -------\n correct_aeff : `~gammapy.irf.EffectiveAreaTable`\n the output corrected 1D effective area\n \"\"\"\n center_energies = aeff.energy.nodes\n\n containment = psf.containment(center_energies, radius)\n\n corrected_aeff = EffectiveAreaTable(\n aeff.energy.lo,\n aeff.energy.hi,\n data=aeff.data.data * np.squeeze(containment),\n meta=aeff.meta,\n )\n return corrected_aeff\n", "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nfrom scipy.interpolate import interp1d\nfrom astropy.version import version as astropy_version\nfrom astropy.utils import lazyproperty\nfrom astropy.units import Quantity\nfrom astropy.table import Table\nfrom astropy.coordinates import SkyCoord, AltAz, CartesianRepresentation\nfrom ..utils.scripts import make_path\nfrom ..utils.time import time_ref_from_dict\nfrom ..utils.fits import earth_location_from_dict\n\n__all__ = [\"PointingInfo\"]\n\n\nclass PointingInfo:\n \"\"\"IACT array pointing info.\n\n Data format specification: :ref:`gadf:iact-pnt`\n\n Parameters\n ----------\n table : `~astropy.table.Table`\n Table (with meta header info) on pointing\n\n Examples\n --------\n >>> from gammapy.data import PointingInfo\n >>> pointing_info = PointingInfo.read('$GAMMAPY_DATA/tests/hess_event_list.fits')\n >>> print(pointing_info)\n \"\"\"\n\n def __init__(self, table):\n self.table = table\n\n @classmethod\n def read(cls, filename, hdu=\"POINTING\"):\n \"\"\"Read `PointingInfo` table from file.\n\n Parameters\n ----------\n filename : str\n File name\n hdu : int or str\n HDU number or name\n\n Returns\n -------\n pointing_info : `PointingInfo`\n Pointing info object\n \"\"\"\n filename = make_path(filename)\n table = Table.read(str(filename), hdu=hdu)\n return cls(table=table)\n\n def __str__(self):\n ss = \"Pointing info:\\n\\n\"\n ss += \"Location: {}\\n\".format(self.location.geodetic)\n m = self.table.meta\n ss += \"MJDREFI, MJDREFF, TIMESYS = {}\\n\".format(\n (m[\"MJDREFI\"], m[\"MJDREFF\"], m[\"TIMESYS\"])\n )\n ss += \"Time ref: {}\\n\".format(self.time_ref.fits)\n ss += \"Time ref: {} MJD (TT)\\n\".format(self.time_ref.mjd)\n sec = self.duration.to(\"second\").value\n hour = self.duration.to(\"hour\").value\n ss += \"Duration: {} sec = {} hours\\n\".format(sec, 
hour)\n ss += \"Table length: {}\\n\".format(len(self.table))\n\n ss += \"\\nSTART:\\n\" + self._str_for_index(0) + \"\\n\"\n ss += \"\\nEND:\\n\" + self._str_for_index(-1) + \"\\n\"\n\n return ss\n\n def _str_for_index(self, idx):\n \"\"\"Information for one point in the pointing table.\"\"\"\n ss = \"Time: {}\\n\".format(self.time[idx].fits)\n ss += \"Time: {} MJD (TT)\\n\".format(self.time[idx].mjd)\n ss += \"RADEC: {} deg\\n\".format(self.radec[idx].to_string())\n ss += \"ALTAZ: {} deg\\n\".format(self.altaz[idx].to_string())\n return ss\n\n @lazyproperty\n def location(self):\n \"\"\"Observatory location (`~astropy.coordinates.EarthLocation`).\"\"\"\n return earth_location_from_dict(self.table.meta)\n\n @lazyproperty\n def time_ref(self):\n \"\"\"Time reference (`~astropy.time.Time`)\"\"\"\n return time_ref_from_dict(self.table.meta)\n\n @lazyproperty\n def duration(self):\n \"\"\"Pointing table duration (`~astropy.time.TimeDelta`).\n\n The time difference between the first and last entry.\n \"\"\"\n return self.time[-1] - self.time[0]\n\n @lazyproperty\n def time(self):\n \"\"\"Time array (`~astropy.time.Time`)\"\"\"\n met = Quantity(self.table[\"TIME\"].astype(\"float64\"), \"second\")\n time = self.time_ref + met\n return time.tt\n\n @lazyproperty\n def radec(self):\n \"\"\"RA / DEC position from table (`~astropy.coordinates.SkyCoord`)\"\"\"\n lon = self.table[\"RA_PNT\"]\n lat = self.table[\"DEC_PNT\"]\n return SkyCoord(lon, lat, unit=\"deg\", frame=\"icrs\")\n\n @lazyproperty\n def altaz_frame(self):\n \"\"\"ALT / AZ frame (`~astropy.coordinates.AltAz`).\"\"\"\n return AltAz(obstime=self.time, location=self.location)\n\n @lazyproperty\n def altaz(self):\n \"\"\"ALT / AZ position computed from RA / DEC (`~astropy.coordinates.SkyCoord`)\"\"\"\n return self.radec.transform_to(self.altaz_frame)\n\n @lazyproperty\n def altaz_from_table(self):\n \"\"\"ALT / AZ position from table (`~astropy.coordinates.SkyCoord`)\"\"\"\n lon = self.table[\"AZ_PNT\"]\n lat = self.table[\"ALT_PNT\"]\n return SkyCoord(lon, lat, unit=\"deg\", frame=self.altaz_frame)\n\n def altaz_interpolate(self, time):\n \"\"\"Interpolate pointing for a given time.\"\"\"\n t_new = time.mjd\n t = self.time.mjd\n xyz = self.altaz.cartesian\n x_new = interp1d(t, xyz.x)(t_new)\n y_new = interp1d(t, xyz.y)(t_new)\n z_new = interp1d(t, xyz.z)(t_new)\n xyz_new = CartesianRepresentation(x_new, y_new, z_new)\n altaz_frame = AltAz(obstime=time, location=self.location)\n\n # FIXME: an API change in Astropy in 3.1 broke this\n # See https://github.com/gammapy/gammapy/pull/1906\n if astropy_version >= \"3.1\":\n kwargs = {\"representation_type\": \"unitspherical\"}\n else:\n kwargs = {\"representation\": \"unitspherical\"}\n\n return SkyCoord(xyz_new, frame=altaz_frame, unit=\"deg\", **kwargs)\n", "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport pytest\nimport numpy as np\nfrom astropy.coordinates import SkyCoord, Angle\nfrom astropy.time import Time\nfrom astropy.units import Quantity\nfrom astropy import units as u\nfrom regions import CircleSkyRegion\nfrom ...data import DataStore, ObservationFilter, EventListBase, GTI\nfrom ...utils.testing import requires_data, assert_time_allclose\n\n\ndef test_event_filter_types():\n for method_str in ObservationFilter.EVENT_FILTER_TYPES.values():\n assert hasattr(EventListBase, method_str)\n\n\[email protected](scope=\"session\")\ndef observation():\n ds = DataStore.from_dir(\"$GAMMAPY_DATA/hess-dl3-dr1/\")\n return 
ds.obs(20136)\n\n\n@requires_data(\"gammapy-data\")\ndef test_empty_observation_filter(observation):\n empty_obs_filter = ObservationFilter()\n\n events = observation.events\n filtered_events = empty_obs_filter.filter_events(events)\n assert filtered_events == events\n\n gti = observation.gti\n filtered_gti = empty_obs_filter.filter_gti(gti)\n assert filtered_gti == gti\n\n\n@requires_data(\"gammapy-data\")\ndef test_filter_events(observation):\n custom_filter = {\n \"type\": \"custom\",\n \"opts\": {\"parameter\": \"ENERGY\", \"band\": Quantity([0.8 * u.TeV, 10.0 * u.TeV])},\n }\n\n target_position = SkyCoord(ra=229.2, dec=-58.3, unit=\"deg\", frame=\"icrs\")\n region_radius = Angle(\"0.2 deg\")\n region = CircleSkyRegion(center=target_position, radius=region_radius)\n circular_region_filter = {\"type\": \"circular_region\", \"opts\": {\"region\": region}}\n\n time_filter = Time([53090.12, 53090.13], format=\"mjd\", scale=\"tt\")\n\n obs_filter = ObservationFilter(\n event_filters=[custom_filter, circular_region_filter], time_filter=time_filter\n )\n\n events = observation.events\n filtered_events = obs_filter.filter_events(events)\n\n assert np.all(\n (filtered_events.energy >= 0.8 * u.TeV)\n & (filtered_events.energy < 10.0 * u.TeV)\n )\n assert np.all(\n (filtered_events.time >= time_filter[0])\n & (filtered_events.time < time_filter[1])\n )\n assert np.all(region.center.separation(filtered_events.radec) < region_radius)\n\n\n@requires_data(\"gammapy-data\")\ndef test_filter_gti(observation):\n time_filter = Time([53090.125, 53090.130], format=\"mjd\", scale=\"tt\")\n\n obs_filter = ObservationFilter(time_filter=time_filter)\n\n gti = observation.gti\n filtered_gti = obs_filter.filter_gti(gti)\n\n assert isinstance(filtered_gti, GTI)\n assert_time_allclose(filtered_gti.time_start, time_filter[0])\n assert_time_allclose(filtered_gti.time_stop, time_filter[1])\n" ]
[ [ "numpy.testing.assert_allclose" ], [ "numpy.meshgrid", "numpy.linspace", "numpy.testing.assert_allclose" ], [ "numpy.testing.assert_allclose" ], [ "numpy.linspace", "numpy.logspace", "numpy.argmin", "numpy.testing.assert_allclose", "numpy.sum" ], [ "numpy.testing.assert_allclose" ], [ "numpy.sqrt", "numpy.isfinite", "numpy.linspace" ], [ "numpy.testing.assert_allclose" ], [ "numpy.squeeze" ], [ "scipy.interpolate.interp1d" ], [ "numpy.all" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mbilos/stribor
[ "76082c255653d6bd8d506519223183e5d8395578", "76082c255653d6bd8d506519223183e5d8395578", "76082c255653d6bd8d506519223183e5d8395578" ]
[ "stribor/flow.py", "stribor/flows/cumsum.py", "stribor/net/mlp.py" ]
[ "import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport torch.distributions as td\r\n\r\nclass Flow(nn.Module):\r\n \"\"\"\r\n Building both normalizing flows and neural flows.\r\n\r\n Example:\r\n >>> import stribor as st\r\n >>> torch.manual_seed(123)\r\n >>> dim = 2\r\n >>> flow = st.Flow(st.UnitNormal(dim), [st.Affine(dim)])\r\n >>> x = torch.rand(1, dim)\r\n >>> y, ljd = flow(x)\r\n >>> y_inv, ljd_inv = flow.inverse(y)\r\n\r\n Args:\r\n base_dist (Type[torch.distributions]): Base distribution\r\n transforms (List[st.flows]): List of invertible transformations\r\n \"\"\"\r\n def __init__(self, base_dist=None, transforms=[]):\r\n super().__init__()\r\n self.base_dist = base_dist\r\n self.transforms = nn.ModuleList(transforms)\r\n\r\n def forward(self, x, latent=None, mask=None, t=None, reverse=False, **kwargs):\r\n \"\"\"\r\n Args:\r\n x (tensor): Input sampled from base density with shape (..., dim)\r\n latent (tensor, optional): Conditional vector with shape (..., latent_dim)\r\n Default: None\r\n mask (tensor): Masking tensor with shape (..., 1)\r\n Default: None\r\n t (tensor, optional): Flow time end point. Default: None\r\n reverse (bool, optional): Whether to perform an inverse. Default: False\r\n\r\n Returns:\r\n y (tensor): Output that follows target density (..., dim)\r\n log_jac_diag (tensor): Log-Jacobian diagonal (..., dim)\r\n \"\"\"\r\n transforms = self.transforms[::-1] if reverse else self.transforms\r\n _mask = 1 if mask is None else mask\r\n\r\n log_jac_diag = torch.zeros_like(x).to(x)\r\n for f in transforms:\r\n if reverse:\r\n x, ld = f.inverse(x * _mask, latent=latent, mask=mask, t=t, **kwargs)\r\n else:\r\n x, ld = f.forward(x * _mask, latent=latent, mask=mask, t=t, **kwargs)\r\n log_jac_diag += ld * _mask\r\n return x, log_jac_diag\r\n\r\n def inverse(self, y, latent=None, mask=None, t=None, **kwargs):\r\n \"\"\" Inverse of forward function with the same arguments. 
\"\"\"\r\n return self.forward(y, latent=latent, mask=mask, t=t, reverse=True, **kwargs)\r\n\r\n def log_prob(self, x, **kwargs):\r\n \"\"\"\r\n Calculates log-probability of a sample.\r\n\r\n Args:\r\n x (tensor): Input with shape (..., dim)\r\n\r\n Returns:\r\n log_prob (tensor): Log-probability of the input with shape (..., 1)\r\n \"\"\"\r\n if self.base_dist is None:\r\n raise ValueError('Please define `base_dist` if you need log-probability')\r\n x, log_jac_diag = self.inverse(x, **kwargs)\r\n log_prob = self.base_dist.log_prob(x) + log_jac_diag.sum(-1)\r\n return log_prob.unsqueeze(-1)\r\n\r\n def sample(self, num_samples, latent=None, mask=None, **kwargs):\r\n \"\"\"\r\n Transforms samples from the base to the target distribution.\r\n Uses reparametrization trick.\r\n\r\n Args:\r\n num_samples (tuple or int): Shape of samples\r\n latent (tensor): Latent conditioning vector with shape (..., latent_dim)\r\n\r\n Returns:\r\n x (tensor): Samples from target distribution with shape (*num_samples, dim)\r\n \"\"\"\r\n if self.base_dist is None:\r\n raise ValueError('Please define `base_dist` if you need sampling')\r\n if isinstance(num_samples, int):\r\n num_samples = (num_samples,)\r\n\r\n x = self.base_dist.rsample(num_samples)\r\n x, log_jac_diag = self.forward(x, **kwargs)\r\n return x\r\n", "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\ndef diff(x, dim=-1):\n \"\"\"\n Inverse of x.cumsum(dim=dim).\n Compute differences between subsequent elements of the tensor.\n Only works on dims -1 and -2.\n\n Args:\n x (tensor): Input of arbitrary shape\n Returns:\n diff (tensor): Result with the same shape as x\n \"\"\"\n if dim == 1:\n if x.dim() == 2:\n dim = -1\n elif x.dim() == 3:\n dim = -2\n else:\n raise ValueError('If dim=1, tensor must have 2 or 3 dimensions')\n\n if dim == 2:\n if x.dim() == 3:\n dim = -1\n elif x.dim() == 4:\n dim = -2\n else:\n raise ValueError('If dim=2, tensor should have 3 or 4 dimensions')\n\n if dim == -1:\n return x - F.pad(x, (1, 0))[..., :-1]\n elif dim == -2:\n return x - F.pad(x, (0, 0, 1, 0))[..., :-1, :]\n else:\n raise ValueError(\"dim must be equal to -1 or -2\")\n\n\nclass Cumsum(nn.Module):\n \"\"\"\n Compute cumulative sum along the specified dimension of the tensor.\n\n Example:\n >>> f = stribor.Cumsum(-1)\n >>> f(torch.ones(1, 4))\n (tensor([[1., 2., 3., 4.]]), tensor([[0., 0., 0., 0.]]))\n\n Args:\n dim (int): Tensor dimension over which to perform the summation. Options: -1 or -2.\n \"\"\"\n def __init__(self, dim):\n super().__init__()\n assert dim in [-1, -2], '`dim` must be either `-1` or `-2`'\n self.dim = dim\n\n def forward(self, x, **kwargs):\n y = x.cumsum(self.dim)\n return y, torch.zeros_like(y)\n\n def inverse(self, y, **kwargs):\n x = diff(y, self.dim)\n return x, torch.zeros_like(x)\n\nclass Diff(nn.Module):\n \"\"\"\n Inverse of Cumsum transformation.\n\n Args:\n dim (int): Tensor dimension over which to perform the diff. 
Options: -1 or -2.\n \"\"\"\n def __init__(self, dim):\n super().__init__()\n self.base_flow = Cumsum(dim)\n\n def forward(self, x, **kwargs):\n return self.base_flow.inverse(x, **kwargs)\n\n def inverse(self, x, **kwargs):\n return self.base_flow.forward(x, **kwargs)\n\n\nclass CumsumColumn(nn.Module):\n \"\"\"\n Cumulative sum along the specific column in (..., M, N) matrix.\n\n Example:\n >>> f = stribor.CumsumColumn(1)\n >>> f(torch.ones(3, 3))[0]\n tensor([[1., 1., 1.],\n [1., 2., 1.],\n [1., 3., 1.]])\n\n Args:\n column (int): Column in the (batched) matrix (..., M, N) over which to\n perform the summation\n \"\"\"\n def __init__(self, column):\n super().__init__()\n self.column = column\n\n def forward(self, x, **kwargs):\n y = x.clone()\n y[..., self.column] = y[..., self.column].cumsum(-1)\n return y, torch.zeros_like(y)\n\n def inverse(self, y, **kwargs):\n x = y.clone()\n x[..., self.column] = diff(x[..., self.column], -1)\n return x, torch.zeros_like(x)\n\nclass DiffColumn(nn.Module):\n def __init__(self, column):\n super().__init__()\n self.base_flow = CumsumColumn(column)\n\n def forward(self, x, **kwargs):\n return self.base_flow.inverse(x, **kwargs)\n\n def inverse(self, x, **kwargs):\n return self.base_flow.forward(x, **kwargs)\n", "import torch\nimport torch.nn as nn\n\nclass MLP(nn.Module):\n \"\"\"\n Simple multi-layer neural network.\n\n Example:\n >>> torch.manual_seed(123)\n >>> net = stribor.net.MLP(2, [64, 64], 1)\n >>> net(torch.randn(1, 2))\n tensor([[-0.0132]], grad_fn=<AddmmBackward>)\n\n Args:\n in_dim (int): Input size\n hidden_dims (List[int]): Hidden dimensions\n out_dim (int): Output size\n activation (str, optional): Activation function from `torch.nn`.\n Default: 'Tanh'\n final_activation (str, optional): Last activation. Default: None\n wrapper_func (callable, optional): Wrapper function for `nn.Linear`,\n e.g. st.util.spectral_norm. Default: None\n \"\"\"\n def __init__(self, in_dim, hidden_dims, out_dim, activation='Tanh',\n final_activation=None, wrapper_func=None, **kwargs):\n super().__init__()\n\n if not wrapper_func:\n wrapper_func = lambda x: x\n\n hidden_dims = hidden_dims[:]\n hidden_dims.append(out_dim)\n layers = [nn.Linear(in_dim, hidden_dims[0])]\n\n for i in range(len(hidden_dims) - 1):\n layers.append(getattr(nn, activation)())\n layers.append(wrapper_func(nn.Linear(hidden_dims[i], hidden_dims[i+1])))\n layers[-1].bias.data.fill_(0.0)\n\n if final_activation is not None:\n layers.append(getattr(nn, final_activation)())\n\n self.net = nn.Sequential(*layers)\n\n def forward(self, x, **kwargs):\n \"\"\"\n Args:\n x (tensor): Input with shape (..., in_dim)\n\n Returns:\n y (tensor): Output with shape (..., out_dim)\n \"\"\"\n\n return self.net(x)\n" ]
[ [ "torch.nn.ModuleList", "torch.zeros_like" ], [ "torch.zeros_like", "torch.nn.functional.pad" ], [ "torch.nn.Linear", "torch.nn.Sequential" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
xiaodashuaiya/fairseq
[ "9e3850bd87f4da751671d503406115730b99ea8a" ]
[ "fairseq/utils.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport contextlib\nimport copy\nimport importlib.util\nimport logging\nimport math\nimport os\nimport sys\nimport warnings\nfrom collections import defaultdict\nfrom itertools import accumulate\nfrom typing import Callable, Dict, List, Optional\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom fairseq.logging.meters import safe_round\nfrom fairseq.modules import gelu, gelu_accurate, sin, swish\nfrom fairseq.modules.multihead_attention import MultiheadAttention\nfrom torch import Tensor\n\ntry:\n from amp_C import multi_tensor_l2norm\n multi_tensor_l2norm_available = True\nexcept ImportError:\n multi_tensor_l2norm_available = False\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef split_paths(paths: str) -> List[str]:\n return paths.split(os.pathsep) if \"://\" not in paths else paths.split(\"|\")\n\n\ndef load_ensemble_for_inference(filenames, task, model_arg_overrides=None):\n from fairseq import checkpoint_utils\n\n deprecation_warning(\n \"utils.load_ensemble_for_inference is deprecated. \"\n \"Please use checkpoint_utils.load_model_ensemble instead.\"\n )\n return checkpoint_utils.load_model_ensemble(\n filenames, arg_overrides=model_arg_overrides, task=task\n )\n\n\ndef apply_to_sample(f, sample):\n if hasattr(sample, '__len__') and len(sample) == 0:\n return {}\n\n def _apply(x):\n if torch.is_tensor(x):\n return f(x)\n elif isinstance(x, dict):\n return {key: _apply(value) for key, value in x.items()}\n elif isinstance(x, list):\n return [_apply(x) for x in x]\n elif isinstance(x, tuple):\n return tuple(_apply(x) for x in x)\n elif isinstance(x, set):\n return {_apply(x) for x in x}\n else:\n return x\n\n return _apply(sample)\n\n\ndef move_to_cuda(sample):\n def _move_to_cuda(tensor):\n return tensor.cuda()\n\n return apply_to_sample(_move_to_cuda, sample)\n\n\ndef move_to_cpu(sample):\n def _move_to_cpu(tensor):\n # PyTorch has poor support for half tensors (float16) on CPU.\n # Move any such tensors to float32.\n if tensor.dtype in {torch.bfloat16, torch.float16}:\n tensor = tensor.to(dtype=torch.float32)\n return tensor.cpu()\n\n return apply_to_sample(_move_to_cpu, sample)\n\n\ndef get_incremental_state(\n module: MultiheadAttention,\n incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],\n key: str,\n) -> Optional[Dict[str, Optional[Tensor]]]:\n \"\"\"Helper for getting incremental state for an nn.Module.\"\"\"\n return module.get_incremental_state(incremental_state, key)\n\n\ndef set_incremental_state(\n module: MultiheadAttention,\n incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],\n key: str,\n value: Dict[str, Optional[Tensor]],\n) -> Optional[Dict[str, Dict[str, Optional[Tensor]]]]:\n \"\"\"Helper for setting incremental state for an nn.Module.\"\"\"\n if incremental_state is not None:\n result = module.set_incremental_state(incremental_state, key, value)\n if result is not None:\n incremental_state = result\n return incremental_state\n\n\ndef load_align_dict(replace_unk):\n if replace_unk is None:\n align_dict = None\n elif isinstance(replace_unk, str) and len(replace_unk) > 0:\n # Load alignment dictionary for unknown word replacement if it was passed as an argument.\n align_dict = {}\n with open(replace_unk, \"r\") as f:\n for line in f:\n cols = line.split()\n align_dict[cols[0]] = cols[1]\n else:\n # No alignment dictionary 
provided but we still want to perform unknown word replacement by copying the\n # original source word.\n align_dict = {}\n return align_dict\n\n\ndef print_embed_overlap(embed_dict, vocab_dict):\n embed_keys = set(embed_dict.keys())\n vocab_keys = set(vocab_dict.symbols)\n overlap = len(embed_keys & vocab_keys)\n logger.info(\"found {}/{} types in embedding file\".format(overlap, len(vocab_dict)))\n\n\ndef parse_embedding(embed_path):\n \"\"\"Parse embedding text file into a dictionary of word and embedding tensors.\n\n The first line can have vocabulary size and dimension. The following lines\n should contain word and embedding separated by spaces.\n\n Example:\n 2 5\n the -0.0230 -0.0264 0.0287 0.0171 0.1403\n at -0.0395 -0.1286 0.0275 0.0254 -0.0932\n \"\"\"\n embed_dict = {}\n with open(embed_path) as f_embed:\n next(f_embed) # skip header\n for line in f_embed:\n pieces = line.rstrip().split(\" \")\n embed_dict[pieces[0]] = torch.Tensor(\n [float(weight) for weight in pieces[1:]]\n )\n return embed_dict\n\n\ndef load_embedding(embed_dict, vocab, embedding):\n for idx in range(len(vocab)):\n token = vocab[idx]\n if token in embed_dict:\n embedding.weight.data[idx] = embed_dict[token]\n return embedding\n\n\ndef replace_unk(hypo_str, src_str, alignment, align_dict, unk):\n from fairseq import tokenizer\n\n # Tokens are strings here\n hypo_tokens = tokenizer.tokenize_line(hypo_str)\n # TODO: Very rare cases where the replacement is '<eos>' should be handled gracefully\n src_tokens = tokenizer.tokenize_line(src_str) + [\"<eos>\"]\n for i, ht in enumerate(hypo_tokens):\n if ht == unk:\n src_token = src_tokens[alignment[i]]\n # Either take the corresponding value in the aligned dictionary or just copy the original value.\n hypo_tokens[i] = align_dict.get(src_token, src_token)\n return \" \".join(hypo_tokens)\n\n\ndef post_process_prediction(\n hypo_tokens, src_str, alignment, align_dict, tgt_dict, remove_bpe=None, extra_symbols_to_ignore=None\n):\n hypo_str = tgt_dict.string(hypo_tokens, remove_bpe, extra_symbols_to_ignore=extra_symbols_to_ignore)\n if align_dict is not None:\n hypo_str = replace_unk(\n hypo_str, src_str, alignment, align_dict, tgt_dict.unk_string()\n )\n if align_dict is not None or remove_bpe is not None:\n # Convert back to tokens for evaluating with unk replacement or without BPE\n # Note that the dictionary can be modified inside the method.\n hypo_tokens = tgt_dict.encode_line(hypo_str, add_if_not_exist=True)\n return hypo_tokens, hypo_str, alignment\n\n\ndef make_positions(tensor, padding_idx: int, onnx_trace: bool = False):\n \"\"\"Replace non-padding symbols with their position numbers.\n\n Position numbers begin at padding_idx+1. Padding symbols are ignored.\n \"\"\"\n # The series of casts and type-conversions here are carefully\n # balanced to both work with ONNX export and XLA. 
In particular XLA\n # prefers ints, cumsum defaults to output longs, and ONNX doesn't know\n # how to handle the dtype kwarg in cumsum.\n mask = tensor.ne(padding_idx).int()\n return (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + padding_idx\n\n\ndef strip_pad(tensor, pad):\n return tensor[tensor.ne(pad)]\n\n\ndef buffered_arange(max):\n if not hasattr(buffered_arange, \"buf\"):\n buffered_arange.buf = torch.LongTensor()\n if max > buffered_arange.buf.numel():\n buffered_arange.buf.resize_(max)\n torch.arange(max, out=buffered_arange.buf)\n return buffered_arange.buf[:max]\n\n\ndef convert_padding_direction(\n src_tokens, padding_idx, right_to_left: bool = False, left_to_right: bool = False\n):\n assert right_to_left ^ left_to_right\n pad_mask = src_tokens.eq(padding_idx)\n if not pad_mask.any():\n # no padding, return early\n return src_tokens\n if left_to_right and not pad_mask[:, 0].any():\n # already right padded\n return src_tokens\n if right_to_left and not pad_mask[:, -1].any():\n # already left padded\n return src_tokens\n max_len = src_tokens.size(1)\n buffered = torch.empty(0).long()\n if max_len > 0:\n torch.arange(max_len, out=buffered)\n range = buffered.type_as(src_tokens).expand_as(src_tokens)\n num_pads = pad_mask.long().sum(dim=1, keepdim=True)\n if right_to_left:\n index = torch.remainder(range - num_pads, max_len)\n else:\n index = torch.remainder(range + num_pads, max_len)\n return src_tokens.gather(1, index)\n\n\ndef item(tensor):\n if hasattr(tensor, \"item\"):\n return tensor.item()\n if hasattr(tensor, \"__getitem__\"):\n return tensor[0]\n return tensor\n\n\ndef multi_tensor_total_norm(grads, chunk_size=2048*32) -> torch.Tensor:\n per_device_grads = {}\n norms = []\n for grad in grads:\n device = grad.device\n cur_device_grads = per_device_grads.get(device)\n if cur_device_grads is None:\n cur_device_grads = []\n per_device_grads[device] = cur_device_grads\n cur_device_grads.append(grad)\n for device in per_device_grads.keys():\n cur_device_grads = per_device_grads[device]\n if device.type == \"cuda\":\n # TODO(msb) return has_inf\n has_inf = torch.zeros((1, 1), dtype=torch.int, device=device)\n with torch.cuda.device(device):\n norm = multi_tensor_l2norm(chunk_size, has_inf, [cur_device_grads], False)\n norms.append(norm[0])\n else:\n norms += [torch.norm(g, p=2, dtype=torch.float32) for g in cur_device_grads]\n total_norm = torch.norm(torch.stack(norms))\n return total_norm\n\n\ndef clip_grad_norm_(params, max_norm, aggregate_norm_fn=None) -> torch.Tensor:\n if isinstance(params, torch.Tensor):\n params = [params]\n params = list(params)\n grads = [p.grad.detach() for p in filter(lambda p: p.grad is not None, params)]\n if len(grads) == 0:\n if len(params) > 0:\n return params[0].new_tensor(0.)\n else:\n return torch.tensor(0.)\n\n if len(grads) == 1:\n total_norm = torch.norm(grads[0], p=2, dtype=torch.float32)\n else:\n if multi_tensor_l2norm_available:\n total_norm = multi_tensor_total_norm(grads)\n else:\n warnings.warn(\n \"amp_C fused kernels unavailable, disabling multi_tensor_l2norm; \"\n \"you may get better performance by installing NVIDIA's apex library\"\n )\n total_norm = torch.norm(\n torch.stack([torch.norm(g, p=2, dtype=torch.float32) for g in grads])\n )\n\n if aggregate_norm_fn is not None:\n total_norm = aggregate_norm_fn(total_norm)\n\n if max_norm > 0:\n max_norm = float(max_norm)\n clip_coef = (max_norm / (total_norm + 1e-6)).clamp_(max=1)\n for g in grads:\n g.mul_(clip_coef)\n return total_norm\n\n\ndef fill_with_neg_inf(t):\n 
\"\"\"FP16-compatible function that fills a tensor with -inf.\"\"\"\n return t.float().fill_(float(\"-inf\")).type_as(t)\n\n\ndef _match_types(arg1, arg2):\n \"\"\"Convert the numerical argument to the same type as the other argument\"\"\"\n\n def upgrade(arg_number, arg_structure):\n if isinstance(arg_structure, tuple):\n return tuple([arg_number] * len(arg_structure))\n elif isinstance(arg_structure, dict):\n arg = copy.deepcopy(arg_structure)\n for k in arg:\n arg[k] = upgrade(arg_number, arg_structure[k])\n return arg\n else:\n return arg_number\n\n if isinstance(arg1, float) or isinstance(arg1, int):\n return upgrade(arg1, arg2), arg2\n elif isinstance(arg2, float) or isinstance(arg2, int):\n return arg1, upgrade(arg2, arg1)\n\n return arg1, arg2\n\n\ndef resolve_max_positions(*args):\n \"\"\"Resolve max position constraints from multiple sources.\"\"\"\n\n def map_value_update(d1, d2):\n updated_value = copy.deepcopy(d1)\n for key in d2:\n if key not in updated_value:\n updated_value[key] = d2[key]\n else:\n updated_value[key] = min(d1[key], d2[key])\n return updated_value\n\n def nullsafe_min(l):\n minim = None\n for item in l:\n if minim is None:\n minim = item\n elif item is not None and item < minim:\n minim = item\n return minim\n\n max_positions = None\n for arg in args:\n if max_positions is None:\n max_positions = arg\n elif arg is not None:\n max_positions, arg = _match_types(max_positions, arg)\n if isinstance(arg, float) or isinstance(arg, int):\n max_positions = min(max_positions, arg)\n elif isinstance(arg, dict):\n max_positions = map_value_update(max_positions, arg)\n else:\n max_positions = tuple(map(nullsafe_min, zip(max_positions, arg)))\n\n return max_positions\n\n\ndef import_user_module(args):\n module_path = getattr(args, \"user_dir\", None)\n if module_path is not None:\n module_path = os.path.abspath(args.user_dir)\n if not os.path.exists(module_path):\n fairseq_rel_path = os.path.join(\n os.path.dirname(__file__), \"..\", args.user_dir\n )\n if os.path.exists(fairseq_rel_path):\n module_path = fairseq_rel_path\n module_parent, module_name = os.path.split(module_path)\n\n if module_name not in sys.modules:\n sys.path.insert(0, module_parent)\n importlib.import_module(module_name)\n\n\ndef softmax(x, dim: int, onnx_trace: bool = False):\n if onnx_trace:\n return F.softmax(x.float(), dim=dim)\n else:\n return F.softmax(x, dim=dim, dtype=torch.float32)\n\n\ndef log_softmax(x, dim: int, onnx_trace: bool = False):\n if onnx_trace:\n return F.log_softmax(x.float(), dim=dim)\n else:\n return F.log_softmax(x, dim=dim, dtype=torch.float32)\n\n\ndef get_perplexity(loss, round=2, base=2):\n if loss is None:\n return 0.\n try:\n return safe_round(base ** loss, round)\n except OverflowError:\n return float('inf')\n\n\ndef deprecation_warning(message, stacklevel=3):\n # don't use DeprecationWarning, since it's ignored by default\n warnings.warn(message, stacklevel=stacklevel)\n\n\ndef get_activation_fn(activation: str) -> Callable:\n \"\"\" Returns the activation function corresponding to `activation` \"\"\"\n if activation == \"relu\":\n return F.relu\n elif activation == \"gelu\":\n return gelu\n elif activation == \"gelu_fast\":\n deprecation_warning(\n \"--activation-fn=gelu_fast has been renamed to gelu_accurate\"\n )\n return gelu_accurate\n elif activation == \"gelu_accurate\":\n return gelu_accurate\n elif activation == 'sin':\n return sin\n elif activation == 'swish':\n return swish\n elif activation == \"tanh\":\n return torch.tanh\n elif activation == 
\"linear\":\n return lambda x: x\n else:\n raise RuntimeError(\"--activation-fn {} not supported\".format(activation))\n\n\ndef get_available_activation_fns() -> List:\n return [\n \"relu\",\n \"gelu\",\n \"gelu_fast\", # deprecated\n \"gelu_accurate\",\n \"sin\",\n \"swish\",\n \"tanh\",\n \"linear\",\n ]\n\n\[email protected]\ndef eval(model):\n is_training = model.training\n model.eval()\n yield\n model.train(is_training)\n\n\ndef has_parameters(module):\n try:\n next(module.parameters())\n return True\n except StopIteration:\n return False\n\n\ndef set_torch_seed(seed):\n # Set seed based on args.seed and the update number so that we get\n # reproducible results when resuming from checkpoints\n assert isinstance(seed, int)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n\n\[email protected]\ndef with_torch_seed(seed):\n assert isinstance(seed, int)\n rng_state = torch.get_rng_state()\n cuda_rng_state = torch.cuda.get_rng_state()\n set_torch_seed(seed)\n yield\n torch.set_rng_state(rng_state)\n torch.cuda.set_rng_state(cuda_rng_state)\n\n\ndef parse_alignment(line):\n \"\"\"\n Parses a single line from the alingment file.\n\n Args:\n line (str): String containing the alignment of the format:\n <src_idx_1>-<tgt_idx_1> <src_idx_2>-<tgt_idx_2> ..\n <src_idx_m>-<tgt_idx_m>. All indices are 0 indexed.\n\n Returns:\n torch.IntTensor: packed alignments of shape (2 * m).\n \"\"\"\n alignments = line.strip().split()\n parsed_alignment = torch.IntTensor(2 * len(alignments))\n for idx, alignment in enumerate(alignments):\n src_idx, tgt_idx = alignment.split(\"-\")\n parsed_alignment[2 * idx] = int(src_idx)\n parsed_alignment[2 * idx + 1] = int(tgt_idx)\n return parsed_alignment\n\n\ndef get_token_to_word_mapping(tokens, exclude_list):\n n = len(tokens)\n word_start = [int(token not in exclude_list) for token in tokens]\n word_idx = list(accumulate(word_start))\n token_to_word = {i: word_idx[i] for i in range(n)}\n return token_to_word\n\n\ndef extract_hard_alignment(attn, src_sent, tgt_sent, pad, eos):\n tgt_valid = ((tgt_sent != pad) & (tgt_sent != eos)).nonzero().squeeze(dim=-1)\n src_invalid = ((src_sent == pad) | (src_sent == eos)).nonzero().squeeze(dim=-1)\n src_token_to_word = get_token_to_word_mapping(src_sent, [eos, pad])\n tgt_token_to_word = get_token_to_word_mapping(tgt_sent, [eos, pad])\n alignment = []\n if len(tgt_valid) != 0 and len(src_invalid) < len(src_sent):\n attn_valid = attn[tgt_valid]\n attn_valid[:, src_invalid] = float(\"-inf\")\n _, src_indices = attn_valid.max(dim=1)\n for tgt_idx, src_idx in zip(tgt_valid, src_indices):\n alignment.append(\n (\n src_token_to_word[src_idx.item()] - 1,\n tgt_token_to_word[tgt_idx.item()] - 1,\n )\n )\n return alignment\n\n\ndef new_arange(x, *size):\n \"\"\"\n Return a Tensor of `size` filled with a range function on the device of x.\n If size is empty, using the size of the variable x.\n \"\"\"\n if len(size) == 0:\n size = x.size()\n return torch.arange(size[-1], device=x.device).expand(*size).contiguous()\n\n\ndef get_tpu_device(args):\n import torch_xla.core.xla_model as xm\n return xm.xla_device()\n\n\ndef logging_multiple_line_messages(msg):\n msg_arr = msg.split(\"\\n\")\n for line in msg_arr:\n logger.info(line)\n\n\nclass CudaEnvironment(object):\n def __init__(self):\n cur_device = torch.cuda.current_device()\n prop = torch.cuda.get_device_properties(\"cuda:{}\".format(cur_device))\n self.name = prop.name\n self.major = prop.major\n self.minor = prop.minor\n self.total_memory_in_GB = prop.total_memory / 1024 / 1024 / 
1024\n\n @staticmethod\n def pretty_print_cuda_env_list(cuda_env_list):\n \"\"\"\n Given a list of CudaEnvironments, pretty print them\n \"\"\"\n num_workers = len(cuda_env_list)\n center = \"CUDA environments for all {} workers\".format(num_workers)\n banner_len = 40 - len(center) // 2\n first_line = \"*\" * banner_len + center + \"*\" * banner_len\n msg_arr = [first_line]\n for r, env in enumerate(cuda_env_list):\n msg_arr.append(\n \"rank {:3d}: \".format(r)\n + \"capabilities = {:2d}.{:<2d} ; \".format(env.major, env.minor)\n + \"total memory = {:.3f} GB ; \".format(env.total_memory_in_GB)\n + \"name = {:40s}\".format(env.name)\n )\n msg_arr.append(first_line)\n logging_multiple_line_messages(\"\\n\".join(msg_arr))\n" ]
[ [ "torch.set_rng_state", "torch.nn.functional.softmax", "torch.zeros", "torch.remainder", "torch.norm", "torch.tensor", "torch.arange", "torch.cuda.set_rng_state", "torch.LongTensor", "torch.empty", "torch.cuda.current_device", "torch.is_tensor", "torch.get_rng_state", "torch.stack", "torch.cuda.manual_seed", "torch.nn.functional.log_softmax", "torch.manual_seed", "torch.cuda.device", "torch.cuda.get_rng_state", "torch.cumsum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
MaryZolfaghar/WCSLS
[ "fcb3bfd11c19bb90690ec772f91bbd107832d636" ]
[ "utils/analyze.py" ]
[ "from numpy.core.fromnumeric import reshape\nimport torch \nimport numpy as np\nimport pickle\nfrom itertools import combinations, permutations\nfrom sklearn.decomposition import PCA\nfrom sklearn.manifold import MDS, TSNE\nfrom scipy.stats import pearsonr, ttest_ind\nimport statsmodels.api as sm\nfrom dataset import get_loaders, WineGrid\n\ndef analyze_episodic(model, test_data, args):\n # Collect attention weights for each sample in test set\n model.eval()\n m, x_ = test_data[0] # only 1 episode in test data\n m = m.to(args.device) # m: [1, n_train, sample_dim]\n x = x_[:,:,:-1].to(args.device) # x: [1, n_test, sample_dim]\n y = x_[:,:,-1].type(torch.long).to(args.device)\n y = y.squeeze() # y: [1, n_test]\n with torch.no_grad():\n y_hat, attention = model(x, m) \n attention = attention[0] # first (only) memory layer\n attention = np.squeeze(attention)\n # attention: [n_train, n_test]\n \n # Check the retrieval weights of relevant vs. irrelevant training samples\n grid = test_data.grid\n train = grid.train # train *samples* in test *episode*\n test = grid.test # test *samples* in test *episode*\n n_train = len(train)\n n_test = len(test)\n rel_ids = grid.hub_sample_ids # relevant memory ids (train samples)\n attn_ranks = np.zeros_like(attention)\n for i in range(n_test):\n argsorted_attn = np.argsort(attention[i])\n ranks = np.zeros([n_train])\n ranks[argsorted_attn] = np.arange(n_train)\n attn_ranks[i] = ranks\n relevant = []\n irrelevant = []\n for i in range(n_test):\n for j in range(n_train):\n if j in rel_ids[i]:\n relevant.append(attn_ranks[i,j])\n else:\n irrelevant.append(attn_ranks[i,j])\n rank_data = {\"relevant\": relevant, \"irrelevant\": irrelevant}\n\n # Check how often a legitimate \"path\" was retrieved in the top 5%\n k = 8 # top k memories with highest weights (k = 8 means 5 percent)\n used_hub = []\n for i in range(n_test):\n highest_attn = np.argsort(attention[i])[-k:]\n test_f1, test_f2, test_ctx, test_y = test[i]\n\n # Get relevant hubs for current test sample\n hubs = []\n for rel_id in rel_ids[i]:\n train_sample = train[rel_id]\n train_f1, train_f2 = train_sample[0], train_sample[1]\n if train_f1 in [test_f1, test_f2]: \n hubs.append(train_f2)\n if train_f2 in [test_f1, test_f2]:\n hubs.append(train_f1)\n hubs = list(set(hubs))\n hubs_dict = {h:[] for h in hubs}\n assert len(hubs) == 2, \"shouldn't be more than 2 hubs?\"\n\n # Check if one of the hubs appears with f1 and f2\n attended_train = [train[idx] for idx in highest_attn]\n for sample in attended_train:\n train_f1, train_f2, train_ctx, train_y = sample\n if train_ctx != test_ctx:\n continue # must be samples testing the same axis to be relevant\n if hubs[0] == train_f1:\n hubs_dict[hubs[0]].append(sample[1])\n if hubs[1] == sample[0]:\n hubs_dict[hubs[1]].append(sample[1])\n if hubs[0] == sample[1]:\n hubs_dict[hubs[0]].append(sample[0])\n if hubs[1] == sample[1]:\n hubs_dict[hubs[1]].append(sample[0])\n if test_f1 in hubs_dict[hubs[0]] and test_f2 in hubs_dict[hubs[0]]:\n used_hub.append(True)\n elif test_f1 in hubs_dict[hubs[1]] and test_f2 in hubs_dict[hubs[1]]:\n used_hub.append(True)\n else:\n used_hub.append(False)\n p_used_hub = np.mean(used_hub)\n print(\"Proportion that episodic system retrieved a hub path:\", p_used_hub)\n\n results = {\"rank_data\":rank_data, \"p_used_hub\": p_used_hub}\n return results\n\ndef analyze_cortical(model, test_data, analyze_loader, args):\n # Useful dictionaries from test dataset\n n_states = test_data.n_states \n loc2idx = test_data.loc2idx \n idx2loc = {idx:loc 
for loc, idx in loc2idx.items()}\n idxs = [idx for idx in range(n_states)]\n # locs = [idx2loc[idx] for idx in idxs]\n idx2tensor = test_data.idx2tensor \n\n model.eval()\n # Get embeddings from model for each face\n face_embedding = model.face_embedding\n face_embedding.to(args.device)\n embeddings = []\n # Get hiddens from the recurrent model for each face\n \n # if the model was stepwisemlp\n if args.cortical_model=='stepwisemlp':\n hiddens = [[] for i in range(2)]\n hiddens_cong = [[] for i in range(2)]\n hiddens_incong = [[] for i in range(2)] \n hiddens_ctxs = [[[] for j in range(args.N_contexts)] for i in range(2)]\n else:\n hiddens = [] # hidden reps. for both contexts\n hiddens_incong = []\n hiddens_cong = []\n hiddens_ctxs = [[] for i in range(args.N_contexts)]\n \n idxs1 = []\n idxs2 = []\n idxs1_ctxs = [[] for i in range(args.N_contexts)]\n idxs2_ctxs = [[] for i in range(args.N_contexts)]\n samples = []\n samples_ctxs = [[] for i in range(args.N_contexts)]\n samples_cong = []\n samples_incong = []\n\n with torch.no_grad():\n for idx in range(n_states):\n face_tensor = idx2tensor[idx].unsqueeze(0).to(args.device) \n embedding = face_embedding(face_tensor) # [1, state_dim]\n embedding = embedding.cpu().numpy()\n embeddings.append(embedding)\n embeddings = np.concatenate(embeddings, axis=0) # [n_states, state_dim]\n for batch in analyze_loader:\n if args.cortical_task == 'face_task':\n f1, f2, ctx, out, idx1, idx2 = batch\n elif args.cortical_task == 'wine_task':\n f1, f2, ctx, out1, out2, idx1, idx2 = batch\n idx1 = idx1[0]\n idx2 = idx2[0]\n samples.append(batch)\n (x1, y1), (x2, y2) = idx2loc[idx1], idx2loc[idx2]\n f1 = f1.to(args.device) \n f2 = f2.to(args.device) \n ctx = ctx.to(args.device)\n\n # create congruent and incongruent groups\n grid_angle = np.arctan2((y2-y1),(x2-x1))\n phi = np.sin(2*grid_angle)\n if np.abs(phi)<1e-5:\n # for congrunet trials, \n # zero out those very close to zero angles\n # so it won't turn into 1 or -1 by sign\n cong = 0 \n else:\n cong = np.sign(phi) # 1: congruent, -1:incongruent, 0:none\n\n # get the hidden reps. 
\n y_hat, out = model(f1, f2, ctx) \n # y_hat: [1, 2]\n # rnn_out: [seq_length, 1, hidden_dim]: [3, 1, 128]\n # mlp_out: [1, hidden_dim]: [1, 128]\n if args.order_ctx == 'first':\n f1_ind = 1\n f2_ind = 2\n elif args.order_ctx == 'last':\n f1_ind = 0\n f2_ind = 1\n if args.cortical_model=='stepwisemlp':\n out1, out2 = out\n out1 = out1.cpu().numpy()\n out2 = out2.cpu().numpy()\n hiddens[0].append(out1)\n hiddens[1].append(out2)\n hiddens_ctxs[0][ctx].append(out1)\n hiddens_ctxs[1][ctx].append(out2)\n else:\n out = out.cpu().numpy()\n hiddens.append(out)\n hiddens_ctxs[ctx].append(out)\n \n ctx = ctx[0].cpu().numpy()\n idxs1.append(idx1)\n idxs2.append(idx2)\n idxs1_ctxs[ctx].append(idx1)\n idxs2_ctxs[ctx].append(idx2)\n samples_ctxs[ctx].append(batch)\n if ((cong==1) and ((ctx==0) or (ctx==1))):\n if args.cortical_model=='stepwisemlp':\n hiddens_cong[0].append(out1)\n hiddens_cong[1].append(out2)\n else:\n hiddens_cong.append(out)\n samples_cong.append(batch)\n elif ((cong==-1) and ((ctx==0) or (ctx==1))):\n if args.cortical_model=='stepwisemlp':\n hiddens_incong[0].append(out1)\n hiddens_incong[1].append(out2)\n else:\n hiddens_incong.append(out)\n samples_incong.append(batch)\n\n hiddens = np.asarray(hiddens).squeeze() \n # for n_ctx=2, data_len = 16*12*2=384 (n_states:16, n_states-ties:12, permutation:2)\n # rnn hiddens: [data_len, seq_length, hidden_dim] : [384, 3, 128]\n # mlp hiddens: [data_len, hidden_dim]: [384, 128]\n # stepwisemlp hiddens: [num_hidds, data_len, hidden_dim]: [2, 384, 128]\n # with diagonals - wine task = data_len = (n_ctx-n_diag)*192+n_diag*212 \n # [n_ctx:2, data_len:384], [n_ctx:4, data_len:768], [n_ctx:8, data_len: 1616]\n hiddens_incong = np.asarray(hiddens_incong).squeeze() \n hiddens_cong = np.asarray(hiddens_cong).squeeze() \n # rnn hiddens_cong/incong: [144, 3, 128]\n # mlp hiddens_cong/incong: [144, 128]\n # stepwise mlp hiddens_cong/incong: [2, 144, 128]\n \n # hiddens_ctx: even tho it is 384, but it is ordered based on the contexts\n if args.cortical_model=='stepwisemlp':\n hiddens_ctx = np.concatenate(np.asarray(hiddens_ctxs).squeeze(), axis=1)\n # hiddens_ctxs: [n_hidds=2, n_ctx, 192, 1, 128]\n # hiddens_ctx: [n_hidds=2, 384, 128]\n hiddens_inc_c = np.concatenate((hiddens_incong, hiddens_cong), axis=1) \n # hiddens_inc_c: [n_hidds, 384-ties, 128]: [2, 288, 128]\n else:\n hiddens_ctx = np.concatenate(hiddens_ctxs, axis = 0).squeeze()\n # mlp hiddens_ctxs: [n_ctx, 192, 1, 128]\n # rnn hiddens_ctxs: [n_ctx, n_trials=192, 3, 1, 128]\n # rnn hiddens_ctx: [384, 3, 128]\n # mlp hiddens_ctx: [384, 128]\n hiddens_inc_c = np.concatenate((hiddens_incong, hiddens_cong), axis=0) \n # rnn hiddens_inc_c: [384-ties, seq_length, 128]: [288, 3, 128]\n # mlp hiddens_inc_c: [384-ties, 128]: [288, 128]\n\n if ((args.cortical_model=='rnn') or (args.cortical_model=='rnncell')):\n hiddens_ctx = hiddens_ctx[:, -1, :] # [384, 128]\n hiddens_inc_c = hiddens_inc_c[:, -1, :] #[288, 128]\n samples_inc_c = np.concatenate((samples_incong, samples_cong), axis=0)\n \n if args.cortical_model=='stepwisemlp':\n avg_hidden = np.zeros([2, n_states, hiddens.shape[-1]])\n avg_hidden_ctxs = np.zeros([2, args.N_contexts, n_states, hiddens.shape[-1]])\n else:\n avg_hidden = np.zeros([n_states, hiddens.shape[-1]])\n avg_hidden_ctxs = np.zeros([args.N_contexts, n_states, hiddens.shape[-1]])\n \n if ((args.cortical_model=='rnn') or (args.cortical_model=='rnncell')):\n hiddens_ctxs = np.asarray(hiddens_ctxs).squeeze() # [n_ctx, n_tirals=192, seq_len=3, hidd_dim=128]\n # Take average for each face 
based on its location\n for f in range(n_states):\n temp1 = [np.expand_dims(hiddens[i,f1_ind,:], axis=0) \n for i, idx1 in enumerate(idxs1) if idx1==f]\n temp2 = [np.expand_dims(hiddens[i,f2_ind,:], axis=0)\n for i, idx2 in enumerate(idxs2) if idx2==f]\n if len(temp1 + temp2)>1:\n avg_hidden[f] = np.concatenate(temp1 + temp2, axis=0).mean(axis=0) \n for ctx in range(args.N_contexts):\n temp1_ctxs = [hiddens_ctxs[ctx,i,f1_ind,:] \n for i, idx1 in enumerate(idxs1_ctxs[ctx]) if idx1==f]\n temp2_ctxs = [hiddens_ctxs[ctx,i,f2_ind,:] \n for i, idx2 in enumerate(idxs2_ctxs[ctx]) if idx2==f]\n if len(temp1_ctxs + temp2_ctxs)>1:\n m = np.zeros([2,hiddens_ctxs.shape[-1]])\n m[0] = np.mean(np.asarray(temp1_ctxs), axis=0)\n m[1] = np.mean(np.asarray(temp2_ctxs), axis=0)\n avg_hidden_ctxs[ctx, f, :] = np.mean(m, axis=0)\n # avg_hidden_ctxs[ctx, f, :] = np.concatenate(temp1_ctxs + temp2_ctxs, axis=0).mean(axis=0)\n # avg_hidden_ctxs: [n_ctx, n_states, hidden_dim]: [2, 16, 128] \n avg_hidden_ctx = np.concatenate(avg_hidden_ctxs, axis=0)\n elif args.cortical_model in ['mlp', 'mlp_cc']:\n for f in range(n_states):\n temp = [hiddens[i,:] \n for i, (idx1, idx2) in enumerate(zip(idxs1, idxs2))\n if ((idx1==f) | (idx2==f))]\n if len(temp)>1:\n avg_hidden[f] = np.mean(temp, axis=0)\n for ctx in range(args.N_contexts): \n temp_ctxs = [hiddens_ctxs[ctx][i]\n for i, (idx1, idx2) in enumerate(zip(idxs1_ctxs[ctx], idxs2_ctxs[ctx]))\n if ((idx1==f) | (idx2==f))]\n if len(temp_ctxs)>1:\n avg_hidden_ctxs[ctx, f, :] = np.mean(temp_ctxs, axis=0)\n # avg_hidden_ctxs: [n_contexts, n_states, hidden_dim]: [2, 16, 128] \n avg_hidden_ctx = np.concatenate(avg_hidden_ctxs, axis=0)\n elif args.cortical_model=='stepwisemlp':\n # todo: how to do the averaging? over both hidden reps?\n # hiddens_ctxs anf hiddens_inc_c for the pca results should have two dimensions, \n hiddens_ctxs = np.asarray(hiddens_ctxs).squeeze()\n for f in range(n_states):\n temp1 = [hiddens[0,i,:] \n for i, idx1 in enumerate(idxs1) if idx1==f]\n temp2 = [hiddens[1,i,:] \n for i, idx2 in enumerate(idxs2) if idx2==f]\n if len(temp1)>1:\n avg_hidden[0,f,:] = np.mean(temp1, axis=0)\n if len(temp2)>1:\n avg_hidden[1,f,:] = np.mean(temp2, axis=0)\n # avg_hidden: [n_hidd, n_states, hidd_dim]: [2,16,128]\n for ctx in range(args.N_contexts):\n temp1_ctxs = [hiddens_ctxs[0,ctx,i,:] \n for i, idx1 in enumerate(idxs1_ctxs[ctx]) if idx1==f]\n temp2_ctxs = [hiddens_ctxs[1,ctx,i,:] \n for i, idx2 in enumerate(idxs2_ctxs[ctx]) if idx2==f] \n if len(temp1_ctxs)>1:\n avg_hidden_ctxs[0,ctx,f,:] = np.mean(temp1_ctxs, axis=0)\n if len(temp2_ctxs)>1:\n avg_hidden_ctxs[1,ctx,f,:] = np.mean(temp2_ctxs, axis=0)\n # avg_hidden_ctxs: [n_hidd, n_contexts, n_states, hidden_dim]: [2, 2, 16, 128] \n avg_hidden_ctx = np.concatenate(avg_hidden_ctxs, axis=1)\n samples_res = {'samples': samples, \n 'samples_ctxs': samples_ctxs,\n 'samples_inc_c': samples_inc_c}\n\n results = {'samples_res':samples_res,\n 'idxs1': idxs1, 'idxs2': idxs2,\n 'embeddings': embeddings, # [16, 32]\n 'hiddens_ctx':hiddens_ctx, # mlp/rnn: [384,128] or in stepwisedmlp: [2,384,128]\n 'hiddens_ctxs':hiddens_ctxs, # mlp: [n_ctx, 192, 1, 128], rnn: [n_ctx, 192, 3, 128]\n 'avg_hidden':avg_hidden, # [16, 128] or [n_hidd=2, 16, 128]\n 'avg_hidden_ctx':avg_hidden_ctx, # mlp/rnn: [32, 128] or stepwisedmlp: [n_hidd=2, 32, 128]\n # the reaosn to have these is because the concat for each model is diff and want to deal with it here\n 'avg_hidden_ctxs':avg_hidden_ctxs, # [mlp/rnn: n_ctx, 16, 128] or stepwisedmlp: [n_hidd=2, n_ctx, 16, 
128]\n 'hiddens_inc_c': hiddens_inc_c} # mlp/rnn: [288, 128] or stepwisedmlp: [n_hidd=2, 288, 128]\n return results\n\ndef analyze_accs(args, test_data, cortical_result, dist_results):\n resutls = {'train_acc': cortical_result['train_acc'],\n 'test_acc': cortical_result['test_acc'],\n 'cong_train_acc': cortical_result['cong_train_acc'],\n 'incong_train_acc': cortical_result['incong_train_acc'],\n 'cong_test_acc': cortical_result['cong_test_acc'],\n 'incong_test_acc': cortical_result['incong_test_acc']}\n return resutls\n \n # cortical_analyze_acc = cortical_result['analyze_acc']\n # cortical_analyze_correct = cortical_result['analyze_correct']\n\ndef analyze_credit_assignment(args, test_data, cortical_result, dist_results):\n resutls = {'grad_ctx': cortical_result['grad_ctx'],\n 'grad_f1': cortical_result['grad_f1'],\n 'grad_f2': cortical_result['grad_f2'],\n 'grad_ctx_cong': cortical_result['grad_ctx_cong'],\n 'grad_f1_cong': cortical_result['grad_f1_cong'],\n 'grad_f2_cong': cortical_result['grad_f2_cong'],\n 'grad_ctx_incong': cortical_result['grad_ctx_incong'],\n 'grad_f1_incong': cortical_result['grad_f1_incong'],\n 'grad_f2_incong': cortical_result['grad_f2_incong']\n }\n return resutls\n\ndef proportions(args, test_data, cortical_result, dist_results):\n hiddens_ctxs = cortical_result['hiddens_ctxs'] # list of len [n_ctx]\n hiddens_ctxs = [np.concatenate(h, axis=0) for h in hiddens_ctxs] # list of len [n_ctx] each has either [192,128] or [224,128]\n # when n_ctx=8, we have diff number of ties, therefore, \n # in the first 4 contexts we have [192, 128], and in \n # the second 4 contexts (diagonals) we have [224, 128]\n # that is why we go over each of the hiddens in hiddens_ctxs\n # and then concat them to create [n_trials, hidden_dim] for each\n ps = []\n p_pies = []\n for h in hiddens_ctxs: # h: [n_trials, hidden_dim]\n p_pies.append(np.any(h>0, axis=0)) # list of len [n_ctx], each shape [128,]\n ps.append(np.mean(h>0, axis=0)) # [n_ctx, 128]\n ps = np.asarray(ps) \n # ps: [n_ctx, 128]\n # avg num of the trials that were active for each unit, and for each context\n s = np.sum(ps, axis=0, keepdims=True) \n # s: [1, hidden_dim], overall activity of each hidden unit, \n # if that unit was active at all, over all trials (regardless of the context)\n n = ps / s \n # n: [n_ctx, hidden_dim] \n # normalized - how much each unit is active for each ctx over trials \n # normalized by the overall activity of that unit for all ctx and trials\n # f = n > threshold\n # there are some NaNs\n prop_results = {'hiddens_ctxs': hiddens_ctxs,\n 'p_pies': p_pies, # which trials are active for each hidden unit, \n 'ps': ps, # on average, how many trials were active for each hidden unit\n 'n': n}\n return prop_results\n\ndef calc_dist_ctx(args, test_data, cortical_result, dist_results):\n N_contexts = 2 #ToDo: for now it works only for x and y, because of the angles\n # Useful dictionaries from test dataset\n n_states = test_data.n_states \n loc2idx = test_data.loc2idx \n idx2loc = {idx:loc for loc, idx in loc2idx.items()}\n idxs = [idx for idx in range(n_states)]\n N_contexts = args.N_contexts\n N_responses = args.N_responses\n avg_hidden_ctxs = cortical_result['avg_hidden_ctxs'] # [2, 16, 128]\n # Correlation\n grid_dists = []\n hidd_dists_ctxs = [[] for i in range(N_contexts)]\n grid_1ds_ctxs = [[] for i in range(N_contexts)]\n grid_angles = []\n samples = []\n\n for idx1, idx2 in combinations(idxs, 2):\n (x1, y1), (x2, y2) = idx2loc[idx1], idx2loc[idx2]\n samples.append((idx1, idx2))\n grid_dist = 
np.sqrt((x1-x2)**2 + (y1-y2)**2)\n grid_dists.append(grid_dist)\n for ctx in range(N_contexts):\n # Euclidean distance between hidden reps. in context ctx\n if args.cortical_model=='stepwisemlp':\n hidd_dist = np.zeros([2])\n hidd1, hidd2 = avg_hidden_ctxs[0,ctx,idx1,:], avg_hidden_ctxs[0,ctx,idx2,:]\n hidd_dist[0] = np.linalg.norm(hidd1 - hidd2)\n hidd1, hidd2 = avg_hidden_ctxs[1,ctx,idx1,:], avg_hidden_ctxs[1,ctx,idx2,:]\n hidd_dist[1] = np.linalg.norm(hidd1 - hidd2)\n else:\n hidd1, hidd2 = avg_hidden_ctxs[ctx][idx1], avg_hidden_ctxs[ctx][idx2]\n hidd_dist = np.linalg.norm(hidd1 - hidd2)\n hidd_dists_ctxs[ctx].append(hidd_dist)\n # 1D rank - Manhattan distance\n loc1 = [x1, y1]\n loc2 = [x2, y2]\n winegrid = WineGrid(N_responses, N_contexts)\n r1, r2 = winegrid.ctx_to_r(ctx, loc1, loc2) \n grid_1ds_ctxs[ctx].append(np.abs(r1-r2))\n # create on and off diagonal groups\n \n grid_angle = np.arctan2((y2-y1),(x2-x1))\n grid_angles.append(grid_angle)\n \n grid_dists = np.array(grid_dists) # [(n_states*(nstates-1))/2]: [120]\n grid_angles = np.array(grid_angles) # [120]\n samples = np.array(samples)\n hidd_dists_ctxs = np.array(hidd_dists_ctxs) # [n_ctx, sampels, n_hidds]: in mlp: [2,120], in stepwisemlp: [2,120,2]\n\n phi = np.sin(2*grid_angles)\n binary_phi = np.sign(phi)\n for i, p in enumerate(phi):\n if np.abs(p)<1e-5:\n binary_phi[i] = 0\n\n angle_results = {'grid_angles': grid_angles,\n 'phi': phi,\n 'binary_phi': binary_phi}\n dist_results = {'samples': samples,\n 'hidd_dists_ctxs': hidd_dists_ctxs,\n 'grid_1ds_ctxs': grid_1ds_ctxs,\n 'grid_dists': grid_dists,\n 'angle_results': angle_results}\n return dist_results\n\ndef calc_dist(args, test_data, cortical_result, dist_results=None):\n # Useful dictionaries from test dataset\n n_states = test_data.n_states \n loc2idx = test_data.loc2idx \n idx2loc = {idx:loc for loc, idx in loc2idx.items()}\n idxs = [idx for idx in range(n_states)]\n\n # Correlation\n grid_dists = []\n cong_grid_dists = []\n incong_grid_dists = []\n embed_dists = []\n hidd_dists = []\n cong_hidd_dists = []\n incong_hidd_dists = []\n cong_embed_dists = []\n incong_embed_dists = []\n grid_angles = []\n cong_grid_angles = []\n incong_grid_angles = []\n samples = []\n\n embeddings = cortical_result['embeddings']\n avg_hidden = cortical_result['avg_hidden'] # [16, 128]\n\n for idx1, idx2 in combinations(idxs, 2):\n (x1, y1), (x2, y2) = idx2loc[idx1], idx2loc[idx2]\n samples.append((idx1, idx2))\n grid_dist = np.sqrt((x1-x2)**2 + (y1-y2)**2)\n grid_dists.append(grid_dist)\n # Euclidean distance between embeddings\n emb1, emb2 = embeddings[idx1], embeddings[idx2]\n embed_dist = np.linalg.norm(emb1 - emb2)\n embed_dists.append(embed_dist)\n # Euclidean distance between hidden reps.\n if args.cortical_model=='stepwisemlp':\n hidd_dist = np.zeros([2])\n hidd1, hidd2 = avg_hidden[0,idx1], avg_hidden[0,idx2]\n hidd_dist[0] = np.linalg.norm(hidd1 - hidd2)\n hidd1, hidd2 = avg_hidden[1,idx1], avg_hidden[1,idx2]\n hidd_dist[1] = np.linalg.norm(hidd1 - hidd2)\n else:\n hidd1, hidd2 = avg_hidden[idx1], avg_hidden[idx2]\n hidd_dist = np.linalg.norm(hidd1 - hidd2)\n hidd_dists.append(hidd_dist)\n # create on and off diagonal groups\n grid_angle = np.arctan2((y2-y1),(x2-x1))\n grid_angles.append(grid_angle)\n phi = np.sin(2*grid_angle)\n if np.abs(phi)<1e-5:\n # for congrunet trials, \n # zero out those very close to zero angles\n # so it won't turn into 1 or -1 by sign\n cong = 0\n else:\n cong = np.sign(phi) # 1: congruent, -1:incongruent, 0:none\n if cong==1:\n 
cong_hidd_dists.append(hidd_dist)\n cong_grid_dists.append(grid_dist)\n cong_embed_dists.append(embed_dist)\n cong_grid_angles.append(grid_angle)\n if cong==-1:\n incong_hidd_dists.append(hidd_dist)\n incong_grid_dists.append(grid_dist)\n incong_embed_dists.append(embed_dist)\n incong_grid_angles.append(grid_angle) \n grid_dists = np.array(grid_dists) # [(n_states*(nstates-1))/2]: [120]\n embed_dists = np.array(embed_dists)\n hidd_dists = np.array(hidd_dists)\n cong_grid_dists = np.array(cong_grid_dists) # [36]\n incong_grid_dists = np.array(incong_grid_dists) # [36]\n cong_hidd_dists = np.array(cong_hidd_dists)\n incong_hidd_dists = np.array(incong_hidd_dists)\n cong_embed_dists = np.array(cong_embed_dists)\n incong_embed_dists = np.array(incong_embed_dists)\n grid_angles = np.array(grid_angles) # [120]\n cong_grid_angles = np.array(cong_grid_angles) # [36]\n incong_grid_angles = np.array(incong_grid_angles) # [36]\n samples = np.array(samples)\n\n phi = np.sin(2*grid_angles)\n binary_phi = np.sign(phi)\n for i, p in enumerate(phi):\n if np.abs(p)<1e-5:\n binary_phi[i] = 0\n\n cong_dist_results = {'cong_grid_dists': cong_grid_dists,\n 'cong_hidd_dists': cong_hidd_dists,\n 'cong_embed_dists': cong_embed_dists}\n incong_dist_results = {'incong_grid_dists': incong_grid_dists,\n 'incong_hidd_dists': incong_hidd_dists,\n 'incong_embed_dists': incong_embed_dists}\n angle_results = {'grid_angles': grid_angles,\n 'cong_grid_angles': cong_grid_angles, \n 'incong_grid_angles': incong_grid_angles,\n 'phi': phi,\n 'binary_phi': binary_phi}\n dist_results = {'samples': samples, \n 'grid_dists': grid_dists,\n 'embed_dists': embed_dists,\n 'hidd_dists':hidd_dists,\n 'cong_dist_results': cong_dist_results,\n 'incong_dist_results': incong_dist_results,\n 'angle_results': angle_results}\n return dist_results\n\ndef analyze_dim_red(args, test_data, cortical_result, dist_results, n_components=2):\n method = args.dimred_method\n n_states = test_data.n_states \n loc2idx = test_data.loc2idx \n idx2loc = {idx:loc for loc, idx in loc2idx.items()}\n idxs = [idx for idx in range(n_states)]\n locs = [idx2loc[idx] for idx in idxs]\n embeddings = cortical_result['embeddings'] # [16, 32]\n hiddens_ctx = cortical_result['hiddens_ctx'] # [384, 128] or in stepwisemlp: [2,384,128]\n avg_hidden = cortical_result['avg_hidden'] # [16, 128] or in stepwisemlp: [2,16,128]\n avg_hidden_ctx = cortical_result['avg_hidden_ctx'] # [32, 128] or in stepwisemlp: [2,32,128]\n hiddens_inc_c = cortical_result['hiddens_inc_c'] # [288, 128] or in stepwisemlp: [2,288,128]\n # hiddens_ctx = np.asarray(hiddens_ctxs)\n # hiddens_ctxs = np.concatenate(hiddens_ctxs, axis=0).squeeze() # [384, 128] or [384, 3, 128]\n # if ((args.cortical_model == 'rnn') or (args.cortical_model == 'rnncell')):\n # hiddens_ctx = hiddens_ctx[:,-1, :]\n # avg_hidden_ctxs = np.concatenate(avg_hidden_ctxs, axis=0) # [32, 128]\n \n results = {}\n # PCA\n if method == 'pca':\n pca = PCA(n_components=n_components)\n pca_2d_embed = pca.fit_transform(embeddings)\n if args.cortical_model=='stepwisemlp':\n pca_2d_hidd = np.zeros([hiddens_ctx.shape[0], hiddens_ctx.shape[1], n_components])\n pca_2d_avg_hidd = np.zeros([avg_hidden.shape[0], avg_hidden.shape[1], n_components])\n pca_2d_ctx_hidd = np.zeros([avg_hidden_ctx.shape[0], avg_hidden_ctx.shape[1], n_components])\n pca_2d_incong_cong = np.zeros([hiddens_inc_c.shape[0], hiddens_inc_c.shape[1], n_components])\n for h in range(hiddens_ctx.shape[0]):\n pca_2d_hidd[h,:,:] = pca.fit_transform(hiddens_ctx[h,:,:]) # this is all the 
hiddens, no averaging for each face\n pca_2d_avg_hidd[h,:,:] = pca.fit_transform(avg_hidden[h,:,:]) \n pca_2d_ctx_hidd[h,:,:] = pca.fit_transform(avg_hidden_ctx[h,:,:])\n pca_2d_incong_cong[h,:,:] = pca.fit_transform(hiddens_inc_c[h,:,:])\n \n else:\n pca_2d_hidd = pca.fit_transform(hiddens_ctx) # this is all the hiddens, no averaging for each face\n pca_2d_avg_hidd = pca.fit_transform(avg_hidden) # I might need to save this at all\n pca_2d_ctx_hidd = pca.fit_transform(avg_hidden_ctx)\n pca_2d_incong_cong = pca.fit_transform(hiddens_inc_c)\n results = {'embed_2d': pca_2d_embed, \n 'hidd_2d': pca_2d_hidd,\n 'avg_hidd_2d': pca_2d_avg_hidd,\n 'ctx_hidd_2d': pca_2d_ctx_hidd,\n 'incong_cong_2d': pca_2d_incong_cong,\n 'grid_locations': locs,\n 'samples_res': cortical_result['samples_res']}\n elif method == 'mds':\n # MDS\n mds = MDS(n_components=n_components)\n mds_2d_embed = mds.fit_transform(embeddings)\n mds_2d_hidd = mds.fit_transform(hiddens_ctx) # this is all the hiddens, no averaging for each face\n mds_2d_avg_hidd = mds.fit_transform(avg_hidden) # I might need to save this at all\n mds_2d_ctx_hidd = mds.fit_transform(avg_hidden_ctx)\n mds_2d_incong_cong = mds.fit_transform(hiddens_inc_c)\n results = {'embed_2d': mds_2d_embed, \n 'hidd_2d': mds_2d_hidd,\n 'avg_hidd_2d': mds_2d_avg_hidd,\n 'ctx_hidd_2d': mds_2d_ctx_hidd,\n 'incong_cong_2d': mds_2d_incong_cong}\n elif method == 'tsne':\n # tSNE\n tsne = TSNE(n_components=n_components)\n tsne_2d_embed = tsne.fit_transform(embeddings)\n tsne_2d_hidd = tsne.fit_transform(hiddens_ctx) # this is all the hiddens, no averaging for each face\n tsne_2d_avg_hidd = tsne.fit_transform(avg_hidden) # I might need to save this at all\n tsne_2d_ctx_hidd = tsne.fit_transform(avg_hidden_ctx)\n tsne_2d_incong_cong = tsne.fit_transform(hiddens_inc_c)\n results = {'embed_2d': tsne_2d_embed, \n 'hidd_2d': tsne_2d_hidd,\n 'avg_hidd_2d': tsne_2d_avg_hidd,\n 'ctx_hidd_2d': tsne_2d_ctx_hidd,\n 'incong_cong_2d': tsne_2d_incong_cong}\n return results\n\ndef hist_data(args, test_data, cortical_result, dist_results):\n # embeddings\n cong_embed_dists = dist_results['cong_dist_results']['cong_embed_dists']\n incong_embed_dists = dist_results['incong_dist_results']['incong_embed_dists']\n \n # hiddens\n cong_hidd_dists = dist_results['cong_dist_results']['cong_hidd_dists']\n incong_hidd_dists = dist_results['incong_dist_results']['incong_hidd_dists']\n \n dist_c_inc_results = {'cong_embed_dist': cong_embed_dists, \n 'incong_embed_dist': incong_embed_dists,\n 'cong_hidd_dist': cong_hidd_dists,\n 'incong_hidd_dist': incong_hidd_dists}\n \n return dist_c_inc_results\n\ndef calc_ratio(args, test_data, cortical_result, dist_results):\n # embeddings\n cong_embed_dists = dist_results['cong_dist_results']['cong_embed_dists']\n incong_embed_dists = dist_results['incong_dist_results']['incong_embed_dists']\n avg_cong_embed = np.mean(cong_embed_dists)\n avg_incong_embed = np.mean(incong_embed_dists)\n ratio_embed = (avg_cong_embed/avg_incong_embed)\n \n # hiddens\n cong_hidd_dists = dist_results['cong_dist_results']['cong_hidd_dists']\n incong_hidd_dists = dist_results['incong_dist_results']['incong_hidd_dists']\n avg_cong_hidd = np.mean(cong_hidd_dists, axis=0)\n avg_incong_hidd = np.mean(incong_hidd_dists, axis=0)\n # ratio_hidd = (avg_cong_hidd/avg_incong_hidd)\n ratio_hidd = (avg_incong_hidd/avg_cong_hidd)\n \n ratio_results = {'ratio_embed': ratio_embed, 'ratio_hidd': ratio_hidd,\\\n 'avg_cong_hidd': avg_cong_hidd, 'avg_incong_hidd': avg_incong_hidd}\n \n return 
ratio_results\n\ndef extract_hidd_dist(dist_results):\n # hiddens\n cong_hidd_dists = dist_results['cong_dist_results']['cong_hidd_dists']\n incong_hidd_dists = dist_results['incong_dist_results']['incong_hidd_dists']\n dist_result_hidd = {'cong_hidd_dists': cong_hidd_dists, 'incong_hidd_dists': incong_hidd_dists}\n \n return dist_result_hidd\n\ndef analyze_ttest(args, test_data, cortical_result, dist_results): \n cong_res = dist_results['cong_dist_results']\n incong_res = dist_results['incong_dist_results']\n \n incong_hidd_dists = incong_res['incong_hidd_dists']\n cong_hidd_dists = cong_res['cong_hidd_dists']\n if args.cortical_model == 'stepwisemlp':\n t_hidd, t_p_val_hidd = np.zeros([2]), np.zeros([2])\n for h in range(2):\n t_hidd[h], t_p_val_hidd[h] = ttest_ind(cong_hidd_dists[:,h], incong_hidd_dists[:,h])\n else:\n t_hidd, t_p_val_hidd = ttest_ind(cong_res['cong_hidd_dists'], \n incong_res['incong_hidd_dists'])\n t_embed, t_p_val_embed = ttest_ind(cong_res['cong_embed_dists'], \n incong_res['incong_embed_dists'])\n t_grid, t_p_val_grid = ttest_ind(cong_res['cong_grid_dists'], \n incong_res['incong_grid_dists'])\n ttest_results = {'t_stat_hidd':t_hidd, 't_p_val_hidd': t_p_val_hidd,\n 't_stat_embed':t_embed, 't_p_val_embed': t_p_val_embed,\n 't_grid':t_grid, 't_p_val_grid': t_p_val_grid}\n return ttest_results\n\ndef analyze_corr(args, test_data, cortical_result, dist_results):\n grid_dists = dist_results['grid_dists']\n embed_dists = dist_results['embed_dists'] \n hidd_dists = dist_results['hidd_dists'] \n cong_res = dist_results['cong_dist_results']\n incong_res = dist_results['incong_dist_results']\n r_embed, p_val_embed = pearsonr(grid_dists, embed_dists)\n if args.cortical_model == 'stepwisemlp':\n r_hidd, p_val_hidd = np.zeros([2]), np.zeros([2])\n r_cong_hidd, p_val_cong_hidd, r_incong_hidd, p_val_incong_hidd = \\\n np.zeros([2]), np.zeros([2]), np.zeros([2]), np.zeros([2])\n cong_hidd_dists, incong_hidd_dists = cong_res['cong_hidd_dists'], \\\n incong_res['incong_hidd_dists']\n for h in range(2):\n r_hidd[h], p_val_hidd[h] = pearsonr(grid_dists, hidd_dists[:,h])\n r_cong_hidd[h], p_val_cong_hidd[h] = pearsonr(cong_res['cong_grid_dists'], \n cong_hidd_dists[:,h]) \n r_incong_hidd[h], p_val_incong_hidd[h] = pearsonr(incong_res['incong_grid_dists'],\n incong_hidd_dists[:,h]) \n else:\n r_hidd, p_val_hidd = pearsonr(grid_dists, hidd_dists)\n r_cong_hidd, p_val_cong_hidd = pearsonr(cong_res['cong_grid_dists'], \n cong_res['cong_hidd_dists'])\n r_incong_hidd, p_val_incong_hidd = pearsonr(incong_res['incong_grid_dists'],\n incong_res['incong_hidd_dists'])\n r_cong_embed, p_val_cong_embed = pearsonr(cong_res['cong_grid_dists'], \n cong_res['cong_embed_dists'])\n r_incong_embed, p_val_incong_embed = pearsonr(incong_res['incong_grid_dists'], \n incong_res['incong_embed_dists']) \n corr_results = {'r_embed': r_embed, 'p_val_embed': p_val_embed,\n 'r_cong_embed': r_cong_embed, \n 'p_val_cong_embed': p_val_cong_embed,\n 'r_incong_embed': r_incong_embed, \n 'p_val_incong_embed': p_val_incong_embed,\n 'r_hidd': r_hidd, 'p_val_hidd': p_val_hidd,\n 'r_cong_hidd': r_cong_hidd, \n 'p_val_cong_hidd': p_val_cong_hidd,\n 'r_incong_hidd': r_incong_hidd, \n 'p_val_incong_hidd': p_val_incong_hidd}\n return corr_results\n\ndef analyze_regression(args, test_data, cortical_result, dist_results):\n hidd_dists = dist_results['hidd_dists']\n grid_dists = dist_results['grid_dists']\n phi = dist_results['angle_results']['phi']\n binary_phi = dist_results['angle_results']['binary_phi']\n # prepare data for the 
regression analysis\n x_cat = np.concatenate((grid_dists.reshape((-1,1)), binary_phi.reshape((-1,1))),axis=1)\n x_con = np.concatenate((grid_dists.reshape((-1,1)), phi.reshape((-1,1))),axis=1)\n\n # categorical regression analysis\n x_cat = sm.add_constant(x_cat)\n if args.cortical_model == 'stepwisemlp':\n p_val, t_val, param, bse = ([[] for i in range(2)] for i in range(4))\n y_hat_E = np.zeros(hidd_dists.shape)\n y = np.zeros(hidd_dists.shape)\n for h in range(2):\n y[:,h] = hidd_dists[:,h]\n y_hat_E[:,h], p_val[h], t_val[h], param[h], bse[h] = run_regression(x_cat,y[:,h],grid_dists)\n else:\n y = hidd_dists\n y_hat_E, p_val, t_val, param, bse = run_regression(x_cat,y,grid_dists) \n cat_reg = {'p_val': p_val,\n 't_val': t_val,\n 'param': param,\n 'y_hat_E': y_hat_E,\n 'y': y,\n 'bse': bse}\n\n # continuous regression analysis\n x_con = sm.add_constant(x_con)\n if args.cortical_model == 'stepwisemlp':\n p_val, t_val, param, bse = ([[] for i in range(2)] for i in range(4))\n y_hat_E = np.zeros(hidd_dists.shape)\n y = np.zeros(hidd_dists.shape)\n for h in range(2):\n y[:,h] = hidd_dists[:,h]\n y_hat_E[:,h], p_val[h], t_val[h], param[h], bse[h] = run_regression(x_con,y[:,h],grid_dists)\n else:\n y = hidd_dists\n y_hat_E, p_val, t_val, param, bse = run_regression(x_con,y,grid_dists) \n con_reg = {'p_val': p_val,\n 't_val': t_val,\n 'param': param,\n 'y_hat_E': y_hat_E,\n 'y': y,\n 'bse': bse}\n\n reg_results = {'cat_reg': cat_reg, \n 'con_reg': con_reg}\n return reg_results\n\ndef run_regression(x,y,grid_dist):\n stats_model = sm.OLS(y,x).fit() \n y_hat_E = stats_model.params[0] + (stats_model.params[1]*grid_dist) \n p_val, t_val, param, bse = stats_model.pvalues, stats_model.tvalues, \\\n stats_model.params, stats_model.bse\n return y_hat_E, p_val, t_val, param, bse\n\ndef analyze_regression_1D(args, test_data, cortical_result, dist_results):\n # make sure dist_results is dist_ctx_results\n hidd_dists_ctxs = dist_results['hidd_dists_ctxs']\n hidd_dists_ctx0 = hidd_dists_ctxs[0]\n hidd_dists_ctx1 = hidd_dists_ctxs[1]\n grid_1ds_ctxs = dist_results['grid_1ds_ctxs']\n grid_1ds_ctx0 = grid_1ds_ctxs[0]\n grid_1ds_ctx1 = grid_1ds_ctxs[1]\n grid_dists = dist_results['grid_dists']\n \n phi = dist_results['angle_results']['phi']\n binary_phi = dist_results['angle_results']['binary_phi']\n \n hidd_dists_ctx = np.concatenate((hidd_dists_ctx0, hidd_dists_ctx1), axis=0)\n grid_1ds_ctx = np.concatenate((grid_1ds_ctx0, grid_1ds_ctx1), axis=0)\n grid_dists_ctx = np.concatenate((grid_dists, grid_dists), axis=0)\n binary_phi_ctx = np.concatenate((binary_phi, binary_phi), axis=0)\n phi_ctx = np.concatenate((phi, phi), axis=0)\n # prepare data for the regression analysis\n x_cat = np.concatenate((grid_dists_ctx.reshape((-1,1)), grid_1ds_ctx.reshape((-1,1)),\n binary_phi_ctx.reshape((-1,1))),axis=1) # [240, 3]\n x_con = np.concatenate((grid_dists_ctx.reshape((-1,1)), grid_1ds_ctx.reshape((-1,1)),\n phi_ctx.reshape((-1,1))),axis=1)\n \n # categorical regression analysis\n x_cat = sm.add_constant(x_cat)\n if args.cortical_model == 'stepwisemlp':\n p_val, t_val, param, y_hat_E, y, bse = ([[] for i in range(2)] for i in range(6))\n y_hat_E = np.zeros(hidd_dists_ctx.shape)\n y = np.zeros(hidd_dists_ctx.shape)\n for h in range(2):\n y[:,h] = hidd_dists_ctx[:,h]\n y_hat_E[:,h], p_val[h], t_val[h], param[h], bse[h] = run_regression(x_cat,y[:,h],grid_dists_ctx)\n else:\n y = hidd_dists_ctx\n y_hat_E, p_val, t_val, param, bse = run_regression(x_cat,y,grid_dists_ctx)\n cat_reg = {'p_val': p_val,\n 't_val': t_val,\n 
'param': param,\n 'y_hat_E': y_hat_E,\n 'y': y,\n 'bse': bse}\n # continuous regression analysis\n x_con = sm.add_constant(x_con)\n if args.cortical_model == 'stepwisemlp':\n p_val, t_val, param, bse = ([[] for i in range(2)] for i in range(4))\n y_hat_E = np.zeros(hidd_dists_ctx.shape)\n y = np.zeros(hidd_dists_ctx.shape)\n for h in range(2):\n y[:,h] = hidd_dists_ctx[:,h]\n y_hat_E[:,h], p_val[h], t_val[h], param[h], bse[h] = run_regression(x_con,y[:,h],grid_dists_ctx)\n else:\n y = hidd_dists_ctx\n y_hat_E, p_val, t_val, param, bse = run_regression(x_con,y,grid_dists_ctx)\n con_reg = {'p_val': p_val,\n 't_val': t_val,\n 'param': param,\n 'y_hat_E': y_hat_E,\n 'y': y,\n 'bse': bse}\n\n reg_results = {'cat_reg': cat_reg, \n 'con_reg': con_reg}\n return reg_results\n\ndef analyze_regression_exc(args, test_data, cortical_result, dist_results):\n # Useful dictionaries from test dataset\n n_states = test_data.n_states \n hidd_dists = dist_results['hidd_dists'] #[n_combinations]: [120]\n grid_dists = dist_results['grid_dists']\n binary_phi = dist_results['angle_results']['binary_phi'] # [120]\n samples = dist_results['samples'] # [120, 2]\n states=[]\n if args.cortical_model=='stepwisemlp':\n p_vals, t_vals, params, bses = ([[] for i in range(2)] for i in range(4))\n else:\n p_vals, t_vals, params, bses = ([] for i in range(4))\n\n for state in range(n_states):\n s_idxs = [i for i, sample in enumerate(samples) if state not in sample] # [105]\n # prepare data for the regression analysis\n x_cat = np.concatenate((grid_dists[s_idxs].reshape((-1,1)), binary_phi[s_idxs].reshape((-1,1))),axis=1)\n # regression analysis\n x_cat = sm.add_constant(x_cat)\n if args.cortical_model == 'stepwisemlp':\n for h in range(2):\n y = hidd_dists[s_idxs,h]\n _ , p_val, t_val, param, bse = run_regression(x_cat,y,grid_dists)\n p_vals[h].append(p_val)\n t_vals[h].append(t_val)\n params[h].append(param)\n bses[h].append(bse)\n else:\n y = hidd_dists[s_idxs]\n _, p_val, t_val, param, bse = run_regression(x_cat,y,grid_dists)\n p_vals.append(p_val)\n t_vals.append(t_val)\n params.append(param)\n bses.append(bse)\n states.append(state)\n \n # regression analysis - after removing (0,0) and (3,3)\n s_idxs = [i for i, sample in enumerate(samples) if ((0 not in sample) & (15 not in sample))] # [91]\n x_cat = np.concatenate((grid_dists[s_idxs].reshape((-1,1)), binary_phi[s_idxs].reshape((-1,1))),axis=1)\n x_cat = sm.add_constant(x_cat)\n if args.cortical_model == 'stepwisemlp':\n for h in range(2):\n y = hidd_dists[s_idxs,h]\n _, p_val, t_val, param, bse = run_regression(x_cat,y,grid_dists)\n p_vals[h].append(p_val)\n t_vals[h].append(t_val)\n params[h].append(param)\n bses[h].append(bse)\n else:\n y = hidd_dists[s_idxs]\n _, p_val, t_val, param, bse = run_regression(x_cat,y,grid_dists)\n p_vals.append(p_val)\n t_vals.append(t_val)\n params.append(param)\n bses.append(bse)\n states.append(16)\n \n # regression analysis - after removing (0,0) and (3,3), (3,0) and (0.3)\n s_idxs = [i for i, sample in enumerate(samples) if ((0 not in sample) & (15 not in sample) &\n (3 not in sample) & (12 not in sample))] #[66]\n x_cat = np.concatenate((grid_dists[s_idxs].reshape((-1,1)), binary_phi[s_idxs].reshape((-1,1))),axis=1)\n x_cat = sm.add_constant(x_cat)\n if args.cortical_model == 'stepwisemlp':\n for h in range(2):\n y = hidd_dists[s_idxs,h] \n _, p_val, t_val, param, bse = run_regression(x_cat,y,grid_dists)\n p_vals[h].append(p_val)\n t_vals[h].append(t_val)\n params[h].append(param)\n bses[h].append(bse)\n else:\n y = 
hidd_dists[s_idxs]\n _, p_val, t_val, param, bse = run_regression(x_cat,y,grid_dists)\n p_vals.append(p_val)\n t_vals.append(t_val)\n params.append(param)\n bses.append(bse)\n states.append(17)\n\n states = np.array(states)\n p_vals = np.array(p_vals)\n t_vals = np.array(t_vals)\n params = np.array(params)\n bses = np.array(bses)\n \n exc_reg_results = {'excluded_states': states,\n 'p_vals': p_vals,\n 't_vals': t_vals,\n 'params': params,\n 'bses': bses} \n\n return exc_reg_results\n\ndef analyze_test_seq(args, test_data, cortical_result, dist_results):\n import sys\n sys.path.append(\"..\")\n data = get_loaders(batch_size=32, meta=False,\n use_images=True, image_dir='./images/',\n n_episodes=None,\n N_responses=args.N_responses, N_contexts=args.N_contexts,\n cortical_task = args.cortical_task, #ToDo:check why it was set to cortical_task='face_task',\n balanced = args.balanced)\n train_data, train_loader, test_data, test_loader, analyze_data, analyze_loader = data\n\n idx2loc = {idx:loc for loc, idx in test_data.loc2idx.items()}\n\n # ctx_order = 'first'\n # ctx_order_str = 'ctxF'\n \n analyze_correct = cortical_result['analyze_correct'] # [n_trials, time_steps]: [384, 3]\n analyze_correct = np.asarray(analyze_correct).squeeze()\n\n hidd_t_idx = 1 # at what time step, t = 1 means at the time of face1 \n # and t = 2 means at the time of face2\n # in axis First (axis is at t=0), it should be t = 1\n # create groups based on the row or columns\n # e.g, for context0 (xaxis), first column is group 1, sec col is group 2, and so on.\n # 4 groups for each axis/context; total 8 groups\n\n # ToDo: why it is always loc1???\n\n ctx0_g0=[]\n ctx0_g1=[]\n ctx0_g2=[]\n ctx0_g3=[]\n\n ctx1_g0=[]\n ctx1_g1=[]\n ctx1_g2=[]\n ctx1_g3=[]\n\n for i, batch in enumerate(analyze_loader):\n if args.cortical_task == 'face_task':\n f1, f2, ctx, y, idx1, idx2 = batch # face1, face2, context, y, index1, index2\n elif args.cortical_task == 'wine_task':\n f1, f2, ctx, y1, y2, idx1, idx2 = batch # face1, face2, context, y1, y2, index1, index2 \n msg = 'analyze_test_seq is only implemented for one response, two contexts'\n assert args.N_responses == 'one' and args.N_contexts == 2, msg\n\n if args.N_responses == 'one':\n y = y1\n # f1, f2, ax, y, idx1, idx2 = batch\n acc = analyze_correct[i][hidd_t_idx]\n ctx = ctx.cpu().numpy().squeeze()\n idx1 = idx1[0]\n idx2 = idx2[0]\n loc1 = idx2loc[idx1]\n loc2 = idx2loc[idx2]\n if ctx==0:\n if loc1[ctx]==0: ctx0_g0.append(acc) # (len(all_perms)/2) / 4 = [48]\n elif loc1[ctx]==1: ctx0_g1.append(acc)\n elif loc1[ctx]==2: ctx0_g2.append(acc)\n elif loc1[ctx]==3: ctx0_g3.append(acc)\n elif ctx==1:\n if loc1[ctx]==0: ctx1_g0.append(acc)\n elif loc1[ctx]==1: ctx1_g1.append(acc)\n elif loc1[ctx]==2: ctx1_g2.append(acc)\n elif loc1[ctx]==3: ctx1_g3.append(acc)\n ctx0_accs = [np.mean(ctx0_g0), np.mean(ctx0_g1), \n np.mean(ctx0_g2), np.mean(ctx0_g3) ]\n ctx1_accs = [np.mean(ctx1_g0), np.mean(ctx1_g1), \n np.mean(ctx1_g2), np.mean(ctx1_g3) ]\n \n # print('Accuracy at t=%s (face%s) contex 0:' %(hidd_t_idx,hidd_t_idx), ctx0_accs)\n # print('Accuracy at t=%s (face%s) contex 1:' %(hidd_t_idx,hidd_t_idx), ctx1_accs)\n return ctx0_accs, ctx1_accs" ]
[ [ "numpy.expand_dims", "numpy.sqrt", "numpy.asarray", "numpy.squeeze", "numpy.concatenate", "numpy.arctan2", "sklearn.manifold.MDS", "sklearn.manifold.TSNE", "numpy.zeros_like", "numpy.mean", "torch.no_grad", "numpy.any", "numpy.arange", "numpy.sin", "numpy.zeros", "scipy.stats.pearsonr", "numpy.argsort", "numpy.array", "sklearn.decomposition.PCA", "numpy.sum", "numpy.abs", "numpy.linalg.norm", "numpy.sign", "scipy.stats.ttest_ind" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] } ]
jirivrany/kagle-statoil
[ "8c70691fc7ca7d8a6a33a3544f76b22f1b508f7a" ]
[ "cnn_2bands.py" ]
[ "\n# coding: utf-8\n\n\"\"\"\n\n\"\"\"\n\n\nimport pandas as pd \nimport numpy as np \nimport cv2 # Used to manipulated the images \nfrom scipy.signal import wiener\n\nnp.random.seed(1207) # The seed I used - pick your own or comment out for a random seed. A constant seed allows for better comparisons though\n\n# Import Keras \nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten, Activation\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.optimizers import Adam\nfrom sklearn.model_selection import train_test_split\n\n\n# ## Load Training Data\n\n# In[2]:\n\n\ndf_train = pd.read_json('./input/train.json') # this is a dataframe\n\n\n# Need to reshape and feature scale the images:\n\n# In[3]:\n\n\ndef get_scaled_imgs(df):\n imgs = []\n \n for i, row in df.iterrows():\n band_1 = np.array(row['band_1'])\n band_2 = np.array(row['band_2'])\n\n #make 75x75 image\n band_1 = band_1.reshape(75, 75)\n band_2 = band_2.reshape(75, 75)\n #band_3 = band_1 + band_2 # plus since log(x*y) = log(x) + log(y)\n \n # Rescale\n a = (band_1 - band_1.mean()) / (band_1.max() - band_1.min())\n b = (band_2 - band_2.mean()) / (band_2.max() - band_2.min())\n #c = (band_3 - band_3.mean()) / (band_3.max() - band_3.min())\n\n imgs.append(np.dstack((a, b)))\n\n return np.array(imgs)\n\n\n\n\ndef get_more_images(imgs):\n \n more_images = []\n vert_flip_imgs = []\n hori_flip_imgs = []\n \n for i in range(0,imgs.shape[0]):\n a=imgs[i,:,:,0]\n b=imgs[i,:,:,1]\n #c=imgs[i,:,:,2]\n \n av=cv2.flip(a,1)\n ah=cv2.flip(a,0)\n bv=cv2.flip(b,1)\n bh=cv2.flip(b,0)\n #cv=cv2.flip(c,1)\n #ch=cv2.flip(c,0)\n \n #vert_flip_imgs.append(np.dstack((av, bv, cv)))\n #hori_flip_imgs.append(np.dstack((ah, bh, ch)))\n vert_flip_imgs.append(np.dstack((av, bv)))\n hori_flip_imgs.append(np.dstack((ah, bh)))\n \n v = np.array(vert_flip_imgs)\n h = np.array(hori_flip_imgs)\n \n more_images = np.concatenate((imgs,v,h))\n \n return more_images\n\n\ndef getModel():\n #Build keras model\n \n model=Sequential()\n \n # CNN 1\n model.add(Conv2D(64, kernel_size=(3, 3),activation='relu', input_shape=(75, 75, 2)))\n model.add(Conv2D(64, kernel_size=(3, 3), activation='relu' ))\n model.add(Conv2D(64, kernel_size=(3, 3), activation='relu' ))\n model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))\n \n # CNN 2\n model.add(Conv2D(128, kernel_size=(3, 3), activation='relu' ))\n model.add(Conv2D(128, kernel_size=(3, 3), activation='relu' ))\n model.add(Conv2D(128, kernel_size=(3, 3), activation='relu' ))\n model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n #model.add(Dropout(0.2))\n\n # CNN 3\n model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n #model.add(Dropout(0.2))\n\n #CNN 4\n model.add(Conv2D(256, kernel_size=(3, 3), activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n \n # You must flatten the data for the dense layers\n model.add(Flatten())\n\n #Dense 1\n model.add(Dense(1024, activation='relu'))\n model.add(Dropout(0.5))\n\n #Dense 2\n model.add(Dense(256, activation='relu'))\n model.add(Dropout(0.2))\n\n # Output \n model.add(Dense(1, activation=\"sigmoid\"))\n\n optimizer = Adam(lr=0.0001, decay=0.0)\n model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])\n \n return model\n\n\n\nXtrain = get_scaled_imgs(df_train)\nYtrain = 
np.array(df_train['is_iceberg'])\ndf_train.inc_angle = df_train.inc_angle.replace('na',0)\nidx_tr = np.where(df_train.inc_angle>0)\n\nYtrain = Ytrain[idx_tr[0]]\nXtrain = Xtrain[idx_tr[0],...]\n\n#Xtr_more = get_more_images(Xtrain) \n#Ytr_more = np.concatenate((Ytrain,Ytrain,Ytrain))\n\nX_train, X_valid, y_train, y_valid = train_test_split(Xtrain, Ytrain, test_size=0.1)\n\nX_train_more = get_more_images(X_train)\ny_train_more = np.concatenate([y_train, y_train, y_train])\nX_valid_more = get_more_images(X_valid)\ny_valid_more = np.concatenate([y_valid, y_valid, y_valid])\n\n\nmodel = getModel()\nmodel.summary()\n\nbatch_size = 32\nmodel_file = '.mdl_2l2_wts.hdf5'\n\nearly_stopping = EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='min')\nmcp_save = ModelCheckpoint(model_file, save_best_only=True, monitor='val_loss', mode='min')\nreduce_lr_loss = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5, verbose=1, epsilon=1e-6, mode='min')\n\n\n#model.fit(Xtr_more, Ytr_more, batch_size=batch_size, epochs=50, verbose=1, callbacks=[earlyStopping, mcp_save, reduce_lr_loss], validation_split=0.25)\n#model.fit(Xtr_more, Ytr_more, batch_size=batch_size, epochs=60, verbose=1, callbacks=[mcp_save, reduce_lr_loss], validation_split=0.2)\n\nmodel.fit(X_train_more, y_train_more, batch_size=32, epochs=60, verbose=1,\n callbacks=[mcp_save, reduce_lr_loss],\n validation_data=(X_valid, y_valid))\n\n\nmodel.load_weights(filepath = model_file)\n\nscore = model.evaluate(Xtrain, Ytrain, verbose=1)\nprint('Train score:', score[0])\nprint('Train accuracy:', score[1])\n\n\ndf_test = pd.read_json('./input/test.json')\ndf_test.inc_angle = df_test.inc_angle.replace('na',0)\nXtest = (get_scaled_imgs(df_test))\npred_test = model.predict(Xtest)\n\nsubmission = pd.DataFrame({'id': df_test[\"id\"], 'is_iceberg': pred_test.reshape((pred_test.shape[0]))})\nprint(submission.head(10))\n\nsubmission.to_csv('sub-2bands-nodrop-aug.csv', index=False)\n\n" ]
[ [ "numpy.random.seed", "sklearn.model_selection.train_test_split", "numpy.dstack", "numpy.concatenate", "pandas.read_json", "numpy.array", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
pagun12/predictive-monitoring-benchmark
[ "78a3c2723406dd85aec3b5b01e1ae2edb657f8e2", "78a3c2723406dd85aec3b5b01e1ae2edb657f8e2" ]
[ "bucketers/StateBasedBucketer.py", "transformers/IndexBasedTransformer.py" ]
[ "import pandas as pd\nimport numpy as np\nfrom time import time\nimport sys\n\nclass StateBasedBucketer(object):\n \n def __init__(self, encoder):\n self.encoder = encoder\n \n self.dt_states = None\n self.n_states = 0\n \n \n def fit(self, X, y=None):\n \n dt_encoded = self.encoder.fit_transform(X)\n \n self.dt_states = dt_encoded.drop_duplicates()\n self.dt_states = self.dt_states.assign(state = range(len(self.dt_states)))\n \n self.n_states = len(self.dt_states)\n \n return self\n \n \n def predict(self, X, y=None):\n \n dt_encoded = self.encoder.transform(X)\n \n dt_transformed = pd.merge(dt_encoded, self.dt_states, how='left')\n dt_transformed.fillna(-1, inplace=True)\n \n return dt_transformed[\"state\"].astype(int).as_matrix()\n \n \n def fit_predict(self, X, y=None):\n \n self.fit(X)\n return self.predict(X)", "from sklearn.base import TransformerMixin\nimport pandas as pd\nimport numpy as np\nfrom time import time\n\nclass IndexBasedTransformer(TransformerMixin):\n \n def __init__(self, case_id_col, cat_cols, num_cols, max_events=None, fillna=True, create_dummies=True):\n self.case_id_col = case_id_col\n self.cat_cols = cat_cols\n self.num_cols = num_cols\n self.max_events = max_events\n self.fillna = fillna\n self.create_dummies = create_dummies\n \n self.columns = None\n \n self.fit_time = 0\n self.transform_time = 0\n \n \n def fit(self, X, y=None):\n return self\n \n def transform(self, X, y=None):\n start = time()\n \n grouped = X.groupby(self.case_id_col, as_index=False)\n \n if self.max_events is None:\n self.max_events = grouped.size().max()\n \n \n dt_transformed = pd.DataFrame(grouped.apply(lambda x: x.name), columns=[self.case_id_col])\n for i in range(self.max_events):\n dt_index = grouped.nth(i)[[self.case_id_col] + self.cat_cols + self.num_cols]\n dt_index.columns = [self.case_id_col] + [\"%s_%s\"%(col, i) for col in self.cat_cols] + [\"%s_%s\"%(col, i) for col in self.num_cols]\n dt_transformed = pd.merge(dt_transformed, dt_index, on=self.case_id_col, how=\"left\")\n dt_transformed.index = dt_transformed[self.case_id_col]\n \n # one-hot-encode cat cols\n if self.create_dummies:\n all_cat_cols = [\"%s_%s\"%(col, i) for col in self.cat_cols for i in range(self.max_events)]\n dt_transformed = pd.get_dummies(dt_transformed, columns=all_cat_cols).drop(self.case_id_col, axis=1)\n \n # fill missing values with 0-s\n if self.fillna:\n dt_transformed = dt_transformed.fillna(0)\n\n # add missing columns if necessary\n if self.columns is None:\n self.columns = dt_transformed.columns\n else:\n missing_cols = [col for col in self.columns if col not in dt_transformed.columns]\n for col in missing_cols:\n dt_transformed[col] = 0\n dt_transformed = dt_transformed[self.columns]\n\n self.transform_time = time() - start\n return dt_transformed\n \n def get_feature_names(self):\n return self.columns" ]
[ [ "pandas.merge" ], [ "pandas.merge", "pandas.get_dummies" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
EkremBayar/bayar
[ "aad1a32044da671d0b4f11908416044753360b39", "aad1a32044da671d0b4f11908416044753360b39", "aad1a32044da671d0b4f11908416044753360b39", "aad1a32044da671d0b4f11908416044753360b39", "aad1a32044da671d0b4f11908416044753360b39", "aad1a32044da671d0b4f11908416044753360b39", "aad1a32044da671d0b4f11908416044753360b39", "aad1a32044da671d0b4f11908416044753360b39", "aad1a32044da671d0b4f11908416044753360b39", "aad1a32044da671d0b4f11908416044753360b39", "aad1a32044da671d0b4f11908416044753360b39", "aad1a32044da671d0b4f11908416044753360b39", "aad1a32044da671d0b4f11908416044753360b39", "aad1a32044da671d0b4f11908416044753360b39", "aad1a32044da671d0b4f11908416044753360b39", "aad1a32044da671d0b4f11908416044753360b39", "aad1a32044da671d0b4f11908416044753360b39", "aad1a32044da671d0b4f11908416044753360b39", "aad1a32044da671d0b4f11908416044753360b39", "d73a6b7f68d7bab25d134d3f85c6b63a86c206c5", "aad1a32044da671d0b4f11908416044753360b39", "aad1a32044da671d0b4f11908416044753360b39", "aad1a32044da671d0b4f11908416044753360b39", "aad1a32044da671d0b4f11908416044753360b39", "aad1a32044da671d0b4f11908416044753360b39", "aad1a32044da671d0b4f11908416044753360b39", "aad1a32044da671d0b4f11908416044753360b39", "aad1a32044da671d0b4f11908416044753360b39", "aad1a32044da671d0b4f11908416044753360b39", "aad1a32044da671d0b4f11908416044753360b39", "aad1a32044da671d0b4f11908416044753360b39", "aad1a32044da671d0b4f11908416044753360b39", "aad1a32044da671d0b4f11908416044753360b39", "aad1a32044da671d0b4f11908416044753360b39", "aad1a32044da671d0b4f11908416044753360b39", "aad1a32044da671d0b4f11908416044753360b39", "aad1a32044da671d0b4f11908416044753360b39", "aad1a32044da671d0b4f11908416044753360b39", "aad1a32044da671d0b4f11908416044753360b39", "aad1a32044da671d0b4f11908416044753360b39", "aad1a32044da671d0b4f11908416044753360b39" ]
[ "venv/Lib/site-packages/pandas/tests/plotting/test_hist_method.py", "venv/Lib/site-packages/statsmodels/tsa/filters/tests/test_filters.py", "venv/Lib/site-packages/plotnine/scales/scale_stroke.py", "venv/Lib/site-packages/scipy/sparse/linalg/isolve/tests/test_gcrotmk.py", "venv/Lib/site-packages/scipy/sparse/linalg/tests/test_interface.py", "venv/Lib/site-packages/statsmodels/discrete/tests/test_margins.py", "venv/Lib/site-packages/pandas/io/excel/_pyxlsb.py", "venv/Lib/site-packages/mpl_toolkits/tests/test_axes_grid.py", "venv/Lib/site-packages/statsmodels/base/transform.py", "venv/Lib/site-packages/statsmodels/sandbox/nonparametric/kdecovclass.py", "venv/Lib/site-packages/scipy/optimize/tests/test__basinhopping.py", "venv/Lib/site-packages/plotnine/tests/conftest.py", "venv/Lib/site-packages/statsmodels/sandbox/panel/panelmod.py", "venv/Lib/site-packages/statsmodels/base/tests/test_penalties.py", "venv/Lib/site-packages/statsmodels/graphics/mosaicplot.py", "venv/Lib/site-packages/statsmodels/stats/tests/test_influence.py", "venv/Lib/site-packages/statsmodels/genmod/tests/results/glmnet_r_results.py", "venv/Lib/site-packages/matplotlib/pyplot.py", "venv/Lib/site-packages/statsmodels/tsa/arima/model.py", "venv/Lib/site-packages/numpy/distutils/intelccompiler.py", "venv/Lib/site-packages/statsmodels/stats/tests/test_proportion.py", "venv/Lib/site-packages/matplotlib/legend.py", "venv/Lib/site-packages/statsmodels/stats/tests/test_mediation.py", "venv/Lib/site-packages/statsmodels/sandbox/nonparametric/tests/ex_gam_am_new.py", "venv/Lib/site-packages/statsmodels/sandbox/distributions/gof_new.py", "venv/Lib/site-packages/numpy/typing/tests/data/pass/arithmetic.py", "venv/Lib/site-packages/scipy/stats/tests/test_rank.py", "venv/Lib/site-packages/pandas/tests/groupby/aggregate/test_aggregate.py", "venv/Lib/site-packages/plotnine/geoms/annotation_stripes.py", "venv/Lib/site-packages/pandas/tests/extension/decimal/test_decimal.py", "venv/Lib/site-packages/statsmodels/stats/_diagnostic_other.py", "venv/Lib/site-packages/plotnine/stats/stat_smooth.py", "venv/Lib/site-packages/scipy/_lib/_util.py", "venv/Lib/site-packages/statsmodels/emplike/tests/test_aft.py", "venv/Lib/site-packages/scipy/signal/signaltools.py", "venv/Lib/site-packages/statsmodels/sandbox/mle.py", "venv/Lib/site-packages/statsmodels/genmod/generalized_linear_model.py", "venv/Lib/site-packages/statsmodels/discrete/tests/test_constrained.py", "venv/Lib/site-packages/numpy/polynomial/hermite_e.py", "venv/Lib/site-packages/statsmodels/tsa/statespace/cfa_simulation_smoother.py", "venv/Lib/site-packages/scipy/integrate/tests/test_integrate.py" ]
[ "\"\"\" Test cases for .hist method \"\"\"\n\nimport numpy as np\nimport pytest\n\nimport pandas.util._test_decorators as td\n\nfrom pandas import DataFrame, Index, Series, to_datetime\nimport pandas._testing as tm\nfrom pandas.tests.plotting.common import TestPlotBase, _check_plot_works\n\npytestmark = pytest.mark.slow\n\n\[email protected]_if_no_mpl\nclass TestSeriesPlots(TestPlotBase):\n def setup_method(self, method):\n TestPlotBase.setup_method(self, method)\n import matplotlib as mpl\n\n mpl.rcdefaults()\n\n self.ts = tm.makeTimeSeries()\n self.ts.name = \"ts\"\n\n def test_hist_legacy(self):\n _check_plot_works(self.ts.hist)\n _check_plot_works(self.ts.hist, grid=False)\n _check_plot_works(self.ts.hist, figsize=(8, 10))\n # _check_plot_works adds an ax so catch warning. see GH #13188\n with tm.assert_produces_warning(UserWarning):\n _check_plot_works(self.ts.hist, by=self.ts.index.month)\n with tm.assert_produces_warning(UserWarning):\n _check_plot_works(self.ts.hist, by=self.ts.index.month, bins=5)\n\n fig, ax = self.plt.subplots(1, 1)\n _check_plot_works(self.ts.hist, ax=ax)\n _check_plot_works(self.ts.hist, ax=ax, figure=fig)\n _check_plot_works(self.ts.hist, figure=fig)\n tm.close()\n\n fig, (ax1, ax2) = self.plt.subplots(1, 2)\n _check_plot_works(self.ts.hist, figure=fig, ax=ax1)\n _check_plot_works(self.ts.hist, figure=fig, ax=ax2)\n\n with pytest.raises(ValueError):\n self.ts.hist(by=self.ts.index, figure=fig)\n\n def test_hist_bins_legacy(self):\n df = DataFrame(np.random.randn(10, 2))\n ax = df.hist(bins=2)[0][0]\n assert len(ax.patches) == 2\n\n def test_hist_layout(self):\n df = self.hist_df\n with pytest.raises(ValueError):\n df.height.hist(layout=(1, 1))\n\n with pytest.raises(ValueError):\n df.height.hist(layout=[1, 1])\n\n def test_hist_layout_with_by(self):\n df = self.hist_df\n\n # _check_plot_works adds an `ax` kwarg to the method call\n # so we get a warning about an axis being cleared, even\n # though we don't explicing pass one, see GH #13188\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(df.height.hist, by=df.gender, layout=(2, 1))\n self._check_axes_shape(axes, axes_num=2, layout=(2, 1))\n\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(df.height.hist, by=df.gender, layout=(3, -1))\n self._check_axes_shape(axes, axes_num=2, layout=(3, 1))\n\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(df.height.hist, by=df.category, layout=(4, 1))\n self._check_axes_shape(axes, axes_num=4, layout=(4, 1))\n\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(df.height.hist, by=df.category, layout=(2, -1))\n self._check_axes_shape(axes, axes_num=4, layout=(2, 2))\n\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(df.height.hist, by=df.category, layout=(3, -1))\n self._check_axes_shape(axes, axes_num=4, layout=(3, 2))\n\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(df.height.hist, by=df.category, layout=(-1, 4))\n self._check_axes_shape(axes, axes_num=4, layout=(1, 4))\n\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(df.height.hist, by=df.classroom, layout=(2, 2))\n self._check_axes_shape(axes, axes_num=3, layout=(2, 2))\n\n axes = df.height.hist(by=df.category, layout=(4, 2), figsize=(12, 7))\n self._check_axes_shape(axes, axes_num=4, layout=(4, 2), figsize=(12, 7))\n\n def test_hist_no_overlap(self):\n from matplotlib.pyplot import gcf, subplot\n\n x = Series(np.random.randn(2))\n y = 
Series(np.random.randn(2))\n subplot(121)\n x.hist()\n subplot(122)\n y.hist()\n fig = gcf()\n axes = fig.axes\n assert len(axes) == 2\n\n def test_hist_by_no_extra_plots(self):\n df = self.hist_df\n axes = df.height.hist(by=df.gender) # noqa\n assert len(self.plt.get_fignums()) == 1\n\n def test_plot_fails_when_ax_differs_from_figure(self):\n from pylab import figure\n\n fig1 = figure()\n fig2 = figure()\n ax1 = fig1.add_subplot(111)\n with pytest.raises(AssertionError):\n self.ts.hist(ax=ax1, figure=fig2)\n\n @pytest.mark.parametrize(\n \"histtype, expected\",\n [\n (\"bar\", True),\n (\"barstacked\", True),\n (\"step\", False),\n (\"stepfilled\", True),\n ],\n )\n def test_histtype_argument(self, histtype, expected):\n # GH23992 Verify functioning of histtype argument\n ser = Series(np.random.randint(1, 10))\n ax = ser.hist(histtype=histtype)\n self._check_patches_all_filled(ax, filled=expected)\n\n @pytest.mark.parametrize(\n \"by, expected_axes_num, expected_layout\", [(None, 1, (1, 1)), (\"b\", 2, (1, 2))]\n )\n def test_hist_with_legend(self, by, expected_axes_num, expected_layout):\n # GH 6279 - Series histogram can have a legend\n index = 15 * [\"1\"] + 15 * [\"2\"]\n s = Series(np.random.randn(30), index=index, name=\"a\")\n s.index.name = \"b\"\n\n # Use default_axes=True when plotting method generate subplots itself\n axes = _check_plot_works(s.hist, default_axes=True, legend=True, by=by)\n self._check_axes_shape(axes, axes_num=expected_axes_num, layout=expected_layout)\n self._check_legend_labels(axes, \"a\")\n\n @pytest.mark.parametrize(\"by\", [None, \"b\"])\n def test_hist_with_legend_raises(self, by):\n # GH 6279 - Series histogram with legend and label raises\n index = 15 * [\"1\"] + 15 * [\"2\"]\n s = Series(np.random.randn(30), index=index, name=\"a\")\n s.index.name = \"b\"\n\n with pytest.raises(ValueError, match=\"Cannot use both legend and label\"):\n s.hist(legend=True, by=by, label=\"c\")\n\n\[email protected]_if_no_mpl\nclass TestDataFramePlots(TestPlotBase):\n def test_hist_df_legacy(self):\n from matplotlib.patches import Rectangle\n\n with tm.assert_produces_warning(UserWarning):\n _check_plot_works(self.hist_df.hist)\n\n # make sure layout is handled\n df = DataFrame(np.random.randn(100, 2))\n df[2] = to_datetime(\n np.random.randint(\n self.start_date_to_int64,\n self.end_date_to_int64,\n size=100,\n dtype=np.int64,\n )\n )\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(df.hist, grid=False)\n self._check_axes_shape(axes, axes_num=3, layout=(2, 2))\n assert not axes[1, 1].get_visible()\n\n _check_plot_works(df[[2]].hist)\n df = DataFrame(np.random.randn(100, 1))\n _check_plot_works(df.hist)\n\n # make sure layout is handled\n df = DataFrame(np.random.randn(100, 5))\n df[5] = to_datetime(\n np.random.randint(\n self.start_date_to_int64,\n self.end_date_to_int64,\n size=100,\n dtype=np.int64,\n )\n )\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(df.hist, layout=(4, 2))\n self._check_axes_shape(axes, axes_num=6, layout=(4, 2))\n\n # make sure sharex, sharey is handled\n with tm.assert_produces_warning(UserWarning):\n _check_plot_works(df.hist, sharex=True, sharey=True)\n\n # handle figsize arg\n with tm.assert_produces_warning(UserWarning):\n _check_plot_works(df.hist, figsize=(8, 10))\n\n # check bins argument\n with tm.assert_produces_warning(UserWarning):\n _check_plot_works(df.hist, bins=5)\n\n # make sure xlabelsize and xrot are handled\n ser = df[0]\n xf, yf = 20, 18\n xrot, yrot = 30, 40\n axes = 
ser.hist(xlabelsize=xf, xrot=xrot, ylabelsize=yf, yrot=yrot)\n self._check_ticks_props(\n axes, xlabelsize=xf, xrot=xrot, ylabelsize=yf, yrot=yrot\n )\n\n xf, yf = 20, 18\n xrot, yrot = 30, 40\n axes = df.hist(xlabelsize=xf, xrot=xrot, ylabelsize=yf, yrot=yrot)\n self._check_ticks_props(\n axes, xlabelsize=xf, xrot=xrot, ylabelsize=yf, yrot=yrot\n )\n\n tm.close()\n\n ax = ser.hist(cumulative=True, bins=4, density=True)\n # height of last bin (index 5) must be 1.0\n rects = [x for x in ax.get_children() if isinstance(x, Rectangle)]\n tm.assert_almost_equal(rects[-1].get_height(), 1.0)\n\n tm.close()\n ax = ser.hist(log=True)\n # scale of y must be 'log'\n self._check_ax_scales(ax, yaxis=\"log\")\n\n tm.close()\n\n # propagate attr exception from matplotlib.Axes.hist\n with pytest.raises(AttributeError):\n ser.hist(foo=\"bar\")\n\n def test_hist_non_numerical_or_datetime_raises(self):\n # gh-10444, GH32590\n df = DataFrame(\n {\n \"a\": np.random.rand(10),\n \"b\": np.random.randint(0, 10, 10),\n \"c\": to_datetime(\n np.random.randint(\n 1582800000000000000, 1583500000000000000, 10, dtype=np.int64\n )\n ),\n \"d\": to_datetime(\n np.random.randint(\n 1582800000000000000, 1583500000000000000, 10, dtype=np.int64\n ),\n utc=True,\n ),\n }\n )\n df_o = df.astype(object)\n\n msg = \"hist method requires numerical or datetime columns, nothing to plot.\"\n with pytest.raises(ValueError, match=msg):\n df_o.hist()\n\n def test_hist_layout(self):\n df = DataFrame(np.random.randn(100, 2))\n df[2] = to_datetime(\n np.random.randint(\n self.start_date_to_int64,\n self.end_date_to_int64,\n size=100,\n dtype=np.int64,\n )\n )\n\n layout_to_expected_size = (\n {\"layout\": None, \"expected_size\": (2, 2)}, # default is 2x2\n {\"layout\": (2, 2), \"expected_size\": (2, 2)},\n {\"layout\": (4, 1), \"expected_size\": (4, 1)},\n {\"layout\": (1, 4), \"expected_size\": (1, 4)},\n {\"layout\": (3, 3), \"expected_size\": (3, 3)},\n {\"layout\": (-1, 4), \"expected_size\": (1, 4)},\n {\"layout\": (4, -1), \"expected_size\": (4, 1)},\n {\"layout\": (-1, 2), \"expected_size\": (2, 2)},\n {\"layout\": (2, -1), \"expected_size\": (2, 2)},\n )\n\n for layout_test in layout_to_expected_size:\n axes = df.hist(layout=layout_test[\"layout\"])\n expected = layout_test[\"expected_size\"]\n self._check_axes_shape(axes, axes_num=3, layout=expected)\n\n # layout too small for all 4 plots\n with pytest.raises(ValueError):\n df.hist(layout=(1, 1))\n\n # invalid format for layout\n with pytest.raises(ValueError):\n df.hist(layout=(1,))\n with pytest.raises(ValueError):\n df.hist(layout=(-1, -1))\n\n # GH 9351\n def test_tight_layout(self):\n df = DataFrame(np.random.randn(100, 2))\n df[2] = to_datetime(\n np.random.randint(\n self.start_date_to_int64,\n self.end_date_to_int64,\n size=100,\n dtype=np.int64,\n )\n )\n # Use default_axes=True when plotting method generate subplots itself\n _check_plot_works(df.hist, default_axes=True)\n self.plt.tight_layout()\n\n tm.close()\n\n def test_hist_subplot_xrot(self):\n # GH 30288\n df = DataFrame(\n {\n \"length\": [1.5, 0.5, 1.2, 0.9, 3],\n \"animal\": [\"pig\", \"rabbit\", \"pig\", \"pig\", \"rabbit\"],\n }\n )\n # Use default_axes=True when plotting method generate subplots itself\n axes = _check_plot_works(\n df.hist,\n default_axes=True,\n filterwarnings=\"always\",\n column=\"length\",\n by=\"animal\",\n bins=5,\n xrot=0,\n )\n self._check_ticks_props(axes, xrot=0)\n\n @pytest.mark.parametrize(\n \"column, expected\",\n [\n (None, [\"width\", \"length\", \"height\"]),\n 
([\"length\", \"width\", \"height\"], [\"length\", \"width\", \"height\"]),\n ],\n )\n def test_hist_column_order_unchanged(self, column, expected):\n # GH29235\n\n df = DataFrame(\n {\n \"width\": [0.7, 0.2, 0.15, 0.2, 1.1],\n \"length\": [1.5, 0.5, 1.2, 0.9, 3],\n \"height\": [3, 0.5, 3.4, 2, 1],\n },\n index=[\"pig\", \"rabbit\", \"duck\", \"chicken\", \"horse\"],\n )\n\n # Use default_axes=True when plotting method generate subplots itself\n axes = _check_plot_works(\n df.hist,\n default_axes=True,\n column=column,\n layout=(1, 3),\n )\n result = [axes[0, i].get_title() for i in range(3)]\n assert result == expected\n\n @pytest.mark.parametrize(\n \"histtype, expected\",\n [\n (\"bar\", True),\n (\"barstacked\", True),\n (\"step\", False),\n (\"stepfilled\", True),\n ],\n )\n def test_histtype_argument(self, histtype, expected):\n # GH23992 Verify functioning of histtype argument\n df = DataFrame(np.random.randint(1, 10, size=(100, 2)), columns=[\"a\", \"b\"])\n ax = df.hist(histtype=histtype)\n self._check_patches_all_filled(ax, filled=expected)\n\n @pytest.mark.parametrize(\"by\", [None, \"c\"])\n @pytest.mark.parametrize(\"column\", [None, \"b\"])\n def test_hist_with_legend(self, by, column):\n # GH 6279 - DataFrame histogram can have a legend\n expected_axes_num = 1 if by is None and column is not None else 2\n expected_layout = (1, expected_axes_num)\n expected_labels = column or [\"a\", \"b\"]\n if by is not None:\n expected_labels = [expected_labels] * 2\n\n index = Index(15 * [\"1\"] + 15 * [\"2\"], name=\"c\")\n df = DataFrame(np.random.randn(30, 2), index=index, columns=[\"a\", \"b\"])\n\n # Use default_axes=True when plotting method generate subplots itself\n axes = _check_plot_works(\n df.hist,\n default_axes=True,\n legend=True,\n by=by,\n column=column,\n )\n\n self._check_axes_shape(axes, axes_num=expected_axes_num, layout=expected_layout)\n if by is None and column is None:\n axes = axes[0]\n for expected_label, ax in zip(expected_labels, axes):\n self._check_legend_labels(ax, expected_label)\n\n @pytest.mark.parametrize(\"by\", [None, \"c\"])\n @pytest.mark.parametrize(\"column\", [None, \"b\"])\n def test_hist_with_legend_raises(self, by, column):\n # GH 6279 - DataFrame histogram with legend and label raises\n index = Index(15 * [\"1\"] + 15 * [\"2\"], name=\"c\")\n df = DataFrame(np.random.randn(30, 2), index=index, columns=[\"a\", \"b\"])\n\n with pytest.raises(ValueError, match=\"Cannot use both legend and label\"):\n df.hist(legend=True, by=by, column=column, label=\"d\")\n\n\[email protected]_if_no_mpl\nclass TestDataFrameGroupByPlots(TestPlotBase):\n def test_grouped_hist_legacy(self):\n from matplotlib.patches import Rectangle\n\n from pandas.plotting._matplotlib.hist import _grouped_hist\n\n df = DataFrame(np.random.randn(500, 1), columns=[\"A\"])\n df[\"B\"] = to_datetime(\n np.random.randint(\n self.start_date_to_int64,\n self.end_date_to_int64,\n size=500,\n dtype=np.int64,\n )\n )\n df[\"C\"] = np.random.randint(0, 4, 500)\n df[\"D\"] = [\"X\"] * 500\n\n axes = _grouped_hist(df.A, by=df.C)\n self._check_axes_shape(axes, axes_num=4, layout=(2, 2))\n\n tm.close()\n axes = df.hist(by=df.C)\n self._check_axes_shape(axes, axes_num=4, layout=(2, 2))\n\n tm.close()\n # group by a key with single value\n axes = df.hist(by=\"D\", rot=30)\n self._check_axes_shape(axes, axes_num=1, layout=(1, 1))\n self._check_ticks_props(axes, xrot=30)\n\n tm.close()\n # make sure kwargs to hist are handled\n xf, yf = 20, 18\n xrot, yrot = 30, 40\n\n axes = _grouped_hist(\n 
df.A,\n by=df.C,\n cumulative=True,\n bins=4,\n xlabelsize=xf,\n xrot=xrot,\n ylabelsize=yf,\n yrot=yrot,\n density=True,\n )\n # height of last bin (index 5) must be 1.0\n for ax in axes.ravel():\n rects = [x for x in ax.get_children() if isinstance(x, Rectangle)]\n height = rects[-1].get_height()\n tm.assert_almost_equal(height, 1.0)\n self._check_ticks_props(\n axes, xlabelsize=xf, xrot=xrot, ylabelsize=yf, yrot=yrot\n )\n\n tm.close()\n axes = _grouped_hist(df.A, by=df.C, log=True)\n # scale of y must be 'log'\n self._check_ax_scales(axes, yaxis=\"log\")\n\n tm.close()\n # propagate attr exception from matplotlib.Axes.hist\n with pytest.raises(AttributeError):\n _grouped_hist(df.A, by=df.C, foo=\"bar\")\n\n msg = \"Specify figure size by tuple instead\"\n with pytest.raises(ValueError, match=msg):\n df.hist(by=\"C\", figsize=\"default\")\n\n def test_grouped_hist_legacy2(self):\n n = 10\n weight = Series(np.random.normal(166, 20, size=n))\n height = Series(np.random.normal(60, 10, size=n))\n with tm.RNGContext(42):\n gender_int = np.random.choice([0, 1], size=n)\n df_int = DataFrame({\"height\": height, \"weight\": weight, \"gender\": gender_int})\n gb = df_int.groupby(\"gender\")\n axes = gb.hist()\n assert len(axes) == 2\n assert len(self.plt.get_fignums()) == 2\n tm.close()\n\n def test_grouped_hist_layout(self):\n df = self.hist_df\n msg = \"Layout of 1x1 must be larger than required size 2\"\n with pytest.raises(ValueError, match=msg):\n df.hist(column=\"weight\", by=df.gender, layout=(1, 1))\n\n msg = \"Layout of 1x3 must be larger than required size 4\"\n with pytest.raises(ValueError, match=msg):\n df.hist(column=\"height\", by=df.category, layout=(1, 3))\n\n msg = \"At least one dimension of layout must be positive\"\n with pytest.raises(ValueError, match=msg):\n df.hist(column=\"height\", by=df.category, layout=(-1, -1))\n\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(\n df.hist, column=\"height\", by=df.gender, layout=(2, 1)\n )\n self._check_axes_shape(axes, axes_num=2, layout=(2, 1))\n\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(\n df.hist, column=\"height\", by=df.gender, layout=(2, -1)\n )\n self._check_axes_shape(axes, axes_num=2, layout=(2, 1))\n\n axes = df.hist(column=\"height\", by=df.category, layout=(4, 1))\n self._check_axes_shape(axes, axes_num=4, layout=(4, 1))\n\n axes = df.hist(column=\"height\", by=df.category, layout=(-1, 1))\n self._check_axes_shape(axes, axes_num=4, layout=(4, 1))\n\n axes = df.hist(column=\"height\", by=df.category, layout=(4, 2), figsize=(12, 8))\n self._check_axes_shape(axes, axes_num=4, layout=(4, 2), figsize=(12, 8))\n tm.close()\n\n # GH 6769\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(\n df.hist, column=\"height\", by=\"classroom\", layout=(2, 2)\n )\n self._check_axes_shape(axes, axes_num=3, layout=(2, 2))\n\n # without column\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(df.hist, by=\"classroom\")\n self._check_axes_shape(axes, axes_num=3, layout=(2, 2))\n\n axes = df.hist(by=\"gender\", layout=(3, 5))\n self._check_axes_shape(axes, axes_num=2, layout=(3, 5))\n\n axes = df.hist(column=[\"height\", \"weight\", \"category\"])\n self._check_axes_shape(axes, axes_num=3, layout=(2, 2))\n\n def test_grouped_hist_multiple_axes(self):\n # GH 6970, GH 7069\n df = self.hist_df\n\n fig, axes = self.plt.subplots(2, 3)\n returned = df.hist(column=[\"height\", \"weight\", \"category\"], ax=axes[0])\n 
self._check_axes_shape(returned, axes_num=3, layout=(1, 3))\n tm.assert_numpy_array_equal(returned, axes[0])\n assert returned[0].figure is fig\n returned = df.hist(by=\"classroom\", ax=axes[1])\n self._check_axes_shape(returned, axes_num=3, layout=(1, 3))\n tm.assert_numpy_array_equal(returned, axes[1])\n assert returned[0].figure is fig\n\n with pytest.raises(ValueError):\n fig, axes = self.plt.subplots(2, 3)\n # pass different number of axes from required\n axes = df.hist(column=\"height\", ax=axes)\n\n def test_axis_share_x(self):\n df = self.hist_df\n # GH4089\n ax1, ax2 = df.hist(column=\"height\", by=df.gender, sharex=True)\n\n # share x\n assert ax1._shared_x_axes.joined(ax1, ax2)\n assert ax2._shared_x_axes.joined(ax1, ax2)\n\n # don't share y\n assert not ax1._shared_y_axes.joined(ax1, ax2)\n assert not ax2._shared_y_axes.joined(ax1, ax2)\n\n def test_axis_share_y(self):\n df = self.hist_df\n ax1, ax2 = df.hist(column=\"height\", by=df.gender, sharey=True)\n\n # share y\n assert ax1._shared_y_axes.joined(ax1, ax2)\n assert ax2._shared_y_axes.joined(ax1, ax2)\n\n # don't share x\n assert not ax1._shared_x_axes.joined(ax1, ax2)\n assert not ax2._shared_x_axes.joined(ax1, ax2)\n\n def test_axis_share_xy(self):\n df = self.hist_df\n ax1, ax2 = df.hist(column=\"height\", by=df.gender, sharex=True, sharey=True)\n\n # share both x and y\n assert ax1._shared_x_axes.joined(ax1, ax2)\n assert ax2._shared_x_axes.joined(ax1, ax2)\n\n assert ax1._shared_y_axes.joined(ax1, ax2)\n assert ax2._shared_y_axes.joined(ax1, ax2)\n\n @pytest.mark.parametrize(\n \"histtype, expected\",\n [\n (\"bar\", True),\n (\"barstacked\", True),\n (\"step\", False),\n (\"stepfilled\", True),\n ],\n )\n def test_histtype_argument(self, histtype, expected):\n # GH23992 Verify functioning of histtype argument\n df = DataFrame(np.random.randint(1, 10, size=(100, 2)), columns=[\"a\", \"b\"])\n ax = df.hist(by=\"a\", histtype=histtype)\n self._check_patches_all_filled(ax, filled=expected)\n", "from statsmodels.compat.pandas import assert_frame_equal, make_dataframe\n\nfrom datetime import datetime\nimport numpy as np\nfrom numpy.testing import (assert_almost_equal, assert_equal, assert_allclose,\n assert_raises, assert_)\nfrom numpy import array, column_stack\n\nfrom statsmodels.tsa.filters._utils import pandas_wrapper\nfrom statsmodels.datasets import macrodata\nfrom pandas import DataFrame, date_range, concat\nfrom statsmodels.tsa.filters.api import (bkfilter, hpfilter, cffilter,\n convolution_filter, recursive_filter)\n\n\ndef test_bking1d():\n # Test Baxter King band-pass filter. 
Results are taken from Stata\n bking_results = array([\n 7.320813, 2.886914, -6.818976, -13.49436,\n -13.27936, -9.405913, -5.691091, -5.133076, -7.273468,\n -9.243364, -8.482916, -4.447764, 2.406559, 10.68433,\n 19.46414, 28.09749, 34.11066, 33.48468, 24.64598, 9.952399,\n -4.265528, -12.59471, -13.46714, -9.049501, -3.011248,\n .5655082, 2.897976, 7.406077, 14.67959, 18.651, 13.05891,\n -2.945415, -24.08659, -41.86147, -48.68383, -43.32689,\n -31.66654, -20.38356, -13.76411, -9.978693, -3.7704, 10.27108,\n 31.02847, 51.87613, 66.93117, 73.51951, 73.4053, 69.17468,\n 59.8543, 38.23899, -.2604809, -49.0107, -91.1128, -112.1574,\n -108.3227, -86.51453, -59.91258, -40.01185, -29.70265,\n -22.76396, -13.08037, 1.913622, 20.44045, 37.32873, 46.79802,\n 51.95937, 59.67393, 70.50803, 81.27311, 83.53191, 67.72536,\n 33.78039, -6.509092, -37.31579, -46.05207, -29.81496, 1.416417,\n 28.31503,\n 32.90134, 8.949259, -35.41895, -84.65775, -124.4288, -144.6036,\n -140.2204, -109.2624, -53.6901, 15.07415, 74.44268, 104.0403,\n 101.0725, 76.58291, 49.27925, 36.15751, 36.48799, 37.60897,\n 27.75998, 4.216643, -23.20579, -39.33292, -36.6134, -20.90161,\n -4.143123, 5.48432, 9.270075, 13.69573, 22.16675, 33.01987,\n 41.93186, 47.12222, 48.62164, 47.30701, 40.20537, 22.37898,\n -7.133002, -43.3339, -78.51229, -101.3684, -105.2179,\n -90.97147,\n -68.30824, -48.10113, -35.60709, -31.15775, -31.82346,\n -32.49278, -28.22499, -14.42852, 10.1827, 36.64189, 49.43468,\n 38.75517, 6.447761, -33.15883, -62.60446, -72.87829, -66.54629,\n -52.61205, -38.06676, -26.19963, -16.51492, -7.007577,\n .6125674,\n 7.866972, 14.8123, 22.52388, 30.65265, 39.47801, 49.05027,\n 59.02925,\n 72.88999, 95.08865, 125.8983, 154.4283, 160.7638, 130.6092,\n 67.84406, -7.070272, -68.08128, -99.39944, -104.911,\n -100.2372, -98.11596, -104.2051, -114.0125, -113.3475,\n -92.98669, -51.91707, -.7313812, 43.22938, 64.62762, 64.07226,\n 59.35707, 67.06026, 91.87247, 124.4591, 151.2402, 163.0648,\n 154.6432])\n X = macrodata.load_pandas().data['realinv'].values\n Y = bkfilter(X, 6, 32, 12)\n assert_almost_equal(Y, bking_results, 4)\n\n\ndef test_bking2d():\n # Test Baxter-King band-pass filter with 2d input\n bking_results = array([\n [7.320813, -.0374475], [2.886914, -.0430094],\n [-6.818976, -.053456], [-13.49436, -.0620739], [-13.27936, -.0626929],\n [-9.405913, -.0603022], [-5.691091, -.0630016], [-5.133076, -.0832268],\n [-7.273468, -.1186448], [-9.243364, -.1619868], [-8.482916, -.2116604],\n [-4.447764, -.2670747], [2.406559, -.3209931], [10.68433, -.3583075],\n [19.46414, -.3626742], [28.09749, -.3294618], [34.11066, -.2773388],\n [33.48468, -.2436127], [24.64598, -.2605531], [9.952399, -.3305166],\n [-4.265528, -.4275561], [-12.59471, -.5076068], [-13.46714, -.537573],\n [-9.049501, -.5205845], [-3.011248, -.481673], [.5655082, -.4403994],\n [2.897976, -.4039957], [7.406077, -.3537394], [14.67959, -.2687359],\n [18.651, -.1459743], [13.05891, .0014926], [-2.945415, .1424277],\n [-24.08659, .2451936], [-41.86147, .288541], [-48.68383, .2727282],\n [-43.32689, .1959127], [-31.66654, .0644874], [-20.38356, -.1158372],\n [-13.76411, -.3518627], [-9.978693, -.6557535], [-3.7704, -1.003754],\n [10.27108, -1.341632], [31.02847, -1.614486], [51.87613, -1.779089],\n [66.93117, -1.807459], [73.51951, -1.679688], [73.4053, -1.401012],\n [69.17468, -.9954996], [59.8543, -.511261], [38.23899, -.0146745],\n [-.2604809, .4261311], [-49.0107, .7452514], [-91.1128, .8879492],\n [-112.1574, .8282748], [-108.3227, .5851508], [-86.51453, .2351699],\n 
[-59.91258, -.1208998], [-40.01185, -.4297895], [-29.70265, -.6821963],\n [-22.76396, -.9234254], [-13.08037, -1.217539], [1.913622, -1.57367],\n [20.44045, -1.927008], [37.32873, -2.229565], [46.79802, -2.463154],\n [51.95937, -2.614697], [59.67393, -2.681357], [70.50803, -2.609654],\n [81.27311, -2.301618], [83.53191, -1.720974], [67.72536, -.9837123],\n [33.78039, -.2261613], [-6.509092, .4546985], [-37.31579, 1.005751],\n [-46.05207, 1.457224], [-29.81496, 1.870815], [1.416417, 2.263313],\n [28.31503, 2.599906], [32.90134, 2.812282], [8.949259, 2.83358],\n [-35.41895, 2.632667], [-84.65775, 2.201077], [-124.4288, 1.598951],\n [-144.6036, .9504762], [-140.2204, .4187932], [-109.2624, .1646726],\n [-53.6901, .2034265], [15.07415, .398165], [74.44268, .5427476],\n [104.0403, .5454975], [101.0725, .4723354], [76.58291, .4626823],\n [49.27925, .5840143], [36.15751, .7187981], [36.48799, .6058422],\n [37.60897, .1221227], [27.75998, -.5891272], [4.216643, -1.249841],\n [-23.20579, -1.594972], [-39.33292, -1.545968], [-36.6134, -1.275494],\n [-20.90161, -1.035783], [-4.143123, -.9971732], [5.48432, -1.154264],\n [9.270075, -1.29987], [13.69573, -1.240559], [22.16675, -.9662656],\n [33.01987, -.6420301], [41.93186, -.4698712], [47.12222, -.4527797],\n [48.62164, -.4407153], [47.30701, -.2416076], [40.20537, .2317583],\n [22.37898, .8710276], [-7.133002, 1.426177], [-43.3339, 1.652785],\n [-78.51229, 1.488021], [-101.3684, 1.072096], [-105.2179, .6496446],\n [-90.97147, .4193682], [-68.30824, .41847], [-48.10113, .5253419],\n [-35.60709, .595076], [-31.15775, .5509905], [-31.82346, .3755519],\n [-32.49278, .1297979], [-28.22499, -.0916165], [-14.42852, -.2531037],\n [10.1827, -.3220784], [36.64189, -.2660561], [49.43468, -.1358522],\n [38.75517, -.0279508], [6.447761, .0168735], [-33.15883, .0315687],\n [-62.60446, .0819507], [-72.87829, .2274033], [-66.54629, .4641401],\n [-52.61205, .7211093], [-38.06676, .907773], [-26.19963, .9387103],\n [-16.51492, .7940786], [-7.007577, .5026631], [.6125674, .1224996],\n [7.866972, -.2714422], [14.8123, -.6273921], [22.52388, -.9124271],\n [30.65265, -1.108861], [39.47801, -1.199206], [49.05027, -1.19908],\n [59.02925, -1.139046], [72.88999, -.9775021], [95.08865, -.6592603],\n [125.8983, -.1609712], [154.4283, .4796201], [160.7638, 1.100565],\n [130.6092, 1.447148], [67.84406, 1.359608], [-7.070272, .8931825],\n [-68.08128, .2619787], [-99.39944, -.252208], [-104.911, -.4703874],\n [-100.2372, -.4430657], [-98.11596, -.390683], [-104.2051, -.5647846],\n [-114.0125, -.9397582], [-113.3475, -1.341633], [-92.98669, -1.567337],\n [-51.91707, -1.504943], [-.7313812, -1.30576], [43.22938, -1.17151],\n [64.62762, -1.136151], [64.07226, -1.050555], [59.35707, -.7308369],\n [67.06026, -.1766731], [91.87247, .3898467], [124.4591, .8135461],\n [151.2402, .9644226], [163.0648, .6865934], [154.6432, .0115685]])\n\n mdata = macrodata.load_pandas()\n X = mdata.data[['realinv', 'cpi']].values.astype(float)\n Y = bkfilter(X, 6, 32, 12)\n assert_almost_equal(Y, bking_results, 4)\n\n\ndef test_hpfilter():\n # Test Hodrick-Prescott Filter. 
Results taken from Stata.\n hpfilt_res = array([\n [3.951191484487844718e+01, 2.670837085155121713e+03],\n [8.008853245681075350e+01, 2.698712467543189177e+03],\n [4.887545512195401898e+01, 2.726612544878045810e+03],\n [3.059193256079834100e+01, 2.754612067439201837e+03],\n [6.488266733421960453e+01, 2.782816332665780465e+03],\n [2.304024204546703913e+01, 2.811349757954532834e+03],\n [-1.355312369487364776e+00, 2.840377312369487299e+03],\n [-6.746236512580753697e+01, 2.870078365125807522e+03],\n [-8.136743836853429457e+01, 2.900631438368534418e+03],\n [-6.016789026443257171e+01, 2.932172890264432681e+03],\n [-4.636922433138215638e+01, 2.964788224331382025e+03],\n [-2.069533915570400495e+01, 2.998525339155703932e+03],\n [-2.162152558595607843e+00, 3.033403152558595593e+03],\n [-4.718647774311648391e+00, 3.069427647774311481e+03],\n [-1.355645669169007306e+01, 3.106603456691690099e+03],\n [-4.436926204475639679e+01, 3.144932262044756499e+03],\n [-4.332027378211660107e+01, 3.184407273782116590e+03],\n [-4.454697106352068658e+01, 3.224993971063520803e+03],\n [-2.629875787765286077e+01, 3.266630757877652741e+03],\n [-4.426119635629265758e+01, 3.309228196356292756e+03],\n [-1.443441190762496262e+01, 3.352680411907625057e+03],\n [-2.026686669186437939e+01, 3.396853866691864368e+03],\n [-1.913700136208899494e+01, 3.441606001362089046e+03],\n [-5.482458977940950717e+01, 3.486781589779409387e+03],\n [-1.596244517937793717e+01, 3.532213445179378141e+03],\n [-1.374011542874541192e+01, 3.577700115428745448e+03],\n [1.325482813403914406e+01, 3.623030171865960710e+03],\n [5.603040174253828809e+01, 3.667983598257461836e+03],\n [1.030743373627105939e+02, 3.712348662637289181e+03],\n [7.217534795943993231e+01, 3.755948652040559864e+03],\n [5.462972503693208637e+01, 3.798671274963067845e+03],\n [4.407065050666142270e+01, 3.840449349493338559e+03],\n [3.749016270204992907e+01, 3.881249837297949853e+03],\n [-1.511244199923112319e+00, 3.921067244199923152e+03],\n [-9.093507374079763395e+00, 3.959919507374079785e+03],\n [-1.685361946760258434e+01, 3.997823619467602384e+03],\n [2.822211031434289907e+01, 4.034790889685657021e+03],\n [6.117590627896424849e+01, 4.070822093721035344e+03],\n [5.433135391434370831e+01, 4.105935646085656117e+03],\n [3.810480376716623141e+01, 4.140188196232833434e+03],\n [7.042964928802848590e+01, 4.173670350711971878e+03],\n [4.996346842507591646e+01, 4.206496531574924120e+03],\n [4.455282059571254649e+01, 4.238825179404287155e+03],\n [-7.584961950576143863e+00, 4.270845961950576566e+03],\n [-4.620339247697120300e+01, 4.302776392476971523e+03],\n [-7.054024364552969928e+01, 4.334829243645529459e+03],\n [-6.492941099801464588e+01, 4.367188410998014660e+03],\n [-1.433567024239555394e+02, 4.399993702423955256e+03],\n [-5.932834493089012540e+01, 4.433344344930889747e+03],\n [-6.842096758743628016e+01, 4.467249967587436004e+03],\n [-6.774011924654860195e+01, 4.501683119246548813e+03],\n [-9.030958565658056614e+01, 4.536573585656580690e+03],\n [-4.603981499136807543e+01, 4.571808814991368308e+03],\n [2.588118806672991923e+01, 4.607219811933269739e+03],\n [3.489419371912299539e+01, 4.642608806280876706e+03],\n [7.675179642495095322e+01, 4.677794203575049323e+03],\n [1.635497817724171910e+02, 4.712616218227582976e+03],\n [1.856079654765617306e+02, 4.746963034523438182e+03],\n [1.254269446392718237e+02, 4.780825055360728584e+03],\n [1.387413113837174024e+02, 4.814308688616282780e+03],\n [6.201826599282230745e+01, 4.847598734007177882e+03],\n [4.122129542972197669e+01, 
4.880966704570278125e+03],\n [-4.120287475842360436e+01, 4.914722874758424041e+03],\n [-9.486328233441963675e+01, 4.949203282334419782e+03],\n [-1.894232132641573116e+02, 4.984718213264157384e+03],\n [-1.895766639620087517e+02, 5.021518663962008759e+03],\n [-1.464092413342650616e+02, 5.059737241334265491e+03],\n [-1.218770668721217589e+02, 5.099388066872122181e+03],\n [-4.973075629078175552e+01, 5.140393756290781312e+03],\n [-5.365375213897277717e+01, 5.182600752138972894e+03],\n [-7.175241524251214287e+01, 5.225824415242512259e+03],\n [-7.834757283225462743e+01, 5.269846572832254424e+03],\n [-6.264220687943907251e+01, 5.314404206879438789e+03],\n [-3.054332122210325906e+00, 5.359185332122210639e+03],\n [4.808218808024685131e+01, 5.403838811919753425e+03],\n [2.781399326736391231e+00, 5.448011600673263274e+03],\n [-2.197570415173231595e+01, 5.491380704151732061e+03],\n [1.509441335012807031e+02, 5.533624866498719712e+03],\n [1.658909029574851957e+02, 5.574409097042514986e+03],\n [2.027292548049981633e+02, 5.613492745195001589e+03],\n [1.752101578176061594e+02, 5.650738842182393455e+03],\n [1.452808749847536092e+02, 5.686137125015246056e+03],\n [1.535481629475025329e+02, 5.719786837052497503e+03],\n [1.376169777998875361e+02, 5.751878022200112355e+03],\n [1.257703080340770612e+02, 5.782696691965922582e+03],\n [-2.524186846895645431e+01, 5.812614868468956047e+03],\n [-6.546618027042404719e+01, 5.842083180270424236e+03],\n [1.192352023580315290e+01, 5.871536479764196883e+03],\n [1.043482970188742911e+02, 5.901368702981125352e+03],\n [2.581376184768396342e+01, 5.931981238152316109e+03],\n [6.634330880534071184e+01, 5.963840691194659485e+03],\n [-4.236780162594641297e+01, 5.997429801625946311e+03],\n [-1.759397735321817891e+02, 6.033272773532181418e+03],\n [-1.827933311233055065e+02, 6.071867331123305121e+03],\n [-2.472312362505917918e+02, 6.113601236250591683e+03],\n [-2.877470049336488955e+02, 6.158748004933649099e+03],\n [-2.634066336693540507e+02, 6.207426633669354487e+03],\n [-1.819572770763625158e+02, 6.259576277076362203e+03],\n [-1.175034606274621183e+02, 6.314971460627461965e+03],\n [-4.769898649718379602e+01, 6.373272986497183410e+03],\n [1.419578280287896632e+01, 6.434068217197121157e+03],\n [6.267929662760798237e+01, 6.496914703372392069e+03],\n [6.196413196753746888e+01, 6.561378868032462378e+03],\n [5.019769125317907310e+01, 6.627066308746821051e+03],\n [4.665364933213822951e+01, 6.693621350667861407e+03],\n [3.662430749527266016e+01, 6.760719692504727391e+03],\n [7.545680850246480986e+01, 6.828066191497535328e+03],\n [6.052940492147536133e+01, 6.895388595078524304e+03],\n [6.029518881462354329e+01, 6.962461811185376064e+03],\n [2.187042136652689805e+01, 7.029098578633473153e+03],\n [2.380067926824722235e+01, 7.095149320731752596e+03],\n [-7.119129802169481991e+00, 7.160478129802169860e+03],\n [-3.194497359120850888e+01, 7.224963973591208742e+03],\n [-1.897137038934124575e+01, 7.288481370389341464e+03],\n [-1.832687287845146784e+01, 7.350884872878451461e+03],\n [4.600482336597542599e+01, 7.412017176634024509e+03],\n [2.489047706403016491e+01, 7.471709522935970199e+03],\n [6.305909392127250612e+01, 7.529821906078727807e+03],\n [4.585212309498183458e+01, 7.586229876905018500e+03],\n [9.314260180878318351e+01, 7.640848398191216802e+03],\n [1.129819097095369216e+02, 7.693621090290463144e+03],\n [1.204662123176703972e+02, 7.744549787682329224e+03],\n [1.336860614601246198e+02, 7.793706938539875409e+03],\n [1.034567175813735957e+02, 7.841240282418626521e+03],\n 
[1.403118873372050075e+02, 7.887381112662795204e+03],\n [1.271726169351004501e+02, 7.932425383064899506e+03],\n [8.271925765282139764e+01, 7.976756742347178260e+03],\n [-3.197432211752584408e+01, 8.020838322117525422e+03],\n [-1.150209535194062482e+02, 8.065184953519406008e+03],\n [-1.064694837456772802e+02, 8.110291483745677397e+03],\n [-1.190428718925368230e+02, 8.156580871892536379e+03],\n [-1.353635336292991269e+02, 8.204409533629299403e+03],\n [-9.644348283027102298e+01, 8.254059482830271008e+03],\n [-6.143413116116607853e+01, 8.305728131161165948e+03],\n [-3.019161311097923317e+01, 8.359552613110980019e+03],\n [1.384333163552582846e+00, 8.415631666836447039e+03],\n [-4.156016073666614830e+01, 8.474045160736666730e+03],\n [-4.843882841860977351e+01, 8.534873828418609264e+03],\n [-6.706442838867042155e+01, 8.598172428388670596e+03],\n [-2.019644488579979225e+01, 8.663965444885800025e+03],\n [-4.316446881084630149e+00, 8.732235446881084499e+03],\n [4.435061943264736328e+01, 8.802952380567352520e+03],\n [2.820550564155564643e+01, 8.876083494358445023e+03],\n [5.155624419490777655e+01, 8.951623755805092514e+03],\n [-4.318760899315748247e+00, 9.029585760899315574e+03],\n [-6.534632828542271454e+01, 9.110014328285422380e+03],\n [-7.226757738268497633e+01, 9.192951577382684263e+03],\n [-9.412378615444868046e+01, 9.278398786154448317e+03],\n [-1.191240653288368776e+02, 9.366312065328836979e+03],\n [-4.953669826751865912e+01, 9.456588698267518339e+03],\n [-6.017251579067487910e+01, 9.549051515790675694e+03],\n [-5.103438828313483100e+01, 9.643492388283135369e+03],\n [-7.343057830678117170e+01, 9.739665578306781754e+03],\n [-2.774245193054957781e+01, 9.837293451930549054e+03],\n [-3.380481112519191811e+00, 9.936052481112519672e+03],\n [-2.672779877794346248e+01, 1.003560179877794326e+04],\n [-3.217342505148371856e+01, 1.013559842505148299e+04],\n [-4.140567518359966925e+01, 1.023568267518359971e+04],\n [-6.687756033938057953e+00, 1.033547475603393832e+04],\n [7.300600408459467872e+01, 1.043456899591540605e+04],\n [6.862345670680042531e+01, 1.053255554329319966e+04],\n [5.497882461487461114e+01, 1.062907017538512628e+04],\n [9.612244093055960548e+01, 1.072379155906944106e+04],\n [1.978212770103891671e+02, 1.081643272298961165e+04],\n [1.362772276848754700e+02, 1.090676677231512440e+04],\n [2.637635494867263333e+02, 1.099469045051327339e+04],\n [1.876813256815166824e+02, 1.108018567431848351e+04],\n [1.711447873158413131e+02, 1.116339921268415856e+04],\n [5.257586460826678376e+01, 1.124459513539173349e+04],\n [4.710652228531762375e+01, 1.132414447771468258e+04],\n [-6.237613484241046535e+01, 1.140245113484241119e+04],\n [-9.982044354035315337e+01, 1.147994844354035376e+04],\n [-7.916275548997509759e+01, 1.155703075548997549e+04],\n [-9.526003459472303803e+01, 1.163403003459472347e+04],\n [-1.147987680369169539e+02, 1.171122876803691724e+04],\n [-1.900259054765901965e+02, 1.178884990547659072e+04],\n [-2.212256473439556430e+02, 1.186704464734395515e+04],\n [-2.071394278781845060e+02, 1.194584542787818464e+04],\n [-8.968541528904825100e+01, 1.202514641528904758e+04],\n [-6.189531564415665343e+01, 1.210471231564415575e+04],\n [-5.662878162551714922e+01, 1.218425178162551674e+04],\n [-4.961678134413705266e+01, 1.226343478134413635e+04],\n [-3.836288992144181975e+01, 1.234189588992144127e+04],\n [-8.956671991456460091e+00, 1.241923867199145570e+04],\n [3.907028461866866564e+01, 1.249504271538133071e+04],\n [1.865299000184495526e+01, 1.256888200999815490e+04],\n [4.279803532226833340e+01, 
1.264035496467773191e+04],\n [3.962735362631610769e+01, 1.270907164637368442e+04],\n [1.412691291877854383e+02, 1.277466887081221466e+04],\n [1.256537791844366438e+02, 1.283680822081556289e+04],\n [7.067642758858892194e+01, 1.289523957241141034e+04],\n [1.108876647603192396e+02, 1.294979133523968085e+04],\n [9.956490829291760747e+01, 1.300033609170708223e+04],\n [1.571612709880937473e+02, 1.304681572901190702e+04],\n [2.318746375812715996e+02, 1.308923436241872878e+04],\n [2.635546670125277160e+02, 1.312769433298747208e+04],\n [2.044220965739259555e+02, 1.316244290342607383e+04],\n [2.213739418903714977e+02, 1.319389205810962812e+04],\n [1.020184547767112235e+02, 1.322258154522328914e+04],\n [-1.072694716663390864e+02, 1.324918947166633916e+04],\n [-3.490477058718843182e+02, 1.327445770587188417e+04],\n [-3.975570728533530200e+02, 1.329906107285335383e+04],\n [-3.331152428080622485e+02, 1.332345624280806260e+04]])\n dta = macrodata.load_pandas().data['realgdp'].values\n res = column_stack((hpfilter(dta, 1600)))\n assert_almost_equal(res, hpfilt_res, 6)\n\n\ndef test_cfitz_filter():\n # Test Christiano-Fitzgerald Filter. Results taken from R.\n # NOTE: The Stata mata code and the matlab code it's based on are wrong.\n cfilt_res = array([\n [0.712599537179426, 0.439563468233128],\n [1.06824041304411, 0.352886666575907],\n [1.19422467791128, 0.257297004260607],\n [0.970845473140327, 0.114504692143872],\n [0.467026976628563, -0.070734782329146],\n [-0.089153511514031, -0.238609685132605],\n [-0.452339254128573, -0.32376584042956],\n [-0.513231214461187, -0.314288554228112],\n [-0.352372578720063, -0.258815055101336],\n [-0.160282602521333, -0.215076844089567],\n [-0.0918782593827686, -0.194120745417214],\n [-0.168083823205437, -0.158327420072693],\n [-0.291595204965808, -0.0742727139742986],\n [-0.348638756841307, 0.037008291163602],\n [-0.304328040874631, 0.108196527328748],\n [-0.215933150969686, 0.0869231107437175],\n [-0.165632621390694, -0.0130556619786275],\n [-0.182326839507151, -0.126570926191824],\n [-0.223737786804725, -0.205535321806185],\n [-0.228939291453403, -0.269110078201836],\n [-0.185518327227038, -0.375976507132174],\n [-0.143900152461529, -0.53760115656157],\n [-0.162749541550174, -0.660065018626038],\n [-0.236263634756884, -0.588542352053736],\n [-0.275785854309211, -0.236867929421996],\n [-0.173666515108109, 0.303436335579219],\n [0.0963135720251639, 0.779772338801993],\n [0.427070069032285, 0.929108075350647],\n [0.629034743259998, 0.658330841002647],\n [0.557941248993624, 0.118500049361018],\n [0.227866624051603, -0.385048321099911],\n [-0.179878859883227, -0.582223992561493],\n [-0.428263000051965, -0.394053702908091],\n [-0.381640684645912, 0.0445437406977307],\n [-0.0942745548364887, 0.493997792757968],\n [0.238132391504895, 0.764519811304315],\n [0.431293754256291, 0.814755206427316],\n [0.455010435813661, 0.745567043101108],\n [0.452800768971269, 0.709401694610443],\n [0.615754619329312, 0.798293251119636],\n [1.00256335412457, 0.975856845059388],\n [1.44841039351691, 1.09097252730799],\n [1.64651971120370, 0.967823457118036],\n [1.35534532901802, 0.522397724737059],\n [0.580492790312048, -0.16941343361609],\n [-0.410746188031773, -0.90760401289056],\n [-1.26148406066881, -1.49592867122591],\n [-1.75784179124566, -1.87404167409849],\n [-1.94478553960064, -2.14586210891112],\n [-2.03751202708559, -2.465855239868],\n [-2.20376059354166, -2.86294187189049],\n [-2.39722338315852, -3.15004697654831],\n [-2.38032366161537, -3.01390466643222],\n [-1.91798022532025, 
-2.23395210271226],\n [-0.982318490353716, -0.861346053067472],\n [0.199047030343412, 0.790266582335616],\n [1.28582776574786, 2.33731327460104],\n [2.03565905376430, 3.54085486821911],\n [2.41201557412526, 4.36519456268955],\n [2.52011070482927, 4.84810517685452],\n [2.45618479815452, 4.92906708807477],\n [2.22272146945388, 4.42591058990048],\n [1.78307567169034, 3.20962906108388],\n [1.18234431860844, 1.42568060336985],\n [0.590069172333348, -0.461896808688991],\n [0.19662302949837, -1.89020992539465],\n [0.048307034171166, -2.53490571941987],\n [-0.0141956981899000, -2.50020338531674],\n [-0.230505187108187, -2.20625973569823],\n [-0.700947410386801, -2.06643697511048],\n [-1.27085123163060, -2.21536883679783],\n [-1.64082547897928, -2.49016921117735],\n [-1.62286182971254, -2.63948740221362],\n [-1.31609762181362, -2.54685250637904],\n [-1.03085567704873, -2.27157435428923],\n [-1.01100120380112, -1.90404507430561],\n [-1.19823958399826, -1.4123209792214],\n [-1.26398933608383, -0.654000086153317],\n [-0.904710628949692, 0.447960016248203],\n [-0.151340093679588, 1.73970411237156],\n [0.592926881165989, 2.85741581650685],\n [0.851660587507523, 3.4410446351716],\n [0.480324393352127, 3.36870271362297],\n [-0.165153230782417, 2.82003806696544],\n [-0.459235919375844, 2.12858991660866],\n [0.0271158842479935, 1.55840980891556],\n [1.18759188180671, 1.17980298478623],\n [2.43238266962309, 0.904011534980672],\n [3.08277213720132, 0.595286911949837],\n [2.79953663720953, 0.148014782859571],\n [1.73694442845833, -0.496297332023011],\n [0.357638079951977, -1.33108149877570],\n [-0.891418825216945, -2.22650083183366],\n [-1.77646467793627, -2.89359299718574],\n [-2.24614790863088, -2.97921619243347],\n [-2.29048879096607, -2.30003092779280],\n [-1.87929656465888, -1.05298381273274],\n [-1.04510101454788, 0.215837488618531],\n [0.00413338508394524, 0.937866257924888],\n [0.906870625251025, 0.92664365343019],\n [1.33869057593416, 0.518564571494679],\n [1.22659678454440, 0.288096869652890],\n [0.79380139656044, 0.541053084632774],\n [0.38029431865832, 1.01905199983437],\n [0.183929413600038, 1.10529586616777],\n [0.140045425897033, 0.393618564826736],\n [0.0337313182352219, -0.86431819007665],\n [-0.269208622829813, -1.85638085246792],\n [-0.687276639992166, -1.82275359004533],\n [-1.00161592325614, -0.692695765071617],\n [-1.06320089194036, 0.803577361347341],\n [-0.927152307196776, 1.67366338751788],\n [-0.786802101366614, 1.42564362251793],\n [-0.772970884572502, 0.426446388877964],\n [-0.81275662801789, -0.437721213831647],\n [-0.686831250382476, -0.504255468075149],\n [-0.237936463020255, 0.148656301898438],\n [0.459631879129522, 0.832925905720478],\n [1.12717379822508, 0.889455302576383],\n [1.48640453200855, 0.268042676202216],\n [1.46515245776211, -0.446505038539178],\n [1.22993484959115, -0.563868578181134],\n [1.0272100765927, 0.0996849952196907],\n [0.979191212438404, 1.05053652824665],\n [1.00733490030391, 1.51658415000556],\n [0.932192535457706, 1.06262774912638],\n [0.643374300839414, -0.0865180803476065],\n [0.186885168954461, -1.24799408923277],\n [-0.290842337365465, -1.80035611156538],\n [-0.669446735516495, -1.58847333561510],\n [-0.928915624595538, -0.932116966867929],\n [-1.11758635926997, -0.307879396807850],\n [-1.26832454569756, -0.00856199983957032],\n [-1.35755577149251, -0.0303537516690989],\n [-1.34244112665546, -0.196807620887435],\n [-1.22227976023299, -0.342062643495923],\n [-1.04601473486818, -0.390474392372016],\n [-0.85158508717846, -0.322164402093596],\n 
[-0.605033439160543, -0.126930141915954],\n [-0.218304303942818, 0.179551077808122],\n [0.352173017779006, 0.512327303000081],\n [1.01389600097229, 0.733397490572755],\n [1.55149778750607, 0.748740387440165],\n [1.75499674757591, 0.601759717901009],\n [1.56636057468633, 0.457705308377562],\n [1.12239792537274, 0.470849913286519],\n [0.655802600286141, 0.646142040378738],\n [0.335285115340180, 0.824103600255079],\n [0.173454596506888, 0.808068498175582],\n [0.0666753011315252, 0.521488214487996],\n [-0.0842367474816212, 0.0583493276173476],\n [-0.285604762631464, -0.405958418332253],\n [-0.465735422869919, -0.747800086512926],\n [-0.563586691231348, -0.94982272350799],\n [-0.598110322024572, -1.04736894794361],\n [-0.65216025756061, -1.04858365218822],\n [-0.789663117801624, -0.924145633093637],\n [-0.984704045337959, -0.670740724179446],\n [-1.12449565589348, -0.359476803003931],\n [-1.07878318723543, -0.092290938944355],\n [-0.775555435407062, 0.102132527529259],\n [-0.231610677329856, 0.314409560305622],\n [0.463192794235131, 0.663523546243286],\n [1.17416973448423, 1.13156902460931],\n [1.74112278814906, 1.48967153067024],\n [2.00320855757084, 1.42571085941843],\n [1.8529912317336, 0.802460519079555],\n [1.30747261947211, -0.169219078629572],\n [0.540237070403222, -1.01621539672694],\n [-0.177136817092375, -1.3130784867977],\n [-0.611981468823591, -0.982477824460773],\n [-0.700240028737747, -0.344919609255406],\n [-0.572396497740112, 0.125083535035390],\n [-0.450934466600975, 0.142553112732280],\n [-0.494020014254326, -0.211429053871656],\n [-0.701707589094918, -0.599602868825992],\n [-0.94721339346157, -0.710669870591623],\n [-1.09297139748946, -0.47846194092245],\n [-1.08850658866583, -0.082258450179988],\n [-0.976082880696692, 0.235758921309309],\n [-0.81885695346771, 0.365298185204303],\n [-0.63165529525553, 0.384725179378064],\n [-0.37983149226421, 0.460240196164378],\n [-0.0375551354277652, 0.68580913832794],\n [0.361996927427804, 0.984470835955107],\n [0.739920615366072, 1.13195975020298],\n [1.03583478061534, 0.88812510421667],\n [1.25614938962160, 0.172561520611839],\n [1.45295030231799, -0.804979390544485],\n [1.64887158748426, -1.55662011197859],\n [1.78022721495313, -1.52921975346218],\n [1.71945683859668, -0.462240366424548],\n [1.36728880239190, 1.31213774341268],\n [0.740173894315912, 2.88362740582926],\n [-0.0205364331835904, 3.20319080963167],\n [-0.725643970956428, 1.75222466531151],\n [-1.23900506689782, -0.998432917440275],\n [-1.52651897508678, -3.72752870885448],\n [-1.62857516631435, -5.00551707196292],\n [-1.59657420180451, -4.18499132634584],\n [-1.45489013276495, -1.81759097305637],\n [-1.21309542313047, 0.722029457352468]])\n dta = macrodata.load_pandas().data[['tbilrate', 'infl']].values[1:]\n cyc, trend = cffilter(dta)\n assert_almost_equal(cyc, cfilt_res, 8)\n # do 1d\n cyc, trend = cffilter(dta[:, 1])\n assert_almost_equal(cyc, cfilt_res[:, 1], 8)\n\n\ndef test_bking_pandas():\n # 1d\n dta = macrodata.load_pandas().data\n index = date_range(start='1959-01-01', end='2009-10-01', freq='Q')\n dta.index = index\n filtered = bkfilter(dta[\"infl\"])\n nd_filtered = bkfilter(dta['infl'].values)\n assert_equal(filtered.values, nd_filtered)\n assert_equal(filtered.index[0], datetime(1962, 3, 31))\n assert_equal(filtered.index[-1], datetime(2006, 9, 30))\n assert_equal(filtered.name, \"infl_cycle\")\n\n # 2d\n filtered = bkfilter(dta[[\"infl\", \"unemp\"]])\n nd_filtered = bkfilter(dta[['infl', 'unemp']].values)\n assert_equal(filtered.values, nd_filtered)\n 
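# bkfilter drops K=12 observations (the default) from each end of the sample,\n # so the quarterly index runs 1962Q1 (1962-03-31) through 2006Q3 (2006-09-30)\n 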
assert_equal(filtered.index[0], datetime(1962, 3, 31))\n assert_equal(filtered.index[-1], datetime(2006, 9, 30))\n assert_equal(filtered.columns.values, [\"infl_cycle\", \"unemp_cycle\"])\n\n\ndef test_cfitz_pandas():\n # 1d\n dta = macrodata.load_pandas().data\n index = date_range(start='1959-01-01', end='2009-10-01', freq='Q')\n dta.index = index\n cycle, trend = cffilter(dta[\"infl\"])\n ndcycle, ndtrend = cffilter(dta['infl'].values)\n assert_allclose(cycle.values, ndcycle, rtol=1e-14)\n assert_equal(cycle.index[0], datetime(1959, 3, 31))\n assert_equal(cycle.index[-1], datetime(2009, 9, 30))\n assert_equal(cycle.name, \"infl_cycle\")\n\n # 2d\n cycle, trend = cffilter(dta[[\"infl\", \"unemp\"]])\n ndcycle, ndtrend = cffilter(dta[['infl', 'unemp']].values)\n assert_allclose(cycle.values, ndcycle, rtol=1e-14)\n assert_equal(cycle.index[0], datetime(1959, 3, 31))\n assert_equal(cycle.index[-1], datetime(2009, 9, 30))\n assert_equal(cycle.columns.values, [\"infl_cycle\", \"unemp_cycle\"])\n\n\ndef test_hpfilter_pandas():\n dta = macrodata.load_pandas().data\n index = date_range(start='1959-01-01', end='2009-10-01', freq='Q')\n dta.index = index\n cycle, trend = hpfilter(dta[\"realgdp\"])\n ndcycle, ndtrend = hpfilter(dta['realgdp'].values)\n assert_equal(cycle.values, ndcycle)\n assert_equal(cycle.index[0], datetime(1959, 3, 31))\n assert_equal(cycle.index[-1], datetime(2009, 9, 30))\n assert_equal(cycle.name, \"realgdp_cycle\")\n\n\nclass TestFilters(object):\n @classmethod\n def setup_class(cls):\n # even\n data = [-50, 175, 149, 214, 247, 237, 225, 329, 729, 809,\n 530, 489, 540, 457, 195, 176, 337, 239, 128, 102,\n 232, 429, 3, 98, 43, -141, -77, -13, 125, 361, -45, 184]\n cls.data = DataFrame(data, date_range(start='1/1/1951',\n periods=len(data),\n freq='Q'))\n data[9] = np.nan\n cls.datana = DataFrame(data, date_range(start='1/1/1951',\n periods=len(data),\n freq='Q'))\n from .results import filter_results\n cls.expected = filter_results\n\n def test_convolution(self):\n x = self.data.values.squeeze()\n res = convolution_filter(x, [.75, .25])\n expected = self.expected.conv2\n np.testing.assert_almost_equal(res, expected)\n\n res = convolution_filter(x, [.75, .25], nsides=1)\n expected = self.expected.conv1\n np.testing.assert_almost_equal(res, expected)\n\n x = self.datana.values.squeeze()\n res = convolution_filter(x, [.75, .25])\n expected = self.expected.conv2_na\n np.testing.assert_almost_equal(res, expected)\n\n res = convolution_filter(x, [.75, .25], nsides=1)\n expected = self.expected.conv1_na\n np.testing.assert_almost_equal(res, expected)\n\n def test_convolution2d(self):\n x = self.data.values\n res = convolution_filter(x, [[.75], [.25]])\n expected = self.expected.conv2\n np.testing.assert_almost_equal(res, expected[:, None])\n res = convolution_filter(np.c_[x, x], [[.75, .75], [.25, .25]])\n np.testing.assert_almost_equal(res, np.c_[expected, expected])\n\n res = convolution_filter(x, [[.75], [.25]], nsides=1)\n expected = self.expected.conv1\n np.testing.assert_almost_equal(res, expected[:, None])\n\n x = self.datana.values\n res = convolution_filter(x, [[.75], [.25]])\n expected = self.expected.conv2_na\n np.testing.assert_almost_equal(res, expected[:, None])\n\n res = convolution_filter(x, [[.75], [.25]], nsides=1)\n expected = self.expected.conv1_na\n np.testing.assert_almost_equal(res, expected[:, None])\n\n def test_recursive(self):\n x = self.data.values.squeeze()\n res = recursive_filter(x, [.75, .25])\n expected = self.expected.recurse\n 
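# recursive_filter above applies the AR recursion y[t] = x[t] + .75*y[t-1] + .25*y[t-2]\n 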
np.testing.assert_almost_equal(res, expected)\n\n res = recursive_filter(x, [.75, .25], init=[150, 100])\n expected = self.expected.recurse_init\n np.testing.assert_almost_equal(res, expected)\n\n x = self.datana.values.squeeze()\n res = recursive_filter(x, [.75, .25])\n expected = self.expected.recurse_na\n np.testing.assert_almost_equal(res, expected)\n\n res = recursive_filter(x, [.75, .25], init=[150, 100])\n expected = self.expected.recurse_init_na\n np.testing.assert_almost_equal(res, expected)\n\n assert_raises(ValueError, recursive_filter, x,\n [.75, .25, .5], [150, 100])\n\n def test_pandas(self):\n start = datetime(1951, 3, 31)\n end = datetime(1958, 12, 31)\n x = self.data[0]\n res = convolution_filter(x, [.75, .25])\n assert_(res.index[0] == start)\n assert_(res.index[-1] == end)\n\n res = convolution_filter(x, [.75, .25], nsides=1)\n assert_(res.index[0] == start)\n # with no nan-padding q1 if not\n assert_(res.index[-1] == end)\n\n res = recursive_filter(x, [.75, .25])\n assert_(res.index[0] == start)\n assert_(res.index[-1] == end)\n\n x = self.datana\n res = recursive_filter(x, [.75, .25])\n assert_(res.index[0] == start)\n assert_(res.index[-1] == end)\n\n def test_pandas2d(self):\n start = datetime(1951, 3, 31)\n end = datetime(1958, 12, 31)\n x = concat((self.data[0], self.data[0]), axis=1)\n res = convolution_filter(x, [[.75, .75], [.25, .25]])\n assert_(res.index[0] == start)\n assert_(res.index[-1] == end)\n\n def test_odd_length_filter(self):\n start = datetime(1951, 3, 31)\n end = datetime(1958, 12, 31)\n x = self.data[0]\n res = convolution_filter(x, [.75, .5, .3, .2, .1])\n expected = self.expected.conv2_odd\n np.testing.assert_almost_equal(res.values.squeeze(), expected)\n np.testing.assert_(res.index[0] == start)\n np.testing.assert_(res.index[-1] == end)\n\n res = convolution_filter(x, [.75, .5, .3, .2, .1], nsides=1)\n expected = self.expected.conv1_odd\n np.testing.assert_almost_equal(res.values.squeeze(), expected)\n np.testing.assert_(res.index[0] == start)\n np.testing.assert_(res.index[-1] == end)\n # with no NAs\n\n # not a stable filter\n res = recursive_filter(x, [.75, .5, .3, .2, .1], init=[150, 100,\n 125, 135,\n 145])\n expected = self.expected.recurse_odd\n # only have 12 characters in R and this blows up and gets big\n np.testing.assert_almost_equal(res.values.squeeze(), expected, 4)\n np.testing.assert_(res.index[0] == start)\n np.testing.assert_(res.index[-1] == end)\n\n\ndef dummy_func(x):\n return x\n\n\ndef dummy_func_array(x):\n return x.values\n\n\ndef dummy_func_pandas_columns(x):\n return x.values\n\n\ndef dummy_func_pandas_series(x):\n return x['A']\n\n\ndef test_pandas_freq_decorator():\n x = make_dataframe()\n # in x, get a function back that returns an x with the same columns\n func = pandas_wrapper(dummy_func)\n\n np.testing.assert_equal(func(x.values), x)\n\n func = pandas_wrapper(dummy_func_array)\n assert_frame_equal(func(x), x)\n\n expected = x.rename(columns=dict(zip('ABCD', 'EFGH')))\n func = pandas_wrapper(dummy_func_array, names=list('EFGH'))\n assert_frame_equal(func(x), expected)\n", "from warnings import warn\n\nimport numpy as np\nfrom mizani.palettes import rescale_pal\n\nfrom ..doctools import document\nfrom ..exceptions import PlotnineWarning\nfrom ..utils import alias\nfrom .scale import scale_discrete, scale_continuous\n\n\n@document\nclass scale_stroke_continuous(scale_continuous):\n \"\"\"\n Continuous Stroke Scale\n\n Parameters\n ----------\n range : array_like\n Range ([Minimum, Maximum]) of output stroke 
values.\n Default is ``(1, 6)``\n {superclass_parameters}\n \"\"\"\n _aesthetics = ['stroke']\n\n def __init__(self, range=(1, 6), **kwargs):\n self.palette = rescale_pal(range)\n scale_continuous.__init__(self, **kwargs)\n\n\n@document\nclass scale_stroke_ordinal(scale_discrete):\n \"\"\"\n Discrete Stroke Scale\n\n Parameters\n ----------\n range : array_like\n Range ([Minimum, Maximum]) of output stroke values.\n Default is ``(1, 6)``\n {superclass_parameters}\n \"\"\"\n _aesthetics = ['stroke']\n\n def __init__(self, range=(1, 6), **kwargs):\n def palette(n):\n return np.linspace(range[0], range[1], n)\n\n self.palette = palette\n scale_discrete.__init__(self, **kwargs)\n\n\n@document\nclass scale_stroke_discrete(scale_stroke_ordinal):\n \"\"\"\n Discrete Stroke Scale\n\n Parameters\n ----------\n {superclass_parameters}\n \"\"\"\n _aesthetics = ['stroke']\n\n def __init__(self, **kwargs):\n warn(\n \"Using stroke for an ordinal variable is not advised.\",\n PlotnineWarning\n )\n super().__init__(**kwargs)\n\n\nalias('scale_stroke', scale_stroke_continuous)\n", "#!/usr/bin/env python\n\"\"\"Tests for the linalg.isolve.gcrotmk module\n\"\"\"\n\nfrom numpy.testing import (assert_, assert_allclose, assert_equal,\n suppress_warnings)\n\nimport numpy as np\nfrom numpy import zeros, array, allclose\nfrom scipy.linalg import norm\nfrom scipy.sparse import csr_matrix, eye, rand\n\nfrom scipy.sparse.linalg.interface import LinearOperator\nfrom scipy.sparse.linalg import splu\nfrom scipy.sparse.linalg.isolve import gcrotmk, gmres\n\n\nAm = csr_matrix(array([[-2,1,0,0,0,9],\n [1,-2,1,0,5,0],\n [0,1,-2,1,0,0],\n [0,0,1,-2,1,0],\n [0,3,0,1,-2,1],\n [1,0,0,0,1,-2]]))\nb = array([1,2,3,4,5,6])\ncount = [0]\n\n\ndef matvec(v):\n count[0] += 1\n return Am*v\n\n\nA = LinearOperator(matvec=matvec, shape=Am.shape, dtype=Am.dtype)\n\n\ndef do_solve(**kw):\n count[0] = 0\n with suppress_warnings() as sup:\n sup.filter(DeprecationWarning, \".*called without specifying.*\")\n x0, flag = gcrotmk(A, b, x0=zeros(A.shape[0]), tol=1e-14, **kw)\n count_0 = count[0]\n assert_(allclose(A*x0, b, rtol=1e-12, atol=1e-12), norm(A*x0-b))\n return x0, count_0\n\n\nclass TestGCROTMK(object):\n def test_preconditioner(self):\n # Check that preconditioning works\n pc = splu(Am.tocsc())\n M = LinearOperator(matvec=pc.solve, shape=A.shape, dtype=A.dtype)\n\n x0, count_0 = do_solve()\n x1, count_1 = do_solve(M=M)\n\n assert_equal(count_1, 3)\n assert_(count_1 < count_0/2)\n assert_(allclose(x1, x0, rtol=1e-14))\n\n def test_arnoldi(self):\n np.random.seed(1)\n\n A = eye(2000) + rand(2000, 2000, density=5e-4)\n b = np.random.rand(2000)\n\n # The inner arnoldi should be equivalent to gmres\n with suppress_warnings() as sup:\n sup.filter(DeprecationWarning, \".*called without specifying.*\")\n x0, flag0 = gcrotmk(A, b, x0=zeros(A.shape[0]), m=15, k=0, maxiter=1)\n x1, flag1 = gmres(A, b, x0=zeros(A.shape[0]), restart=15, maxiter=1)\n\n assert_equal(flag0, 1)\n assert_equal(flag1, 1)\n assert np.linalg.norm(A.dot(x0) - b) > 1e-3\n\n assert_allclose(x0, x1)\n\n def test_cornercase(self):\n np.random.seed(1234)\n\n # Rounding error may prevent convergence with tol=0 --- ensure\n # that the return values in this case are correct, and no\n # exceptions are raised\n\n for n in [3, 5, 10, 100]:\n A = 2*eye(n)\n\n with suppress_warnings() as sup:\n sup.filter(DeprecationWarning, \".*called without specifying.*\")\n b = np.ones(n)\n x, info = gcrotmk(A, b, maxiter=10)\n 
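# info == 0 signals successful convergence of gcrotmk; a positive value would be\n # the number of iterations performed without reaching the tolerance\n 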
assert_equal(info, 0)\n assert_allclose(A.dot(x) - b, 0, atol=1e-14)\n\n x, info = gcrotmk(A, b, tol=0, maxiter=10)\n if info == 0:\n assert_allclose(A.dot(x) - b, 0, atol=1e-14)\n\n b = np.random.rand(n)\n x, info = gcrotmk(A, b, maxiter=10)\n assert_equal(info, 0)\n assert_allclose(A.dot(x) - b, 0, atol=1e-14)\n\n x, info = gcrotmk(A, b, tol=0, maxiter=10)\n if info == 0:\n assert_allclose(A.dot(x) - b, 0, atol=1e-14)\n\n def test_nans(self):\n A = eye(3, format='lil')\n A[1,1] = np.nan\n b = np.ones(3)\n\n with suppress_warnings() as sup:\n sup.filter(DeprecationWarning, \".*called without specifying.*\")\n x, info = gcrotmk(A, b, tol=0, maxiter=10)\n assert_equal(info, 1)\n\n def test_truncate(self):\n np.random.seed(1234)\n A = np.random.rand(30, 30) + np.eye(30)\n b = np.random.rand(30)\n\n for truncate in ['oldest', 'smallest']:\n with suppress_warnings() as sup:\n sup.filter(DeprecationWarning, \".*called without specifying.*\")\n x, info = gcrotmk(A, b, m=10, k=10, truncate=truncate, tol=1e-4,\n maxiter=200)\n assert_equal(info, 0)\n assert_allclose(A.dot(x) - b, 0, atol=1e-3)\n\n def test_CU(self):\n for discard_C in (True, False):\n # Check that C,U behave as expected\n CU = []\n x0, count_0 = do_solve(CU=CU, discard_C=discard_C)\n assert_(len(CU) > 0)\n assert_(len(CU) <= 6)\n\n if discard_C:\n for c, u in CU:\n assert_(c is None)\n\n # should converge immediately\n x1, count_1 = do_solve(CU=CU, discard_C=discard_C)\n if discard_C:\n assert_equal(count_1, 2 + len(CU))\n else:\n assert_equal(count_1, 3)\n assert_(count_1 <= count_0/2)\n assert_allclose(x1, x0, atol=1e-14)\n\n def test_denormals(self):\n # Check that no warnings are emitted if the matrix contains\n # numbers for which 1/x has no float representation, and that\n # the solver behaves properly.\n A = np.array([[1, 2], [3, 4]], dtype=float)\n A *= 100 * np.nextafter(0, 1)\n\n b = np.array([1, 1])\n\n with suppress_warnings() as sup:\n sup.filter(DeprecationWarning, \".*called without specifying.*\")\n xp, info = gcrotmk(A, b)\n\n if info == 0:\n assert_allclose(A.dot(xp), b)\n", "\"\"\"Test functions for the sparse.linalg.interface module\n\"\"\"\n\nfrom functools import partial\nfrom itertools import product\nimport operator\nimport pytest\nfrom pytest import raises as assert_raises, warns\nfrom numpy.testing import assert_, assert_equal\n\nimport numpy as np\nimport scipy.sparse as sparse\n\nfrom scipy.sparse.linalg import interface\nfrom scipy.sparse.sputils import matrix\n\n\nclass TestLinearOperator(object):\n def setup_method(self):\n self.A = np.array([[1,2,3],\n [4,5,6]])\n self.B = np.array([[1,2],\n [3,4],\n [5,6]])\n self.C = np.array([[1,2],\n [3,4]])\n\n def test_matvec(self):\n def get_matvecs(A):\n return [{\n 'shape': A.shape,\n 'matvec': lambda x: np.dot(A, x).reshape(A.shape[0]),\n 'rmatvec': lambda x: np.dot(A.T.conj(),\n x).reshape(A.shape[1])\n },\n {\n 'shape': A.shape,\n 'matvec': lambda x: np.dot(A, x),\n 'rmatvec': lambda x: np.dot(A.T.conj(), x),\n 'rmatmat': lambda x: np.dot(A.T.conj(), x),\n 'matmat': lambda x: np.dot(A, x)\n }]\n\n for matvecs in get_matvecs(self.A):\n A = interface.LinearOperator(**matvecs)\n\n assert_(A.args == ())\n\n assert_equal(A.matvec(np.array([1,2,3])), [14,32])\n assert_equal(A.matvec(np.array([[1],[2],[3]])), [[14],[32]])\n assert_equal(A * np.array([1,2,3]), [14,32])\n assert_equal(A * np.array([[1],[2],[3]]), [[14],[32]])\n assert_equal(A.dot(np.array([1,2,3])), [14,32])\n assert_equal(A.dot(np.array([[1],[2],[3]])), [[14],[32]])\n\n 
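# np.matrix inputs should behave exactly like the ndarray inputs above\n 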
assert_equal(A.matvec(matrix([[1],[2],[3]])), [[14],[32]])\n assert_equal(A * matrix([[1],[2],[3]]), [[14],[32]])\n assert_equal(A.dot(matrix([[1],[2],[3]])), [[14],[32]])\n\n assert_equal((2*A)*[1,1,1], [12,30])\n assert_equal((2 * A).rmatvec([1, 1]), [10, 14, 18])\n assert_equal((2*A).H.matvec([1,1]), [10, 14, 18])\n assert_equal((2*A)*[[1],[1],[1]], [[12],[30]])\n assert_equal((2 * A).matmat([[1], [1], [1]]), [[12], [30]])\n assert_equal((A*2)*[1,1,1], [12,30])\n assert_equal((A*2)*[[1],[1],[1]], [[12],[30]])\n assert_equal((2j*A)*[1,1,1], [12j,30j])\n assert_equal((A+A)*[1,1,1], [12, 30])\n assert_equal((A + A).rmatvec([1, 1]), [10, 14, 18])\n assert_equal((A+A).H.matvec([1,1]), [10, 14, 18])\n assert_equal((A+A)*[[1],[1],[1]], [[12], [30]])\n assert_equal((A+A).matmat([[1],[1],[1]]), [[12], [30]])\n assert_equal((-A)*[1,1,1], [-6,-15])\n assert_equal((-A)*[[1],[1],[1]], [[-6],[-15]])\n assert_equal((A-A)*[1,1,1], [0,0])\n assert_equal((A - A) * [[1], [1], [1]], [[0], [0]])\n\n X = np.array([[1, 2], [3, 4]])\n # A_asarray = np.array([[1, 2, 3], [4, 5, 6]])\n assert_equal((2 * A).rmatmat(X), np.dot((2 * self.A).T, X))\n assert_equal((A * 2).rmatmat(X), np.dot((self.A * 2).T, X))\n assert_equal((2j * A).rmatmat(X),\n np.dot((2j * self.A).T.conj(), X))\n assert_equal((A * 2j).rmatmat(X),\n np.dot((self.A * 2j).T.conj(), X))\n assert_equal((A + A).rmatmat(X),\n np.dot((self.A + self.A).T, X))\n assert_equal((A + 2j * A).rmatmat(X),\n np.dot((self.A + 2j * self.A).T.conj(), X))\n assert_equal((-A).rmatmat(X), np.dot((-self.A).T, X))\n assert_equal((A - A).rmatmat(X),\n np.dot((self.A - self.A).T, X))\n assert_equal((2j * A).rmatmat(2j * X),\n np.dot((2j * self.A).T.conj(), 2j * X))\n\n z = A+A\n assert_(len(z.args) == 2 and z.args[0] is A and z.args[1] is A)\n z = 2*A\n assert_(len(z.args) == 2 and z.args[0] is A and z.args[1] == 2)\n\n assert_(isinstance(A.matvec([1, 2, 3]), np.ndarray))\n assert_(isinstance(A.matvec(np.array([[1],[2],[3]])), np.ndarray))\n assert_(isinstance(A * np.array([1,2,3]), np.ndarray))\n assert_(isinstance(A * np.array([[1],[2],[3]]), np.ndarray))\n assert_(isinstance(A.dot(np.array([1,2,3])), np.ndarray))\n assert_(isinstance(A.dot(np.array([[1],[2],[3]])), np.ndarray))\n\n assert_(isinstance(A.matvec(matrix([[1],[2],[3]])), np.ndarray))\n assert_(isinstance(A * matrix([[1],[2],[3]]), np.ndarray))\n assert_(isinstance(A.dot(matrix([[1],[2],[3]])), np.ndarray))\n\n assert_(isinstance(2*A, interface._ScaledLinearOperator))\n assert_(isinstance(2j*A, interface._ScaledLinearOperator))\n assert_(isinstance(A+A, interface._SumLinearOperator))\n assert_(isinstance(-A, interface._ScaledLinearOperator))\n assert_(isinstance(A-A, interface._SumLinearOperator))\n\n assert_((2j*A).dtype == np.complex_)\n\n assert_raises(ValueError, A.matvec, np.array([1,2]))\n assert_raises(ValueError, A.matvec, np.array([1,2,3,4]))\n assert_raises(ValueError, A.matvec, np.array([[1],[2]]))\n assert_raises(ValueError, A.matvec, np.array([[1],[2],[3],[4]]))\n\n assert_raises(ValueError, lambda: A*A)\n assert_raises(ValueError, lambda: A**2)\n\n for matvecsA, matvecsB in product(get_matvecs(self.A),\n get_matvecs(self.B)):\n A = interface.LinearOperator(**matvecsA)\n B = interface.LinearOperator(**matvecsB)\n # AtimesB = np.array([[22, 28], [49, 64]])\n AtimesB = self.A.dot(self.B)\n X = np.array([[1, 2], [3, 4]])\n\n assert_equal((A * B).rmatmat(X), np.dot((AtimesB).T, X))\n assert_equal((2j * A * B).rmatmat(X),\n np.dot((2j * AtimesB).T.conj(), X))\n\n assert_equal((A*B)*[1,1], [50,113])\n 
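# A.dot(B) == [[22, 28], [49, 64]], so the composed operator maps [1, 1] to [50, 113]\n 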
assert_equal((A*B)*[[1],[1]], [[50],[113]])\n assert_equal((A*B).matmat([[1],[1]]), [[50],[113]])\n\n assert_equal((A * B).rmatvec([1, 1]), [71, 92])\n assert_equal((A * B).H.matvec([1, 1]), [71, 92])\n\n assert_(isinstance(A*B, interface._ProductLinearOperator))\n\n assert_raises(ValueError, lambda: A+B)\n assert_raises(ValueError, lambda: A**2)\n\n z = A*B\n assert_(len(z.args) == 2 and z.args[0] is A and z.args[1] is B)\n\n for matvecsC in get_matvecs(self.C):\n C = interface.LinearOperator(**matvecsC)\n X = np.array([[1, 2], [3, 4]])\n\n assert_equal(C.rmatmat(X), np.dot((self.C).T, X))\n assert_equal((C**2).rmatmat(X),\n np.dot((np.dot(self.C, self.C)).T, X))\n\n assert_equal((C**2)*[1,1], [17,37])\n assert_equal((C**2).rmatvec([1, 1]), [22, 32])\n assert_equal((C**2).H.matvec([1, 1]), [22, 32])\n assert_equal((C**2).matmat([[1],[1]]), [[17],[37]])\n\n assert_(isinstance(C**2, interface._PowerLinearOperator))\n\n def test_matmul(self):\n D = {'shape': self.A.shape,\n 'matvec': lambda x: np.dot(self.A, x).reshape(self.A.shape[0]),\n 'rmatvec': lambda x: np.dot(self.A.T.conj(),\n x).reshape(self.A.shape[1]),\n 'rmatmat': lambda x: np.dot(self.A.T.conj(), x),\n 'matmat': lambda x: np.dot(self.A, x)}\n A = interface.LinearOperator(**D)\n B = np.array([[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]])\n b = B[0]\n\n assert_equal(operator.matmul(A, b), A * b)\n assert_equal(operator.matmul(A, B), A * B)\n assert_raises(ValueError, operator.matmul, A, 2)\n assert_raises(ValueError, operator.matmul, 2, A)\n\n\nclass TestAsLinearOperator(object):\n def setup_method(self):\n self.cases = []\n\n def make_cases(original, dtype):\n cases = []\n\n cases.append((matrix(original, dtype=dtype), original))\n cases.append((np.array(original, dtype=dtype), original))\n cases.append((sparse.csr_matrix(original, dtype=dtype), original))\n\n # Test default implementations of _adjoint and _rmatvec, which\n # refer to each other.\n def mv(x, dtype):\n y = original.dot(x)\n if len(x.shape) == 2:\n y = y.reshape(-1, 1)\n return y\n\n def rmv(x, dtype):\n return original.T.conj().dot(x)\n\n class BaseMatlike(interface.LinearOperator):\n args = ()\n\n def __init__(self, dtype):\n self.dtype = np.dtype(dtype)\n self.shape = original.shape\n\n def _matvec(self, x):\n return mv(x, self.dtype)\n\n class HasRmatvec(BaseMatlike):\n args = ()\n\n def _rmatvec(self,x):\n return rmv(x, self.dtype)\n\n class HasAdjoint(BaseMatlike):\n args = ()\n\n def _adjoint(self):\n shape = self.shape[1], self.shape[0]\n matvec = partial(rmv, dtype=self.dtype)\n rmatvec = partial(mv, dtype=self.dtype)\n return interface.LinearOperator(matvec=matvec,\n rmatvec=rmatvec,\n dtype=self.dtype,\n shape=shape)\n\n class HasRmatmat(HasRmatvec):\n def _matmat(self, x):\n return original.dot(x)\n\n def _rmatmat(self, x):\n return original.T.conj().dot(x)\n\n cases.append((HasRmatvec(dtype), original))\n cases.append((HasAdjoint(dtype), original))\n cases.append((HasRmatmat(dtype), original))\n return cases\n\n original = np.array([[1,2,3], [4,5,6]])\n self.cases += make_cases(original, np.int32)\n self.cases += make_cases(original, np.float32)\n self.cases += make_cases(original, np.float64)\n self.cases += [(interface.aslinearoperator(M).T, A.T)\n for M, A in make_cases(original.T, np.float64)]\n self.cases += [(interface.aslinearoperator(M).H, A.T.conj())\n for M, A in make_cases(original.T, np.float64)]\n\n original = np.array([[1, 2j, 3j], [4j, 5j, 6]])\n self.cases += make_cases(original, np.complex_)\n self.cases += [(interface.aslinearoperator(M).T, 
A.T)\n for M, A in make_cases(original.T, np.complex_)]\n self.cases += [(interface.aslinearoperator(M).H, A.T.conj())\n for M, A in make_cases(original.T, np.complex_)]\n\n def test_basic(self):\n\n for M, A_array in self.cases:\n A = interface.aslinearoperator(M)\n M,N = A.shape\n\n xs = [np.array([1, 2, 3]),\n np.array([[1], [2], [3]])]\n ys = [np.array([1, 2]), np.array([[1], [2]])]\n\n if A.dtype == np.complex_:\n xs += [np.array([1, 2j, 3j]),\n np.array([[1], [2j], [3j]])]\n ys += [np.array([1, 2j]), np.array([[1], [2j]])]\n\n x2 = np.array([[1, 4], [2, 5], [3, 6]])\n\n for x in xs:\n assert_equal(A.matvec(x), A_array.dot(x))\n assert_equal(A * x, A_array.dot(x))\n\n assert_equal(A.matmat(x2), A_array.dot(x2))\n assert_equal(A * x2, A_array.dot(x2))\n\n for y in ys:\n assert_equal(A.rmatvec(y), A_array.T.conj().dot(y))\n assert_equal(A.T.matvec(y), A_array.T.dot(y))\n assert_equal(A.H.matvec(y), A_array.T.conj().dot(y))\n\n for y in ys:\n if y.ndim < 2:\n continue\n assert_equal(A.rmatmat(y), A_array.T.conj().dot(y))\n assert_equal(A.T.matmat(y), A_array.T.dot(y))\n assert_equal(A.H.matmat(y), A_array.T.conj().dot(y))\n\n if hasattr(M,'dtype'):\n assert_equal(A.dtype, M.dtype)\n\n assert_(hasattr(A, 'args'))\n\n def test_dot(self):\n\n for M, A_array in self.cases:\n A = interface.aslinearoperator(M)\n M,N = A.shape\n\n x0 = np.array([1, 2, 3])\n x1 = np.array([[1], [2], [3]])\n x2 = np.array([[1, 4], [2, 5], [3, 6]])\n\n assert_equal(A.dot(x0), A_array.dot(x0))\n assert_equal(A.dot(x1), A_array.dot(x1))\n assert_equal(A.dot(x2), A_array.dot(x2))\n\n\ndef test_repr():\n A = interface.LinearOperator(shape=(1, 1), matvec=lambda x: 1)\n repr_A = repr(A)\n assert_('unspecified dtype' not in repr_A, repr_A)\n\n\ndef test_identity():\n ident = interface.IdentityOperator((3, 3))\n assert_equal(ident * [1, 2, 3], [1, 2, 3])\n assert_equal(ident.dot(np.arange(9).reshape(3, 3)).ravel(), np.arange(9))\n\n assert_raises(ValueError, ident.matvec, [1, 2, 3, 4])\n\n\ndef test_attributes():\n A = interface.aslinearoperator(np.arange(16).reshape(4, 4))\n\n def always_four_ones(x):\n x = np.asarray(x)\n assert_(x.shape == (3,) or x.shape == (3, 1))\n return np.ones(4)\n\n B = interface.LinearOperator(shape=(4, 3), matvec=always_four_ones)\n\n for op in [A, B, A * B, A.H, A + A, B + B, A**4]:\n assert_(hasattr(op, \"dtype\"))\n assert_(hasattr(op, \"shape\"))\n assert_(hasattr(op, \"_matvec\"))\n\ndef matvec(x):\n \"\"\" Needed for test_pickle as local functions are not pickleable \"\"\"\n return np.zeros(3)\n\ndef test_pickle():\n import pickle\n\n for protocol in range(pickle.HIGHEST_PROTOCOL + 1):\n A = interface.LinearOperator((3, 3), matvec)\n s = pickle.dumps(A, protocol=protocol)\n B = pickle.loads(s)\n\n for k in A.__dict__:\n assert_equal(getattr(A, k), getattr(B, k))\n\ndef test_inheritance():\n class Empty(interface.LinearOperator):\n pass\n\n with warns(RuntimeWarning, match=\"should implement at least\"):\n assert_raises(TypeError, Empty)\n\n class Identity(interface.LinearOperator):\n def __init__(self, n):\n super(Identity, self).__init__(dtype=None, shape=(n, n))\n\n def _matvec(self, x):\n return x\n\n id3 = Identity(3)\n assert_equal(id3.matvec([1, 2, 3]), [1, 2, 3])\n assert_raises(NotImplementedError, id3.rmatvec, [4, 5, 6])\n\n class MatmatOnly(interface.LinearOperator):\n def __init__(self, A):\n super(MatmatOnly, self).__init__(A.dtype, A.shape)\n self.A = A\n\n def _matmat(self, x):\n return self.A.dot(x)\n\n mm = MatmatOnly(np.random.randn(5, 3))\n 
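# MatmatOnly implements only _matmat, so matvec falls back to matmat on a\n # column vector and returns a 1-D result of length 5\n 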
assert_equal(mm.matvec(np.random.randn(3)).shape, (5,))\n\ndef test_dtypes_of_operator_sum():\n # gh-6078\n\n mat_complex = np.random.rand(2,2) + 1j * np.random.rand(2,2)\n mat_real = np.random.rand(2,2)\n\n complex_operator = interface.aslinearoperator(mat_complex)\n real_operator = interface.aslinearoperator(mat_real)\n\n sum_complex = complex_operator + complex_operator\n sum_real = real_operator + real_operator\n\n assert_equal(sum_real.dtype, np.float64)\n assert_equal(sum_complex.dtype, np.complex128)\n\ndef test_no_double_init():\n call_count = [0]\n\n def matvec(v):\n call_count[0] += 1\n return v\n\n # It should call matvec exactly once (in order to determine the\n # operator dtype)\n interface.LinearOperator((2, 2), matvec=matvec)\n assert_equal(call_count[0], 1)\n\ndef test_adjoint_conjugate():\n X = np.array([[1j]])\n A = interface.aslinearoperator(X)\n\n B = 1j * A\n Y = 1j * X\n\n v = np.array([1])\n\n assert_equal(B.dot(v), Y.dot(v))\n assert_equal(B.H.dot(v), Y.T.conj().dot(v))\n\ndef test_ndim():\n X = np.array([[1]])\n A = interface.aslinearoperator(X)\n assert_equal(A.ndim, 2)\n\ndef test_transpose_noconjugate():\n X = np.array([[1j]])\n A = interface.aslinearoperator(X)\n\n B = 1j * A\n Y = 1j * X\n\n v = np.array([1])\n\n assert_equal(B.dot(v), Y.dot(v))\n assert_equal(B.T.dot(v), Y.T.dot(v))\n", "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 3 21:08:49 2017\n\nAuthor: Josef Perktold\n\"\"\"\n\nimport numpy as np\nfrom numpy.testing import assert_allclose\nfrom statsmodels.discrete.discrete_model import (Poisson, NegativeBinomial,\n NegativeBinomialP)\nfrom statsmodels.tools.tools import add_constant\n\nimport statsmodels.discrete.tests.results.results_count_margins as res_stata\n\n# load data into module namespace\nfrom statsmodels.datasets.cpunish import load\ncpunish_data = load(as_pandas=False)\ncpunish_data.exog[:,3] = np.log(cpunish_data.exog[:,3])\nexog = add_constant(cpunish_data.exog, prepend=False)\nendog = cpunish_data.endog - 1 # avoid zero-truncation\nexog /= np.round(exog.max(0), 3)\n\nclass CheckMarginMixin(object):\n rtol_fac = 1\n\n def test_margins_table(self):\n res1 = self.res1\n sl = self.res1_slice\n rf = self.rtol_fac\n assert_allclose(self.margeff.margeff, self.res1.params[sl], rtol=1e-5 * rf)\n assert_allclose(self.margeff.margeff_se, self.res1.bse[sl], rtol=1e-6 * rf)\n assert_allclose(self.margeff.pvalues, self.res1.pvalues[sl], rtol=5e-6 * rf)\n assert_allclose(self.margeff.conf_int(), res1.margins_table[sl, 4:6],\n rtol=1e-6 * rf)\n\n\nclass TestPoissonMargin(CheckMarginMixin):\n\n @classmethod\n def setup_class(cls):\n # here we do not need to check convergence from default start_params\n start_params = [14.1709, 0.7085, -3.4548, -0.539, 3.2368, -7.9299,\n -5.0529]\n mod_poi = Poisson(endog, exog)\n res_poi = mod_poi.fit(start_params=start_params)\n #res_poi = mod_poi.fit(maxiter=100)\n marge_poi = res_poi.get_margeff()\n cls.res = res_poi\n cls.margeff = marge_poi\n\n cls.rtol_fac = 1\n cls.res1_slice = slice(None, None, None)\n cls.res1 = res_stata.results_poisson_margins_cont\n\n\nclass TestPoissonMarginDummy(CheckMarginMixin):\n\n @classmethod\n def setup_class(cls):\n # here we do not need to check convergence from default start_params\n start_params = [14.1709, 0.7085, -3.4548, -0.539, 3.2368, -7.9299,\n -5.0529]\n mod_poi = Poisson(endog, exog)\n res_poi = mod_poi.fit(start_params=start_params)\n marge_poi = res_poi.get_margeff(dummy=True)\n cls.res = res_poi\n cls.margeff = marge_poi\n\n cls.res1_slice = [0, 1, 2, 3, 5, 6]\n 
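# res1_slice picks the rows of the Stata margins table that\n # CheckMarginMixin.test_margins_table compares against\n 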
cls.res1 = res_stata.results_poisson_margins_dummy\n\n\nclass TestNegBinMargin(CheckMarginMixin):\n\n @classmethod\n def setup_class(cls):\n # here we do not need to check convergence from default start_params\n start_params = [13.1996, 0.8582, -2.8005, -1.5031, 2.3849, -8.5552,\n -2.88, 1.14]\n mod = NegativeBinomial(endog, exog)\n res = mod.fit(start_params=start_params, method='nm', maxiter=2000)\n marge = res.get_margeff()\n cls.res = res\n cls.margeff = marge\n\n cls.res1_slice = slice(None, None, None)\n cls.res1 = res_stata.results_negbin_margins_cont\n cls.rtol_fac = 5e1\n # negbin has lower agreement with Stata in this case\n\n\nclass TestNegBinMarginDummy(CheckMarginMixin):\n\n @classmethod\n def setup_class(cls):\n # here we do not need to check convergence from default start_params\n start_params = [13.1996, 0.8582, -2.8005, -1.5031, 2.3849, -8.5552,\n -2.88, 1.14]\n mod = NegativeBinomial(endog, exog)\n res = mod.fit(start_params=start_params, method='nm', maxiter=2000)\n marge = res.get_margeff(dummy=True)\n cls.res = res\n cls.margeff = marge\n\n cls.res1_slice = cls.res1_slice = [0, 1, 2, 3, 5, 6]\n cls.res1 = res_stata.results_negbin_margins_dummy\n cls.rtol_fac = 5e1\n\n\nclass TestNegBinPMargin(CheckMarginMixin):\n # this is the same as the nb2 version above for NB-P, p=2\n\n @classmethod\n def setup_class(cls):\n # here we do not need to check convergence from default start_params\n start_params = [13.1996, 0.8582, -2.8005, -1.5031, 2.3849, -8.5552,\n -2.88, 1.14]\n mod = NegativeBinomialP(endog, exog) # checks also that default p=2\n res = mod.fit(start_params=start_params, method='nm', maxiter=2000)\n marge = res.get_margeff()\n cls.res = res\n cls.margeff = marge\n\n cls.res1_slice = slice(None, None, None)\n cls.res1 = res_stata.results_negbin_margins_cont\n cls.rtol_fac = 5e1\n # negbin has lower agreement with Stata in this case\n", "from typing import List\n\nfrom pandas._typing import FilePathOrBuffer, Scalar, StorageOptions\nfrom pandas.compat._optional import import_optional_dependency\n\nfrom pandas.io.excel._base import BaseExcelReader\n\n\nclass PyxlsbReader(BaseExcelReader):\n def __init__(\n self,\n filepath_or_buffer: FilePathOrBuffer,\n storage_options: StorageOptions = None,\n ):\n \"\"\"\n Reader using pyxlsb engine.\n\n Parameters\n ----------\n filepath_or_buffer : str, path object, or Workbook\n Object to be parsed.\n storage_options : dict, optional\n passed to fsspec for appropriate URLs (see ``_get_filepath_or_buffer``)\n \"\"\"\n import_optional_dependency(\"pyxlsb\")\n # This will call load_workbook on the filepath or buffer\n # And set the result to the book-attribute\n super().__init__(filepath_or_buffer, storage_options=storage_options)\n\n @property\n def _workbook_class(self):\n from pyxlsb import Workbook\n\n return Workbook\n\n def load_workbook(self, filepath_or_buffer: FilePathOrBuffer):\n from pyxlsb import open_workbook\n\n # TODO: hack in buffer capability\n # This might need some modifications to the Pyxlsb library\n # Actual work for opening it is in xlsbpackage.py, line 20-ish\n\n return open_workbook(filepath_or_buffer)\n\n @property\n def sheet_names(self) -> List[str]:\n return self.book.sheets\n\n def get_sheet_by_name(self, name: str):\n self.raise_if_bad_sheet_by_name(name)\n return self.book.get_sheet(name)\n\n def get_sheet_by_index(self, index: int):\n self.raise_if_bad_sheet_by_index(index)\n # pyxlsb sheets are indexed from 1 onwards\n # There's a fix for this in the source, but the pypi package doesn't have it\n 
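# e.g. get_sheet_by_index(0) resolves to self.book.get_sheet(1)\n 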
return self.book.get_sheet(index + 1)\n\n def _convert_cell(self, cell, convert_float: bool) -> Scalar:\n # TODO: there is no way to distinguish between floats and datetimes in pyxlsb\n # This means that there is no way to read datetime types from an xlsb file yet\n if cell.v is None:\n return \"\" # Prevents non-named columns from not showing up as Unnamed: i\n if isinstance(cell.v, float) and convert_float:\n val = int(cell.v)\n if val == cell.v:\n return val\n else:\n return float(cell.v)\n\n return cell.v\n\n def get_sheet_data(self, sheet, convert_float: bool) -> List[List[Scalar]]:\n return [\n [self._convert_cell(c, convert_float) for c in r]\n for r in sheet.rows(sparse=False)\n ]\n", "import numpy as np\n\nimport matplotlib as mpl\nfrom matplotlib.testing.decorators import image_comparison\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1 import ImageGrid\n\n\n# The original version of this test relied on mpl_toolkits's slightly different\n# colorbar implementation; moving to matplotlib's own colorbar implementation\n# caused the small image comparison error.\n@image_comparison(['imagegrid_cbar_mode.png'],\n remove_text=True, style='mpl20', tol=0.3)\ndef test_imagegrid_cbar_mode_edge():\n # Remove this line when this test image is regenerated.\n plt.rcParams['pcolormesh.snap'] = False\n\n X, Y = np.meshgrid(np.linspace(0, 6, 30), np.linspace(0, 6, 30))\n arr = np.sin(X) * np.cos(Y) + 1j*(np.sin(3*Y) * np.cos(Y/2.))\n\n fig = plt.figure(figsize=(18, 9))\n\n positions = (241, 242, 243, 244, 245, 246, 247, 248)\n directions = ['row']*4 + ['column']*4\n cbar_locations = ['left', 'right', 'top', 'bottom']*2\n\n for position, direction, location in zip(\n positions, directions, cbar_locations):\n grid = ImageGrid(fig, position,\n nrows_ncols=(2, 2),\n direction=direction,\n cbar_location=location,\n cbar_size='20%',\n cbar_mode='edge')\n ax1, ax2, ax3, ax4, = grid\n\n ax1.imshow(arr.real, cmap='nipy_spectral')\n ax2.imshow(arr.imag, cmap='hot')\n ax3.imshow(np.abs(arr), cmap='jet')\n ax4.imshow(np.arctan2(arr.imag, arr.real), cmap='hsv')\n\n # In each row/column, the \"first\" colorbars must be overwritten by the\n # \"second\" ones. To achieve this, clear out the axes first.\n for ax in grid:\n ax.cax.cla()\n cb = ax.cax.colorbar(\n ax.images[0],\n ticks=mpl.ticker.MaxNLocator(5)) # old default locator.\n\n\ndef test_imagegrid():\n fig = plt.figure()\n grid = ImageGrid(fig, 111, nrows_ncols=(1, 1))\n ax = grid[0]\n im = ax.imshow([[1, 2]], norm=mpl.colors.LogNorm())\n cb = ax.cax.colorbar(im)\n assert isinstance(cb.locator, mpl.colorbar._ColorbarLogLocator)\n", "import numpy as np\nfrom statsmodels.robust import mad\nfrom scipy.optimize import minimize_scalar\n\n\nclass BoxCox(object):\n \"\"\"\n Mixin class to allow for a Box-Cox transformation.\n \"\"\"\n\n def transform_boxcox(self, x, lmbda=None, method='guerrero', **kwargs):\n \"\"\"\n Performs a Box-Cox transformation on the data array x. If lmbda is None,\n the indicated method is used to estimate a suitable lambda parameter.\n\n Parameters\n ----------\n x : array_like\n lmbda : float\n The lambda parameter for the Box-Cox transform. If None, a value\n will be estimated by means of the specified method.\n method : {'guerrero', 'loglik'}\n The method to estimate the lambda parameter. Will only be used if\n lmbda is None, and defaults to 'guerrero', detailed in Guerrero\n (1993). 
'loglik' maximizes the profile likelihood.\n **kwargs\n Options for the specified method.\n * For 'guerrero', this entails window_length, the grouping\n parameter, scale, the dispersion measure, and options, to be\n passed to the optimizer.\n * For 'loglik': options, to be passed to the optimizer.\n\n Returns\n -------\n y : array_like\n The transformed series.\n lmbda : float\n The lmbda parameter used to transform the series.\n\n References\n ----------\n Guerrero, Victor M. 1993. \"Time-series analysis supported by power\n transformations\". `Journal of Forecasting`. 12 (1): 37-48.\n\n Guerrero, Victor M. and Perera, Rafael. 2004. \"Variance Stabilizing\n Power Transformation for Time Series,\" `Journal of Modern Applied\n Statistical Methods`. 3 (2): 357-369.\n\n Box, G. E. P., and D. R. Cox. 1964. \"An Analysis of Transformations\".\n `Journal of the Royal Statistical Society`. 26 (2): 211-252.\n \"\"\"\n x = np.asarray(x)\n\n if np.any(x <= 0):\n raise ValueError(\"Non-positive x.\")\n\n if lmbda is None:\n lmbda = self._est_lambda(x,\n method=method,\n **kwargs)\n\n # if less than 0.01, treat lambda as zero.\n if np.isclose(lmbda, 0.):\n y = np.log(x)\n else:\n y = (np.power(x, lmbda) - 1.) / lmbda\n\n return y, lmbda\n\n def untransform_boxcox(self, x, lmbda, method='naive'):\n \"\"\"\n Back-transforms the Box-Cox transformed data array, by means of the\n indicated method. The provided argument lmbda should be the lambda\n parameter that was used to initially transform the data.\n\n Parameters\n ----------\n x : array_like\n The transformed series.\n lmbda : float\n The lambda parameter that was used to transform the series.\n method : {'naive'}\n Indicates the method to be used in the untransformation. Defaults\n to 'naive', which reverses the transformation.\n\n NOTE: 'naive' is implemented natively, while other methods may be\n available in subclasses!\n\n Returns\n -------\n y : array_like\n The untransformed series.\n \"\"\"\n method = method.lower()\n x = np.asarray(x)\n\n if method == 'naive':\n if np.isclose(lmbda, 0.):\n y = np.exp(x)\n else:\n y = np.power(lmbda * x + 1, 1. / lmbda)\n else:\n raise ValueError(\"Method '{0}' not understood.\".format(method))\n\n return y\n\n def _est_lambda(self, x, bounds=(-1, 2), method='guerrero', **kwargs):\n \"\"\"\n Computes an estimate for the lambda parameter in the Box-Cox\n transformation using method.\n\n Parameters\n ----------\n x : array_like\n The untransformed data.\n bounds : tuple\n Numeric 2-tuple, that indicate the solution space for the lambda\n parameter. Default (-1, 2).\n method : {'guerrero', 'loglik'}\n The method by which to estimate lambda. Defaults to 'guerrero', but\n the profile likelihood ('loglik') is also available.\n **kwargs\n Options for the specified method.\n * For 'guerrero': window_length (int), the seasonality/grouping\n parameter. Scale ({'mad', 'sd'}), the dispersion measure. 
Options\n (dict), to be passed to the optimizer.\n * For 'loglik': Options (dict), to be passed to the optimizer.\n\n Returns\n -------\n lmbda : float\n The lambda parameter.\n \"\"\"\n method = method.lower()\n\n if len(bounds) != 2:\n raise ValueError(\"Bounds of length {0} not understood.\"\n .format(len(bounds)))\n elif bounds[0] >= bounds[1]:\n raise ValueError(\"Lower bound exceeds upper bound.\")\n\n if method == 'guerrero':\n lmbda = self._guerrero_cv(x, bounds=bounds, **kwargs)\n elif method == 'loglik':\n lmbda = self._loglik_boxcox(x, bounds=bounds, **kwargs)\n else:\n raise ValueError(\"Method '{0}' not understood.\".format(method))\n\n return lmbda\n\n def _guerrero_cv(self, x, bounds, window_length=4, scale='sd',\n options={'maxiter': 25}):\n \"\"\"\n Computes lambda using guerrero's coefficient of variation. If no\n seasonality is present in the data, window_length is set to 4 (as\n per Guerrero and Perera, (2004)).\n\n NOTE: Seasonality-specific auxiliaries *should* provide their own\n seasonality parameter.\n\n Parameters\n ----------\n x : array_like\n bounds : tuple\n Numeric 2-tuple, that indicate the solution space for the lambda\n parameter.\n window_length : int\n Seasonality/grouping parameter. Default 4, as per Guerrero and\n Perera (2004). NOTE: this indicates the length of the individual\n groups, not the total number of groups!\n scale : {'sd', 'mad'}\n The dispersion measure to be used. 'sd' indicates the sample\n standard deviation, but the more robust 'mad' is also available.\n options : dict\n The options (as a dict) to be passed to the optimizer.\n \"\"\"\n nobs = len(x)\n groups = int(nobs / window_length)\n\n # remove the first n < window_length observations from consideration.\n grouped_data = np.reshape(x[nobs - (groups * window_length): nobs],\n (groups, window_length))\n mean = np.mean(grouped_data, 1)\n\n scale = scale.lower()\n if scale == 'sd':\n dispersion = np.std(grouped_data, 1, ddof=1)\n elif scale == 'mad':\n dispersion = mad(grouped_data, axis=1)\n else:\n raise ValueError(\"Scale '{0}' not understood.\".format(scale))\n\n def optim(lmbda):\n rat = np.divide(dispersion, np.power(mean, 1 - lmbda)) # eq 6, p 40\n return np.std(rat, ddof=1) / np.mean(rat)\n\n res = minimize_scalar(optim,\n bounds=bounds,\n method='bounded',\n options=options)\n return res.x\n\n def _loglik_boxcox(self, x, bounds, options={'maxiter': 25}):\n \"\"\"\n Taken from the Stata manual on Box-Cox regressions, where this is the\n special case of 'lhs only'. As an estimator for the variance, the\n sample variance is used, by means of the well-known formula.\n\n Parameters\n ----------\n x : array_like\n options : dict\n The options (as a dict) to be passed to the optimizer.\n \"\"\"\n sum_x = np.sum(np.log(x))\n nobs = len(x)\n\n def optim(lmbda):\n y, lmbda = self.transform_boxcox(x, lmbda)\n return (1 - lmbda) * sum_x + (nobs / 2.) 
* np.log(np.var(y))\n\n res = minimize_scalar(optim,\n bounds=bounds,\n method='bounded',\n options=options)\n return res.x\n", "'''subclassing kde\n\nAuthor: josef pktd\n'''\n\nimport numpy as np\nfrom numpy.testing import assert_almost_equal, assert_\nimport scipy\nfrom scipy import stats\nimport matplotlib.pylab as plt\n\n\nclass gaussian_kde_set_covariance(stats.gaussian_kde):\n '''\n from Anne Archibald in mailinglist:\n http://www.nabble.com/Width-of-the-gaussian-in-stats.kde.gaussian_kde---td19558924.html#a19558924\n '''\n def __init__(self, dataset, covariance):\n self.covariance = covariance\n scipy.stats.gaussian_kde.__init__(self, dataset)\n\n def _compute_covariance(self):\n self.inv_cov = np.linalg.inv(self.covariance)\n self._norm_factor = np.sqrt(np.linalg.det(2*np.pi*self.covariance)) * self.n\n\n\nclass gaussian_kde_covfact(stats.gaussian_kde):\n def __init__(self, dataset, covfact = 'scotts'):\n self.covfact = covfact\n scipy.stats.gaussian_kde.__init__(self, dataset)\n\n def _compute_covariance_(self):\n '''not used'''\n self.inv_cov = np.linalg.inv(self.covariance)\n self._norm_factor = np.sqrt(np.linalg.det(2*np.pi*self.covariance)) * self.n\n\n def covariance_factor(self):\n if self.covfact in ['sc', 'scotts']:\n return self.scotts_factor()\n if self.covfact in ['si', 'silverman']:\n return self.silverman_factor()\n elif self.covfact:\n return float(self.covfact)\n else:\n raise ValueError('covariance factor has to be scotts, silverman or a number')\n\n def reset_covfact(self, covfact):\n self.covfact = covfact\n self.covariance_factor()\n self._compute_covariance()\n\ndef plotkde(covfact):\n gkde.reset_covfact(covfact)\n kdepdf = gkde.evaluate(ind)\n plt.figure()\n # plot histgram of sample\n plt.hist(xn, bins=20, normed=1)\n # plot estimated density\n plt.plot(ind, kdepdf, label='kde', color=\"g\")\n # plot data generating density\n plt.plot(ind, alpha * stats.norm.pdf(ind, loc=mlow) +\n (1-alpha) * stats.norm.pdf(ind, loc=mhigh),\n color=\"r\", label='DGP: normal mix')\n plt.title('Kernel Density Estimation - ' + str(gkde.covfact))\n plt.legend()\n\n\ndef test_kde_1d():\n np.random.seed(8765678)\n n_basesample = 500\n xn = np.random.randn(n_basesample)\n xnmean = xn.mean()\n xnstd = xn.std(ddof=1)\n print(xnmean, xnstd)\n\n # get kde for original sample\n gkde = stats.gaussian_kde(xn)\n\n # evaluate the density function for the kde for some points\n xs = np.linspace(-7,7,501)\n kdepdf = gkde.evaluate(xs)\n normpdf = stats.norm.pdf(xs, loc=xnmean, scale=xnstd)\n print('MSE', np.sum((kdepdf - normpdf)**2))\n print('axabserror', np.max(np.abs(kdepdf - normpdf)))\n intervall = xs[1] - xs[0]\n assert_(np.sum((kdepdf - normpdf)**2)*intervall < 0.01)\n #assert_array_almost_equal(kdepdf, normpdf, decimal=2)\n print(gkde.integrate_gaussian(0.0, 1.0))\n print(gkde.integrate_box_1d(-np.inf, 0.0))\n print(gkde.integrate_box_1d(0.0, np.inf))\n print(gkde.integrate_box_1d(-np.inf, xnmean))\n print(gkde.integrate_box_1d(xnmean, np.inf))\n\n assert_almost_equal(gkde.integrate_box_1d(xnmean, np.inf), 0.5, decimal=1)\n assert_almost_equal(gkde.integrate_box_1d(-np.inf, xnmean), 0.5, decimal=1)\n assert_almost_equal(gkde.integrate_box(xnmean, np.inf), 0.5, decimal=1)\n assert_almost_equal(gkde.integrate_box(-np.inf, xnmean), 0.5, decimal=1)\n\n assert_almost_equal(gkde.integrate_kde(gkde),\n (kdepdf**2).sum()*intervall, decimal=2)\n assert_almost_equal(gkde.integrate_gaussian(xnmean, xnstd**2),\n (kdepdf*normpdf).sum()*intervall, decimal=2)\n## 
assert_almost_equal(gkde.integrate_gaussian(0.0, 1.0),\n## (kdepdf*normpdf).sum()*intervall, decimal=2)\n\n\n\n\nif __name__ == '__main__':\n # generate a sample\n n_basesample = 1000\n np.random.seed(8765678)\n alpha = 0.6 #weight for (prob of) lower distribution\n mlow, mhigh = (-3,3) #mean locations for gaussian mixture\n xn = np.concatenate([mlow + np.random.randn(alpha * n_basesample),\n mhigh + np.random.randn((1-alpha) * n_basesample)])\n\n # get kde for original sample\n #gkde = stats.gaussian_kde(xn)\n gkde = gaussian_kde_covfact(xn, 0.1)\n # evaluate the density function for the kde for some points\n ind = np.linspace(-7,7,101)\n kdepdf = gkde.evaluate(ind)\n\n plt.figure()\n # plot histgram of sample\n plt.hist(xn, bins=20, normed=1)\n # plot estimated density\n plt.plot(ind, kdepdf, label='kde', color=\"g\")\n # plot data generating density\n plt.plot(ind, alpha * stats.norm.pdf(ind, loc=mlow) +\n (1-alpha) * stats.norm.pdf(ind, loc=mhigh),\n color=\"r\", label='DGP: normal mix')\n plt.title('Kernel Density Estimation')\n plt.legend()\n\n gkde = gaussian_kde_covfact(xn, 'scotts')\n kdepdf = gkde.evaluate(ind)\n plt.figure()\n # plot histgram of sample\n plt.hist(xn, bins=20, normed=1)\n # plot estimated density\n plt.plot(ind, kdepdf, label='kde', color=\"g\")\n # plot data generating density\n plt.plot(ind, alpha * stats.norm.pdf(ind, loc=mlow) +\n (1-alpha) * stats.norm.pdf(ind, loc=mhigh),\n color=\"r\", label='DGP: normal mix')\n plt.title('Kernel Density Estimation')\n plt.legend()\n #plt.show()\n for cv in ['scotts', 'silverman', 0.05, 0.1, 0.5]:\n plotkde(cv)\n\n test_kde_1d()\n\n\n np.random.seed(8765678)\n n_basesample = 1000\n xn = np.random.randn(n_basesample)\n xnmean = xn.mean()\n xnstd = xn.std(ddof=1)\n\n # get kde for original sample\n gkde = stats.gaussian_kde(xn)\n", "\"\"\"\nUnit tests for the basin hopping global minimization algorithm.\n\"\"\"\nimport copy\n\nfrom numpy.testing import assert_almost_equal, assert_equal, assert_\nimport pytest\nfrom pytest import raises as assert_raises\nimport numpy as np\nfrom numpy import cos, sin\n\nfrom scipy.optimize import basinhopping, OptimizeResult\nfrom scipy.optimize._basinhopping import (\n Storage, RandomDisplacement, Metropolis, AdaptiveStepsize)\nfrom scipy._lib._pep440 import Version\n\n\ndef func1d(x):\n f = cos(14.5 * x - 0.3) + (x + 0.2) * x\n df = np.array(-14.5 * sin(14.5 * x - 0.3) + 2. * x + 0.2)\n return f, df\n\n\ndef func2d_nograd(x):\n f = cos(14.5 * x[0] - 0.3) + (x[1] + 0.2) * x[1] + (x[0] + 0.2) * x[0]\n return f\n\n\ndef func2d(x):\n f = cos(14.5 * x[0] - 0.3) + (x[1] + 0.2) * x[1] + (x[0] + 0.2) * x[0]\n df = np.zeros(2)\n df[0] = -14.5 * sin(14.5 * x[0] - 0.3) + 2. * x[0] + 0.2\n df[1] = 2. 
* x[1] + 0.2\n return f, df\n\n\ndef func2d_easyderiv(x):\n f = 2.0*x[0]**2 + 2.0*x[0]*x[1] + 2.0*x[1]**2 - 6.0*x[0]\n df = np.zeros(2)\n df[0] = 4.0*x[0] + 2.0*x[1] - 6.0\n df[1] = 2.0*x[0] + 4.0*x[1]\n\n return f, df\n\n\nclass MyTakeStep1(RandomDisplacement):\n \"\"\"use a copy of displace, but have it set a special parameter to\n make sure it's actually being used.\"\"\"\n def __init__(self):\n self.been_called = False\n super(MyTakeStep1, self).__init__()\n\n def __call__(self, x):\n self.been_called = True\n return super(MyTakeStep1, self).__call__(x)\n\n\ndef myTakeStep2(x):\n \"\"\"redo RandomDisplacement in function form without the attribute stepsize\n to make sure everything still works ok\n \"\"\"\n s = 0.5\n x += np.random.uniform(-s, s, np.shape(x))\n return x\n\n\nclass MyAcceptTest(object):\n \"\"\"pass a custom accept test\n\n This does nothing but make sure it's being used and ensure all the\n possible return values are accepted\n \"\"\"\n def __init__(self):\n self.been_called = False\n self.ncalls = 0\n self.testres = [False, 'force accept', True, np.bool_(True),\n np.bool_(False), [], {}, 0, 1]\n\n def __call__(self, **kwargs):\n self.been_called = True\n self.ncalls += 1\n if self.ncalls - 1 < len(self.testres):\n return self.testres[self.ncalls - 1]\n else:\n return True\n\n\nclass MyCallBack(object):\n \"\"\"pass a custom callback function\n\n This makes sure it's being used. It also returns True after 10\n steps to ensure that it's stopping early.\n\n \"\"\"\n def __init__(self):\n self.been_called = False\n self.ncalls = 0\n\n def __call__(self, x, f, accepted):\n self.been_called = True\n self.ncalls += 1\n if self.ncalls == 10:\n return True\n\n\nclass TestBasinHopping(object):\n\n def setup_method(self):\n \"\"\" Tests setup.\n\n Run tests based on the 1-D and 2-D functions described above.\n \"\"\"\n self.x0 = (1.0, [1.0, 1.0])\n self.sol = (-0.195, np.array([-0.195, -0.1]))\n\n self.tol = 3 # number of decimal places\n\n self.niter = 100\n self.disp = False\n\n # fix random seed\n np.random.seed(1234)\n\n self.kwargs = {\"method\": \"L-BFGS-B\", \"jac\": True}\n self.kwargs_nograd = {\"method\": \"L-BFGS-B\"}\n\n def test_TypeError(self):\n # test the TypeErrors are raised on bad input\n i = 1\n # if take_step is passed, it must be callable\n assert_raises(TypeError, basinhopping, func2d, self.x0[i],\n take_step=1)\n # if accept_test is passed, it must be callable\n assert_raises(TypeError, basinhopping, func2d, self.x0[i],\n accept_test=1)\n\n def test_1d_grad(self):\n # test 1-D minimizations with gradient\n i = 0\n res = basinhopping(func1d, self.x0[i], minimizer_kwargs=self.kwargs,\n niter=self.niter, disp=self.disp)\n assert_almost_equal(res.x, self.sol[i], self.tol)\n\n def test_2d(self):\n # test 2d minimizations with gradient\n i = 1\n res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,\n niter=self.niter, disp=self.disp)\n assert_almost_equal(res.x, self.sol[i], self.tol)\n assert_(res.nfev > 0)\n\n def test_njev(self):\n # test njev is returned correctly\n i = 1\n minimizer_kwargs = self.kwargs.copy()\n # L-BFGS-B doesn't use njev, but BFGS does\n minimizer_kwargs[\"method\"] = \"BFGS\"\n res = basinhopping(func2d, self.x0[i],\n minimizer_kwargs=minimizer_kwargs, niter=self.niter,\n disp=self.disp)\n assert_(res.nfev > 0)\n assert_equal(res.nfev, res.njev)\n\n def test_jac(self):\n # test Jacobian returned\n minimizer_kwargs = self.kwargs.copy()\n # BFGS returns a Jacobian\n minimizer_kwargs[\"method\"] = \"BFGS\"\n\n res = 
basinhopping(func2d_easyderiv, [0.0, 0.0],\n minimizer_kwargs=minimizer_kwargs, niter=self.niter,\n disp=self.disp)\n\n assert_(hasattr(res.lowest_optimization_result, \"jac\"))\n\n # in this case, the Jacobian is just [df/dx, df/dy]\n _, jacobian = func2d_easyderiv(res.x)\n assert_almost_equal(res.lowest_optimization_result.jac, jacobian,\n self.tol)\n\n def test_2d_nograd(self):\n # test 2-D minimizations without gradient\n i = 1\n res = basinhopping(func2d_nograd, self.x0[i],\n minimizer_kwargs=self.kwargs_nograd,\n niter=self.niter, disp=self.disp)\n assert_almost_equal(res.x, self.sol[i], self.tol)\n\n def test_all_minimizers(self):\n # Test 2-D minimizations with gradient. Nelder-Mead, Powell, and COBYLA\n # don't accept jac=True, so aren't included here.\n i = 1\n methods = ['CG', 'BFGS', 'Newton-CG', 'L-BFGS-B', 'TNC', 'SLSQP']\n minimizer_kwargs = copy.copy(self.kwargs)\n for method in methods:\n minimizer_kwargs[\"method\"] = method\n res = basinhopping(func2d, self.x0[i],\n minimizer_kwargs=minimizer_kwargs,\n niter=self.niter, disp=self.disp)\n assert_almost_equal(res.x, self.sol[i], self.tol)\n\n def test_all_nograd_minimizers(self):\n # Test 2-D minimizations without gradient. Newton-CG requires jac=True,\n # so not included here.\n i = 1\n methods = ['CG', 'BFGS', 'L-BFGS-B', 'TNC', 'SLSQP',\n 'Nelder-Mead', 'Powell', 'COBYLA']\n minimizer_kwargs = copy.copy(self.kwargs_nograd)\n for method in methods:\n minimizer_kwargs[\"method\"] = method\n res = basinhopping(func2d_nograd, self.x0[i],\n minimizer_kwargs=minimizer_kwargs,\n niter=self.niter, disp=self.disp)\n tol = self.tol\n if method == 'COBYLA':\n tol = 2\n assert_almost_equal(res.x, self.sol[i], decimal=tol)\n\n def test_pass_takestep(self):\n # test that passing a custom takestep works\n # also test that the stepsize is being adjusted\n takestep = MyTakeStep1()\n initial_step_size = takestep.stepsize\n i = 1\n res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,\n niter=self.niter, disp=self.disp,\n take_step=takestep)\n assert_almost_equal(res.x, self.sol[i], self.tol)\n assert_(takestep.been_called)\n # make sure that the build in adaptive step size has been used\n assert_(initial_step_size != takestep.stepsize)\n\n def test_pass_simple_takestep(self):\n # test that passing a custom takestep without attribute stepsize\n takestep = myTakeStep2\n i = 1\n res = basinhopping(func2d_nograd, self.x0[i],\n minimizer_kwargs=self.kwargs_nograd,\n niter=self.niter, disp=self.disp,\n take_step=takestep)\n assert_almost_equal(res.x, self.sol[i], self.tol)\n\n def test_pass_accept_test(self):\n # test passing a custom accept test\n # makes sure it's being used and ensures all the possible return values\n # are accepted.\n accept_test = MyAcceptTest()\n i = 1\n # there's no point in running it more than a few steps.\n basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,\n niter=10, disp=self.disp, accept_test=accept_test)\n assert_(accept_test.been_called)\n\n def test_pass_callback(self):\n # test passing a custom callback function\n # This makes sure it's being used. 
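# --- hedged sketch (illustration only, not part of the test suite) --------
# Minimal example of the callback mechanism exercised by test_pass_callback:
# the callback receives (x, f, accepted) after each step, and returning True
# asks basinhopping to stop early. Names here (stop_after_five) are invented
# for the sketch.
import numpy as np
from scipy.optimize import basinhopping

def f(x):
    return np.cos(14.5 * x[0] - 0.3) + (x[0] + 0.2) * x[0]

history = []

def stop_after_five(x, fval, accepted):
    history.append(fval)
    if len(history) >= 5:
        return True  # signals basinhopping to terminate

res = basinhopping(f, [1.0], minimizer_kwargs={"method": "L-BFGS-B"},
                   niter=100, callback=stop_after_five, seed=0)
print(res.message, len(history))
# ---------------------------------------------------------------------------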
It also returns True after 10 steps\n # to ensure that it's stopping early.\n callback = MyCallBack()\n i = 1\n # there's no point in running it more than a few steps.\n res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,\n niter=30, disp=self.disp, callback=callback)\n assert_(callback.been_called)\n assert_(\"callback\" in res.message[0])\n # One of the calls of MyCallBack is during BasinHoppingRunner\n # construction, so there are only 9 remaining before MyCallBack stops\n # the minimization.\n assert_equal(res.nit, 9)\n\n def test_minimizer_fail(self):\n # test if a minimizer fails\n i = 1\n self.kwargs[\"options\"] = dict(maxiter=0)\n self.niter = 10\n res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,\n niter=self.niter, disp=self.disp)\n # the number of failed minimizations should be the number of\n # iterations + 1\n assert_equal(res.nit + 1, res.minimization_failures)\n\n def test_niter_zero(self):\n # gh5915, what happens if you call basinhopping with niter=0\n i = 0\n basinhopping(func1d, self.x0[i], minimizer_kwargs=self.kwargs,\n niter=0, disp=self.disp)\n\n def test_seed_reproducibility(self):\n # seed should ensure reproducibility between runs\n minimizer_kwargs = {\"method\": \"L-BFGS-B\", \"jac\": True}\n\n f_1 = []\n\n def callback(x, f, accepted):\n f_1.append(f)\n\n basinhopping(func2d, [1.0, 1.0], minimizer_kwargs=minimizer_kwargs,\n niter=10, callback=callback, seed=10)\n\n f_2 = []\n\n def callback2(x, f, accepted):\n f_2.append(f)\n\n basinhopping(func2d, [1.0, 1.0], minimizer_kwargs=minimizer_kwargs,\n niter=10, callback=callback2, seed=10)\n assert_equal(np.array(f_1), np.array(f_2))\n\n @pytest.mark.skipif(Version(np.__version__) < Version('1.17'),\n reason='Generator not available for numpy, < 1.17')\n def test_random_gen(self):\n # check that np.random.Generator can be used (numpy >= 1.17)\n rng = np.random.default_rng(1)\n\n minimizer_kwargs = {\"method\": \"L-BFGS-B\", \"jac\": True}\n\n res1 = basinhopping(func2d, [1.0, 1.0],\n minimizer_kwargs=minimizer_kwargs,\n niter=10, seed=rng)\n\n rng = np.random.default_rng(1)\n res2 = basinhopping(func2d, [1.0, 1.0],\n minimizer_kwargs=minimizer_kwargs,\n niter=10, seed=rng)\n assert_equal(res1.x, res2.x)\n\n def test_monotonic_basin_hopping(self):\n # test 1-D minimizations with gradient and T=0\n i = 0\n res = basinhopping(func1d, self.x0[i], minimizer_kwargs=self.kwargs,\n niter=self.niter, disp=self.disp, T=0)\n assert_almost_equal(res.x, self.sol[i], self.tol)\n\n\nclass Test_Storage(object):\n def setup_method(self):\n self.x0 = np.array(1)\n self.f0 = 0\n\n minres = OptimizeResult()\n minres.x = self.x0\n minres.fun = self.f0\n\n self.storage = Storage(minres)\n\n def test_higher_f_rejected(self):\n new_minres = OptimizeResult()\n new_minres.x = self.x0 + 1\n new_minres.fun = self.f0 + 1\n\n ret = self.storage.update(new_minres)\n minres = self.storage.get_lowest()\n assert_equal(self.x0, minres.x)\n assert_equal(self.f0, minres.fun)\n assert_(not ret)\n\n def test_lower_f_accepted(self):\n new_minres = OptimizeResult()\n new_minres.x = self.x0 + 1\n new_minres.fun = self.f0 - 1\n\n ret = self.storage.update(new_minres)\n minres = self.storage.get_lowest()\n assert_(self.x0 != minres.x)\n assert_(self.f0 != minres.fun)\n assert_(ret)\n\n\nclass Test_RandomDisplacement(object):\n def setup_method(self):\n self.stepsize = 1.0\n self.displace = RandomDisplacement(stepsize=self.stepsize)\n self.N = 300000\n self.x0 = np.zeros([self.N])\n\n def test_random(self):\n # the mean 
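# --- hedged sketch (illustration only) -------------------------------------
# The reproducibility property checked by test_seed_reproducibility above:
# two runs with the same integer seed follow identical step sequences and
# end at the same point.
import numpy as np
from scipy.optimize import basinhopping

def f2(x):
    return np.cos(14.5 * x[0] - 0.3) + (x[1] + 0.2) * x[1] + (x[0] + 0.2) * x[0]

kw = {"method": "L-BFGS-B"}
r1 = basinhopping(f2, [1.0, 1.0], minimizer_kwargs=kw, niter=10, seed=7)
r2 = basinhopping(f2, [1.0, 1.0], minimizer_kwargs=kw, niter=10, seed=7)
assert np.allclose(r1.x, r2.x)
# ---------------------------------------------------------------------------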
should be 0\n # the variance should be (2*stepsize)**2 / 12\n # note these tests are random, they will fail from time to time\n x = self.displace(self.x0)\n v = (2. * self.stepsize) ** 2 / 12\n assert_almost_equal(np.mean(x), 0., 1)\n assert_almost_equal(np.var(x), v, 1)\n\n\nclass Test_Metropolis(object):\n def setup_method(self):\n self.T = 2.\n self.met = Metropolis(self.T)\n\n def test_boolean_return(self):\n # the return must be a bool, else an error will be raised in\n # basinhopping\n ret = self.met(f_new=0., f_old=1.)\n assert isinstance(ret, bool)\n\n def test_lower_f_accepted(self):\n assert_(self.met(f_new=0., f_old=1.))\n\n def test_KeyError(self):\n # should raise KeyError if kwargs f_old or f_new is not passed\n assert_raises(KeyError, self.met, f_old=1.)\n assert_raises(KeyError, self.met, f_new=1.)\n\n def test_accept(self):\n # test that steps are randomly accepted for f_new > f_old\n one_accept = False\n one_reject = False\n for i in range(1000):\n if one_accept and one_reject:\n break\n ret = self.met(f_new=1., f_old=0.5)\n if ret:\n one_accept = True\n else:\n one_reject = True\n assert_(one_accept)\n assert_(one_reject)\n\n def test_GH7495(self):\n # an overflow in exp was producing a RuntimeWarning\n # create own object here in case someone changes self.T\n met = Metropolis(2)\n with np.errstate(over='raise'):\n met.accept_reject(0, 2000)\n\n\nclass Test_AdaptiveStepsize(object):\n def setup_method(self):\n self.stepsize = 1.\n self.ts = RandomDisplacement(stepsize=self.stepsize)\n self.target_accept_rate = 0.5\n self.takestep = AdaptiveStepsize(takestep=self.ts, verbose=False,\n accept_rate=self.target_accept_rate)\n\n def test_adaptive_increase(self):\n # if few steps are rejected, the stepsize should increase\n x = 0.\n self.takestep(x)\n self.takestep.report(False)\n for i in range(self.takestep.interval):\n self.takestep(x)\n self.takestep.report(True)\n assert_(self.ts.stepsize > self.stepsize)\n\n def test_adaptive_decrease(self):\n # if few steps are rejected, the stepsize should increase\n x = 0.\n self.takestep(x)\n self.takestep.report(True)\n for i in range(self.takestep.interval):\n self.takestep(x)\n self.takestep.report(False)\n assert_(self.ts.stepsize < self.stepsize)\n\n def test_all_accepted(self):\n # test that everything works OK if all steps were accepted\n x = 0.\n for i in range(self.takestep.interval + 1):\n self.takestep(x)\n self.takestep.report(True)\n assert_(self.ts.stepsize > self.stepsize)\n\n def test_all_rejected(self):\n # test that everything works OK if all steps were rejected\n x = 0.\n for i in range(self.takestep.interval + 1):\n self.takestep(x)\n self.takestep.report(False)\n assert_(self.ts.stepsize < self.stepsize)\n", "import os\nimport warnings\nimport inspect\nimport shutil\nimport locale\nimport types\nfrom copy import deepcopy\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom matplotlib.testing.compare import compare_images\n\nfrom plotnine import ggplot, theme\n\n\nTOLERANCE = 2 # Default tolerance for the tests\nDPI = 72 # Default DPI for the tests\n\n# This partial theme modifies all themes that are used in\n# the test. It is limited to setting the size of the test\n# images Should a test require a larger or smaller figure\n# size, the dpi or aspect_ratio should be modified.\ntest_theme = theme(figure_size=(640/DPI, 480/DPI))\n\nif not os.path.exists(os.path.join(\n os.path.dirname(__file__), 'baseline_images')):\n raise IOError(\n \"The baseline image directory does not exist. 
\"\n \"This is most likely because the test data is not installed. \"\n \"You may need to install plotnine from source to get the \"\n \"test data.\")\n\n\ndef raise_no_baseline_image(filename):\n raise Exception(\"Baseline image {} is missing\".format(filename))\n\n\ndef ggplot_equals(gg, right):\n \"\"\"\n Compare ggplot object to image determined by `right`\n\n Parameters\n ----------\n gg : ggplot\n ggplot object\n right : str | tuple\n Identifier. If a tuple, then first element is the\n identifier and the second element is a `dict`.\n The `dict` can have two keys\n - tol - tolerance for the image comparison, a float.\n - savefig_kwargs - Parameter used by MPL to save\n the figure. This is a `dict`.\n\n The right looks like any one of the following::\n\n - 'identifier'\n - ('identifier', {'tol': 17})\n - ('identifier', {'tol': 17, 'savefig_kwargs': {'dpi': 80}})\n\n This function is meant to monkey patch ggplot.__eq__\n so that tests can use the `assert` statement.\n \"\"\"\n _setup()\n if isinstance(right, (tuple, list)):\n name, params = right\n tol = params.get('tol', TOLERANCE)\n _savefig_kwargs = params.get('savefig_kwargs', {})\n else:\n name, tol = right, TOLERANCE\n _savefig_kwargs = {}\n\n savefig_kwargs = {'dpi': DPI}\n savefig_kwargs.update(_savefig_kwargs)\n\n gg += test_theme\n fig = gg.draw()\n test_file = inspect.stack()[1][1]\n filenames = make_test_image_filenames(name, test_file)\n\n # savefig ignores the figure face & edge colors\n facecolor = fig.get_facecolor()\n edgecolor = fig.get_edgecolor()\n if facecolor:\n savefig_kwargs['facecolor'] = facecolor\n if edgecolor:\n savefig_kwargs['edgecolor'] = edgecolor\n\n # Save the figure before testing whether the original image\n # actually exists. This makes creating new tests much easier,\n # as the result image can afterwards just be copied.\n fig.savefig(filenames.result, **savefig_kwargs)\n _teardown()\n if os.path.exists(filenames.baseline):\n shutil.copyfile(filenames.baseline, filenames.expected)\n else:\n # Putting the exception in short function makes for\n # short pytest error messages\n raise_no_baseline_image(filenames.baseline)\n\n err = compare_images(filenames.expected, filenames.result,\n tol, in_decorator=True)\n gg._err = err # For the pytest error message\n return False if err else True\n\n\nggplot.__eq__ = ggplot_equals\n\n\ndef draw_test(self):\n \"\"\"\n Try drawing the ggplot object\n\n Parameters\n ----------\n self : ggplot\n ggplot object\n\n This function is meant to monkey patch ggplot.draw_test\n so that tests can draw and not care about cleaning up\n the MPL figure.\n \"\"\"\n try:\n figure = self.draw()\n except Exception as err:\n plt.close('all')\n raise err\n else:\n if figure:\n plt.close(figure)\n\n\nggplot.draw_test = draw_test\n\n\ndef build_test(self):\n \"\"\"\n Try building the ggplot object\n\n Parameters\n ----------\n self : ggplot\n ggplot object\n\n This function is meant to monkey patch ggplot.build_test\n so that tests build.\n \"\"\"\n self = deepcopy(self)\n self._build()\n return self\n\n\nggplot.build_test = build_test\n\n\ndef pytest_assertrepr_compare(op, left, right):\n if (isinstance(left, ggplot) and\n isinstance(right, (str, tuple)) and\n op == \"==\"):\n\n msg = (\"images not close: {actual:s} vs. {expected:s} \"\n \"(RMS {rms:.2f})\".format(**left._err))\n return [msg]\n\n\ndef make_test_image_filenames(name, test_file):\n \"\"\"\n Create filenames for testing\n\n Parameters\n ----------\n name : str\n An identifier for the specific test. 
This will make-up\n part of the filenames.\n test_file : str\n Full path of the test file. This will determine the\n directory structure\n\n Returns\n -------\n out : types.SimpleNamespace\n Object with 3 attributes to store the generated filenames\n\n - result\n - baseline\n - expected\n\n `result`, is the filename for the image generated by the test.\n `baseline`, is the filename for the baseline image to which\n the result will be compared.\n `expected`, is the filename to the copy of the baseline that\n will be stored in the same directory as the result image.\n Creating a copy make comparison easier.\n \"\"\"\n if '.png' not in name:\n name = name + '.png'\n\n basedir = os.path.abspath(os.path.dirname(test_file))\n basename = os.path.basename(test_file)\n subdir = os.path.splitext(basename)[0]\n\n baseline_dir = os.path.join(basedir, 'baseline_images', subdir)\n result_dir = os.path.abspath(os.path.join('result_images', subdir))\n\n if not os.path.exists(result_dir):\n os.makedirs(result_dir, exist_ok=True)\n\n base, ext = os.path.splitext(name)\n expected_name = '{}-{}{}'.format(base, 'expected', ext)\n\n filenames = types.SimpleNamespace(\n baseline=os.path.join(baseline_dir, name),\n result=os.path.join(result_dir, name),\n expected=os.path.join(result_dir, expected_name))\n return filenames\n\n\n# This is called from the cleanup decorator\ndef _setup():\n # The baseline images are created in this locale, so we should use\n # it during all of the tests.\n try:\n locale.setlocale(locale.LC_ALL, str('en_US.UTF-8'))\n except locale.Error:\n try:\n locale.setlocale(locale.LC_ALL, str('English_United States.1252'))\n except locale.Error:\n warnings.warn(\n \"Could not set locale to English/United States. \"\n \"Some date-related tests may fail\")\n\n plt.switch_backend('Agg') # use Agg backend for these test\n if mpl.get_backend().lower() != \"agg\":\n msg = (\"Using a wrong matplotlib backend ({0}), \"\n \"which will not produce proper images\")\n raise Exception(msg.format(mpl.get_backend()))\n\n # These settings *must* be hardcoded for running the comparison\n # tests\n mpl.rcdefaults() # Start with all defaults\n mpl.rcParams['text.hinting'] = 'auto'\n mpl.rcParams['text.antialiased'] = True\n mpl.rcParams['text.hinting_factor'] = 8\n\n # make sure we don't carry over bad plots from former tests\n msg = (\"no of open figs: {} -> find the last test with ' \"\n \"python tests.py -v' and add a '@cleanup' decorator.\")\n assert len(plt.get_fignums()) == 0, msg.format(plt.get_fignums())\n\n\ndef _teardown():\n plt.close('all')\n # reset any warning filters set in tests\n warnings.resetwarnings()\n", "\"\"\"\nSandbox Panel Estimators\n\nReferences\n-----------\n\nBaltagi, Badi H. `Econometric Analysis of Panel Data.` 4th ed. 
Wiley, 2008.\n\"\"\"\nfrom functools import reduce\n\nimport numpy as np\n\nfrom statsmodels.regression.linear_model import GLS\n\n__all__ = [\"PanelModel\"]\n\nfrom pandas import Panel\n\n\ndef group(X):\n \"\"\"\n Returns unique numeric values for groups without sorting.\n\n Examples\n --------\n >>> X = np.array(['a','a','b','c','b','c'])\n >>> group(X)\n >>> g\n array([ 0., 0., 1., 2., 1., 2.])\n \"\"\"\n uniq_dict = {}\n group = np.zeros(len(X))\n for i in range(len(X)):\n if not X[i] in uniq_dict:\n uniq_dict.update({X[i] : len(uniq_dict)})\n group[i] = uniq_dict[X[i]]\n return group\n\ndef repanel_cov(groups, sigmas):\n '''calculate error covariance matrix for random effects model\n\n Parameters\n ----------\n groups : ndarray, (nobs, nre) or (nobs,)\n array of group/category observations\n sigma : ndarray, (nre+1,)\n array of standard deviations of random effects,\n last element is the standard deviation of the\n idiosyncratic error\n\n Returns\n -------\n omega : ndarray, (nobs, nobs)\n covariance matrix of error\n omegainv : ndarray, (nobs, nobs)\n inverse covariance matrix of error\n omegainvsqrt : ndarray, (nobs, nobs)\n squareroot inverse covariance matrix of error\n such that omega = omegainvsqrt * omegainvsqrt.T\n\n Notes\n -----\n This does not use sparse matrices and constructs nobs by nobs\n matrices. Also, omegainvsqrt is not sparse, i.e. elements are non-zero\n '''\n\n if groups.ndim == 1:\n groups = groups[:,None]\n nobs, nre = groups.shape\n omega = sigmas[-1]*np.eye(nobs)\n for igr in range(nre):\n group = groups[:,igr:igr+1]\n groupuniq = np.unique(group)\n dummygr = sigmas[igr] * (group == groupuniq).astype(float)\n omega += np.dot(dummygr, dummygr.T)\n ev, evec = np.linalg.eigh(omega) #eig does not work\n omegainv = np.dot(evec, (1/ev * evec).T)\n omegainvhalf = evec/np.sqrt(ev)\n return omega, omegainv, omegainvhalf\n\n\n\nclass PanelData(Panel):\n pass\n\nclass PanelModel(object):\n \"\"\"\n An abstract statistical model class for panel (longitudinal) datasets.\n\n Parameters\n ----------\n endog : array_like or str\n If a pandas object is used then endog should be the name of the\n endogenous variable as a string.\n# exog\n# panel_arr\n# time_arr\n panel_data : pandas.Panel object\n\n Notes\n -----\n If a pandas object is supplied it is assumed that the major_axis is time\n and that the minor_axis has the panel variable.\n \"\"\"\n def __init__(self, endog=None, exog=None, panel=None, time=None,\n xtnames=None, equation=None, panel_data=None):\n if panel_data is None:\n# if endog == None and exog == None and panel == None and \\\n# time == None:\n# raise ValueError(\"If pandel_data is False then endog, exog, \\\n#panel_arr, and time_arr cannot be None.\")\n self.initialize(endog, exog, panel, time, xtnames, equation)\n# elif aspandas != False:\n# if not isinstance(endog, str):\n# raise ValueError(\"If a pandas object is supplied then endog \\\n#must be a string containing the name of the endogenous variable\")\n# if not isinstance(aspandas, Panel):\n# raise ValueError(\"Only pandas.Panel objects are supported\")\n# self.initialize_pandas(endog, aspandas, panel_name)\n\n\n def initialize(self, endog, exog, panel, time, xtnames, equation):\n \"\"\"\n Initialize plain array model.\n\n See PanelModel\n \"\"\"\n#TODO: for now, we are going assume a constant, and then make the first\n#panel the base, add a flag for this....\n\n # get names\n names = equation.split(\" \")\n self.endog_name = names[0]\n exog_names = names[1:] # this makes the order matter in the 
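# --- hedged sketch (illustration only) -------------------------------------
# Simplified toy version of the construction in repanel_cov above: build the
# random-effects error covariance from group dummies (with explicit
# variances), then verify that the eigendecomposition-based "inverse square
# root" recovers the inverse of omega.
import numpy as np

groups = np.array([0, 0, 1, 1, 1, 2])
sigma_u, sigma_e = 1.5, 1.0               # group-effect and idiosyncratic sd
dummies = (groups[:, None] == np.unique(groups)).astype(float)
omega = sigma_u**2 * dummies @ dummies.T + sigma_e**2 * np.eye(len(groups))

ev, evec = np.linalg.eigh(omega)
omegainvhalf = evec / np.sqrt(ev)         # columns scaled by 1/sqrt(eigenvalue)
assert np.allclose(omegainvhalf @ omegainvhalf.T, np.linalg.inv(omega))
# ---------------------------------------------------------------------------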
array\n self.panel_name = xtnames[0]\n self.time_name = xtnames[1]\n\n\n novar = exog.var(0) == 0\n if True in novar:\n cons_index = np.where(novar == 1)[0][0] # constant col. num\n exog_names.insert(cons_index, 'cons')\n\n self._cons_index = novar # used again in fit_fixed\n self.exog_names = exog_names\n self.endog = np.squeeze(np.asarray(endog))\n exog = np.asarray(exog)\n self.exog = exog\n self.panel = np.asarray(panel)\n self.time = np.asarray(time)\n\n self.paneluniq = np.unique(panel)\n self.timeuniq = np.unique(time)\n#TODO: this structure can possibly be extracted somewhat to deal with\n#names in general\n\n#TODO: add some dimension checks, etc.\n\n# def initialize_pandas(self, endog, aspandas):\n# \"\"\"\n# Initialize pandas objects.\n#\n# See PanelModel.\n# \"\"\"\n# self.aspandas = aspandas\n# endog = aspandas[endog].values\n# self.endog = np.squeeze(endog)\n# exog_name = aspandas.columns.tolist()\n# exog_name.remove(endog)\n# self.exog = aspandas.filterItems(exog_name).values\n#TODO: can the above be simplified to slice notation?\n# if panel_name != None:\n# self.panel_name = panel_name\n# self.exog_name = exog_name\n# self.endog_name = endog\n# self.time_arr = aspandas.major_axis\n #TODO: is time always handled correctly in fromRecords?\n# self.panel_arr = aspandas.minor_axis\n#TODO: all of this might need to be refactored to explicitly rely (internally)\n# on the pandas LongPanel structure for speed and convenience.\n# not sure this part is finished...\n\n#TODO: does not conform to new initialize\n def initialize_pandas(self, panel_data, endog_name, exog_name):\n self.panel_data = panel_data\n endog = panel_data[endog_name].values # does this create a copy?\n self.endog = np.squeeze(endog)\n if exog_name is None:\n exog_name = panel_data.columns.tolist()\n exog_name.remove(endog_name)\n self.exog = panel_data.filterItems(exog_name).values # copy?\n self._exog_name = exog_name\n self._endog_name = endog_name\n self._timeseries = panel_data.major_axis # might not need these\n self._panelseries = panel_data.minor_axis\n\n#TODO: this could be pulled out and just have a by kwd that takes\n# the panel or time array\n#TODO: this also needs to be expanded for 'twoway'\n def _group_mean(self, X, index='oneway', counts=False, dummies=False):\n \"\"\"\n Get group means of X by time or by panel.\n\n index default is panel\n \"\"\"\n if index == 'oneway':\n Y = self.panel\n uniq = self.paneluniq\n elif index == 'time':\n Y = self.time\n uniq = self.timeuniq\n else:\n raise ValueError(\"index %s not understood\" % index)\n print(Y, uniq, uniq[:,None], len(Y), len(uniq), len(uniq[:,None]),\n index)\n #TODO: use sparse matrices\n dummy = (Y == uniq[:,None]).astype(float)\n if X.ndim > 1:\n mean = np.dot(dummy,X)/dummy.sum(1)[:,None]\n else:\n mean = np.dot(dummy,X)/dummy.sum(1)\n if counts is False and dummies is False:\n return mean\n elif counts is True and dummies is False:\n return mean, dummy.sum(1)\n elif counts is True and dummies is True:\n return mean, dummy.sum(1), dummy\n elif counts is False and dummies is True:\n return mean, dummy\n\n#TODO: Use kwd arguments or have fit_method methods?\n def fit(self, model=None, method=None, effects='oneway'):\n \"\"\"\n method : LSDV, demeaned, MLE, GLS, BE, FE, optional\n model :\n between\n fixed\n random\n pooled\n [gmm]\n effects :\n oneway\n time\n twoway\n femethod : demeaned (only one implemented)\n WLS\n remethod :\n swar -\n amemiya\n nerlove\n walhus\n\n\n Notes\n -----\n This is unfinished. 
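# --- hedged sketch (illustration only) -------------------------------------
# The dummy-matrix trick used by _group_mean above, on toy data: each row of
# `dummy` selects the observations of one panel unit, so a single dot
# product yields all group means at once.
import numpy as np

panel = np.array([0, 0, 1, 1, 1, 2])
X = np.arange(12.0).reshape(6, 2)                 # six obs, two regressors
uniq = np.unique(panel)
dummy = (panel == uniq[:, None]).astype(float)    # shape (n_groups, nobs)
group_means = dummy @ X / dummy.sum(1)[:, None]   # shape (n_groups, k)
print(group_means)
# ---------------------------------------------------------------------------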
None of the method arguments work yet.\n Only oneway effects should work.\n \"\"\"\n if method: # get rid of this with default\n method = method.lower()\n model = model.lower()\n if method and method not in [\"lsdv\", \"demeaned\", \"mle\",\n \"gls\", \"be\", \"fe\"]:\n # get rid of if method with default\n raise ValueError(\"%s not a valid method\" % method)\n# if method == \"lsdv\":\n# self.fit_lsdv(model)\n if model == 'pooled':\n return GLS(self.endog, self.exog).fit()\n if model == 'between':\n return self._fit_btwn(method, effects)\n if model == 'fixed':\n return self._fit_fixed(method, effects)\n\n# def fit_lsdv(self, effects):\n# \"\"\"\n# Fit using least squares dummy variables.\n#\n# Notes\n# -----\n# Should only be used for small `nobs`.\n# \"\"\"\n# pdummies = None\n# tdummies = None\n\n def _fit_btwn(self, method, effects):\n # group mean regression or WLS\n if effects != \"twoway\":\n endog = self._group_mean(self.endog, index=effects)\n exog = self._group_mean(self.exog, index=effects)\n else:\n raise ValueError(\"%s effects is not valid for the between \"\n \"estimator\" % effects)\n befit = GLS(endog, exog).fit()\n return befit\n\n def _fit_fixed(self, method, effects):\n endog = self.endog\n exog = self.exog\n demeantwice = False\n if effects in [\"oneway\",\"twoways\"]:\n if effects == \"twoways\":\n demeantwice = True\n effects = \"oneway\"\n endog_mean, counts = self._group_mean(endog, index=effects,\n counts=True)\n exog_mean = self._group_mean(exog, index=effects)\n counts = counts.astype(int)\n endog = endog - np.repeat(endog_mean, counts)\n exog = exog - np.repeat(exog_mean, counts, axis=0)\n if demeantwice or effects == \"time\":\n endog_mean, dummies = self._group_mean(endog, index=\"time\",\n dummies=True)\n exog_mean = self._group_mean(exog, index=\"time\")\n # This allows unbalanced panels\n endog = endog - np.dot(endog_mean, dummies)\n exog = exog - np.dot(dummies.T, exog_mean)\n fefit = GLS(endog, exog[:,-self._cons_index]).fit()\n#TODO: might fail with one regressor\n return fefit\n\n\n\n\nclass SURPanel(PanelModel):\n pass\n\nclass SEMPanel(PanelModel):\n pass\n\nclass DynamicPanel(PanelModel):\n pass\n\nif __name__ == \"__main__\":\n import pandas\n from pandas import Panel\n import statsmodels.api as sm\n import numpy.lib.recfunctions as nprf\n\n data = sm.datasets.grunfeld.load(as_pandas=False)\n # Baltagi does not include American Steel\n endog = data.endog[:-20]\n fullexog = data.exog[:-20]\n# fullexog.sort(order=['firm','year'])\n panel_arr = nprf.append_fields(fullexog, 'investment', endog, float,\n usemask=False)\n\n panel_df = pandas.DataFrame(panel_arr)\n panel_panda = panel_df.set_index(['year', 'firm']).to_panel()\n\n\n # the most cumbersome way of doing it as far as preprocessing by hand\n exog = fullexog[['value','capital']].view(float).reshape(-1,2)\n exog = sm.add_constant(exog, prepend=False)\n panel = group(fullexog['firm'])\n year = fullexog['year']\n panel_mod = PanelModel(endog, exog, panel, year, xtnames=['firm','year'],\n equation='invest value capital')\n# note that equation does not actually do anything but name the variables\n panel_ols = panel_mod.fit(model='pooled')\n\n panel_be = panel_mod.fit(model='between', effects='oneway')\n panel_fe = panel_mod.fit(model='fixed', effects='oneway')\n\n panel_bet = panel_mod.fit(model='between', effects='time')\n panel_fet = panel_mod.fit(model='fixed', effects='time')\n\n panel_fe2 = panel_mod.fit(model='fixed', effects='twoways')\n\n\n#see also Baltagi (3rd edt) 3.3 THE RANDOM EFFECTS 
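# --- hedged sketch (illustration only) -------------------------------------
# The within transformation performed by _fit_fixed above: subtract each
# unit's mean from its own observations. The np.repeat step assumes the data
# are sorted so that each unit's observations are contiguous.
import numpy as np

panel = np.array([0, 0, 0, 1, 1, 2])                 # sorted by unit
y = np.array([1.0, 2.0, 3.0, 10.0, 12.0, 5.0])
uniq, counts = np.unique(panel, return_counts=True)
dummy = (panel == uniq[:, None]).astype(float)
means = dummy @ y / counts
y_within = y - np.repeat(means, counts)              # demeaned within units
print(y_within)
# ---------------------------------------------------------------------------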
MODEL p.35\n#for explicit formulas for spectral decomposition\n#but this works also for unbalanced panel\n#\n#I also just saw: 9.4.2 The Random Effects Model p.176 which is\n#partially almost the same as I did\n#\n#this needs to use sparse matrices for larger datasets\n#\n#\"\"\"\n#\n#import numpy as np\n#\n\n groups = np.array([0,0,0,1,1,2,2,2])\n nobs = groups.shape[0]\n groupuniq = np.unique(groups)\n periods = np.array([0,1,2,1,2,0,1,2])\n perioduniq = np.unique(periods)\n\n dummygr = (groups[:,None] == groupuniq).astype(float)\n dummype = (periods[:,None] == perioduniq).astype(float)\n\n sigma = 1.\n sigmagr = np.sqrt(2.)\n sigmape = np.sqrt(3.)\n\n #dummyall = np.c_[sigma*np.ones((nobs,1)), sigmagr*dummygr,\n # sigmape*dummype]\n #exclude constant ?\n dummyall = np.c_[sigmagr*dummygr, sigmape*dummype]\n # omega is the error variance-covariance matrix for the stacked\n # observations\n omega = np.dot(dummyall, dummyall.T) + sigma* np.eye(nobs)\n print(omega)\n print(np.linalg.cholesky(omega))\n ev, evec = np.linalg.eigh(omega) #eig does not work\n omegainv = np.dot(evec, (1/ev * evec).T)\n omegainv2 = np.linalg.inv(omega)\n omegacomp = np.dot(evec, (ev * evec).T)\n print(np.max(np.abs(omegacomp - omega)))\n #check\n #print(np.dot(omegainv,omega)\n print(np.max(np.abs(np.dot(omegainv,omega) - np.eye(nobs))))\n omegainvhalf = evec/np.sqrt(ev) #not sure whether ev should not be column\n print(np.max(np.abs(np.dot(omegainvhalf,omegainvhalf.T) - omegainv)))\n\n # now we can use omegainvhalf in GLS (instead of the cholesky)\n\n\n\n\n\n\n\n\n sigmas2 = np.array([sigmagr, sigmape, sigma])\n groups2 = np.column_stack((groups, periods))\n omega_, omegainv_, omegainvhalf_ = repanel_cov(groups2, sigmas2)\n print(np.max(np.abs(omega_ - omega)))\n print(np.max(np.abs(omegainv_ - omegainv)))\n print(np.max(np.abs(omegainvhalf_ - omegainvhalf)))\n\n # notation Baltagi (3rd) section 9.4.1 (Fixed Effects Model)\n Pgr = reduce(np.dot,[dummygr,\n np.linalg.inv(np.dot(dummygr.T, dummygr)),dummygr.T])\n Qgr = np.eye(nobs) - Pgr\n # within group effect: np.dot(Qgr, groups)\n # but this is not memory efficient, compared to groupstats\n print(np.max(np.abs(np.dot(Qgr, groups))))\n", "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 16 22:21:26 2018\n\nAuthor: Josef Perktold\nLicense: BSD-3\n\"\"\"\n\nimport numpy as np\nfrom numpy.testing import assert_allclose, assert_equal\n\nimport statsmodels.base._penalties as smpen\nfrom statsmodels.tools.numdiff import approx_fprime, approx_hess\n\n\nclass CheckPenalty(object):\n\n def test_symmetry(self):\n pen = self.pen\n x = self.params\n\n p = np.array([pen.func(np.atleast_1d(xi)) for xi in x])\n assert_allclose(p, p[::-1], rtol=1e-10)\n # func(0) should be 0\n assert_allclose(pen.func(0 * np.atleast_1d(x[0])), 0, rtol=1e-10)\n\n def test_derivatives(self):\n pen = self.pen\n x = self.params\n\n ps = np.array([pen.deriv(np.atleast_1d(xi)) for xi in x])\n psn = np.array([approx_fprime(np.atleast_1d(xi), pen.func) for xi in x])\n assert_allclose(ps, psn, rtol=1e-7, atol=1e-8)\n\n ph = np.array([pen.deriv2(np.atleast_1d(xi)) for xi in x])\n phn = np.array([approx_hess(np.atleast_1d(xi), pen.func) for xi in x])\n if ph.ndim == 2:\n # SmoothedSCAD returns only diagonal if hessian if independent\n # TODO should ww allow this also in L@?\n ph = np.array([np.diag(phi) for phi in ph])\n assert_allclose(ph, phn, rtol=1e-7, atol=1e-8)\n\n\nclass TestL2Constraints0(CheckPenalty):\n\n @classmethod\n def setup_class(cls):\n x0 = np.linspace(-0.2, 0.2, 11)\n cls.params = 
np.column_stack((x0, x0))\n cls.pen = smpen.L2ConstraintsPenalty()\n\n def test_equivalence(self):\n # compare plain penalty with included weights or restriction\n pen = self.pen\n x = self.params\n k = x.shape[1]\n\n pen2 = smpen.L2ConstraintsPenalty(weights=np.ones(k))\n pen3 = smpen.L2ConstraintsPenalty(restriction=np.eye(k))\n f = pen.func(x.T)\n d = pen.deriv(x.T)\n d2 = np.array([pen.deriv2(np.atleast_1d(xi)) for xi in x])\n for pen_ in [pen2, pen3]:\n assert_allclose(pen_.func(x.T), f, rtol=1e-7, atol=1e-8)\n assert_allclose(pen_.deriv(x.T), d, rtol=1e-7, atol=1e-8)\n d2_ = np.array([pen.deriv2(np.atleast_1d(xi)) for xi in x])\n assert_allclose(d2_, d2, rtol=1e-10, atol=1e-8)\n\n\nclass TestL2Constraints1(CheckPenalty):\n\n @classmethod\n def setup_class(cls):\n x0 = np.linspace(-0.2, 0.2, 11)\n cls.params = np.column_stack((x0, x0))\n cls.pen = smpen.L2ConstraintsPenalty(restriction=[[1,0], [1, 1]])\n\n def test_values(self):\n pen = self.pen\n x = self.params\n r = pen.restriction\n f = (r.dot(x.T)**2).sum(0)\n assert_allclose(pen.func(x.T), f, rtol=1e-7, atol=1e-8)\n\n\nclass TestSmoothedSCAD(CheckPenalty):\n\n @classmethod\n def setup_class(cls):\n x0 = np.linspace(-0.2, 0.2, 11)\n cls.params = np.column_stack((x0, x0))\n cls.pen = smpen.SCADSmoothed(tau=0.05, c0=0.05)\n\n\nclass TestPseudoHuber(CheckPenalty):\n\n @classmethod\n def setup_class(cls):\n x0 = np.linspace(-0.2, 0.2, 11)\n cls.params = np.column_stack((x0, x0))\n cls.pen = smpen.PseudoHuber(0.1)\n\n def test_backward_compatibility(self):\n wts = [0.5]\n pen = smpen.PseudoHuber(0.1, weights=wts)\n assert_equal(pen.weights, wts)\n\n def test_deprecated_priority(self):\n weights = [1.0]\n pen = smpen.PseudoHuber(0.1, weights=weights)\n\n assert_equal(pen.weights, weights)\n\n def test_weights_assignment(self):\n weights = [1.0, 2.0]\n pen = smpen.PseudoHuber(0.1, weights=weights)\n assert_equal(pen.weights, weights)\n\n\nclass TestL2(CheckPenalty):\n\n @classmethod\n def setup_class(cls):\n x0 = np.linspace(-0.2, 0.2, 11)\n cls.params = np.column_stack((x0, x0))\n cls.pen = smpen.L2()\n\n def test_backward_compatibility(self):\n wts = [0.5]\n pen = smpen.L2(weights=wts)\n assert_equal(pen.weights, wts)\n\n def test_deprecated_priority(self):\n weights = [1.0]\n pen = smpen.L2(weights=weights)\n assert_equal(pen.weights, weights)\n\n def test_weights_assignment(self):\n weights = [1.0, 2.0]\n pen = smpen.L2(weights=weights)\n assert_equal(pen.weights, weights)\n\n\nclass TestNonePenalty(CheckPenalty):\n\n @classmethod\n def setup_class(cls):\n x0 = np.linspace(-0.2, 0.2, 11)\n cls.params = np.column_stack((x0, x0))\n cls.pen = smpen.NonePenalty()\n", "\"\"\"Create a mosaic plot from a contingency table.\n\nIt allows to visualize multivariate categorical data in a rigorous\nand informative way.\n\nsee the docstring of the mosaic function for more informations.\n\"\"\"\n# Author: Enrico Giampieri - 21 Jan 2013\n\nfrom statsmodels.compat.python import lrange, lzip\nimport numpy as np\nfrom itertools import product\n\nfrom numpy import iterable, r_, cumsum, array\nfrom statsmodels.graphics import utils\nfrom pandas import DataFrame\n\n__all__ = [\"mosaic\"]\n\n\ndef _normalize_split(proportion):\n \"\"\"\n return a list of proportions of the available space given the division\n if only a number is given, it will assume a split in two pieces\n \"\"\"\n if not iterable(proportion):\n if proportion == 0:\n proportion = array([0.0, 1.0])\n elif proportion >= 1:\n proportion = array([1.0, 0.0])\n elif proportion < 0:\n raise 
ValueError(\"proportions should be positive,\"\n \"given value: {}\".format(proportion))\n else:\n proportion = array([proportion, 1.0 - proportion])\n proportion = np.asarray(proportion, dtype=float)\n if np.any(proportion < 0):\n raise ValueError(\"proportions should be positive,\"\n \"given value: {}\".format(proportion))\n if np.allclose(proportion, 0):\n raise ValueError(\"at least one proportion should be \"\n \"greater than zero\".format(proportion))\n # ok, data are meaningful, so go on\n if len(proportion) < 2:\n return array([0.0, 1.0])\n left = r_[0, cumsum(proportion)]\n left /= left[-1] * 1.0\n return left\n\n\ndef _split_rect(x, y, width, height, proportion, horizontal=True, gap=0.05):\n \"\"\"\n Split the given rectangle in n segments whose proportion is specified\n along the given axis if a gap is inserted, they will be separated by a\n certain amount of space, retaining the relative proportion between them\n a gap of 1 correspond to a plot that is half void and the remaining half\n space is proportionally divided among the pieces.\n \"\"\"\n x, y, w, h = float(x), float(y), float(width), float(height)\n if (w < 0) or (h < 0):\n raise ValueError(\"dimension of the square less than\"\n \"zero w={} h=()\".format(w, h))\n proportions = _normalize_split(proportion)\n\n # extract the starting point and the dimension of each subdivision\n # in respect to the unit square\n starting = proportions[:-1]\n amplitude = proportions[1:] - starting\n\n # how much each extrema is going to be displaced due to gaps\n starting += gap * np.arange(len(proportions) - 1)\n\n # how much the squares plus the gaps are extended\n extension = starting[-1] + amplitude[-1] - starting[0]\n\n # normalize everything for fit again in the original dimension\n starting /= extension\n amplitude /= extension\n\n # bring everything to the original square\n starting = (x if horizontal else y) + starting * (w if horizontal else h)\n amplitude = amplitude * (w if horizontal else h)\n\n # create each 4-tuple for each new block\n results = [(s, y, a, h) if horizontal else (x, s, w, a)\n for s, a in zip(starting, amplitude)]\n return results\n\n\ndef _reduce_dict(count_dict, partial_key):\n \"\"\"\n Make partial sum on a counter dict.\n Given a match for the beginning of the category, it will sum each value.\n \"\"\"\n L = len(partial_key)\n count = sum(v for k, v in count_dict.items() if k[:L] == partial_key)\n return count\n\n\ndef _key_splitting(rect_dict, keys, values, key_subset, horizontal, gap):\n \"\"\"\n Given a dictionary where each entry is a rectangle, a list of key and\n value (count of elements in each category) it split each rect accordingly,\n as long as the key start with the tuple key_subset. 
The other keys are\n returned without modification.\n \"\"\"\n result = {}\n L = len(key_subset)\n for name, (x, y, w, h) in rect_dict.items():\n if key_subset == name[:L]:\n # split base on the values given\n divisions = _split_rect(x, y, w, h, values, horizontal, gap)\n for key, rect in zip(keys, divisions):\n result[name + (key,)] = rect\n else:\n result[name] = (x, y, w, h)\n return result\n\n\ndef _tuplify(obj):\n \"\"\"convert an object in a tuple of strings (even if it is not iterable,\n like a single integer number, but keep the string healthy)\n \"\"\"\n if np.iterable(obj) and not isinstance(obj, str):\n res = tuple(str(o) for o in obj)\n else:\n res = (str(obj),)\n return res\n\n\ndef _categories_level(keys):\n \"\"\"use the Ordered dict to implement a simple ordered set\n return each level of each category\n [[key_1_level_1,key_2_level_1],[key_1_level_2,key_2_level_2]]\n \"\"\"\n res = []\n for i in zip(*(keys)):\n tuplefied = _tuplify(i)\n res.append(list(dict([(j, None) for j in tuplefied])))\n return res\n\n\ndef _hierarchical_split(count_dict, horizontal=True, gap=0.05):\n \"\"\"\n Split a square in a hierarchical way given a contingency table.\n\n Hierarchically split the unit square in alternate directions\n in proportion to the subdivision contained in the contingency table\n count_dict. This is the function that actually perform the tiling\n for the creation of the mosaic plot. If the gap array has been specified\n it will insert a corresponding amount of space (proportional to the\n unit length), while retaining the proportionality of the tiles.\n\n Parameters\n ----------\n count_dict : dict\n Dictionary containing the contingency table.\n Each category should contain a non-negative number\n with a tuple as index. It expects that all the combination\n of keys to be represents; if that is not true, will\n automatically consider the missing values as 0\n horizontal : bool\n The starting direction of the split (by default along\n the horizontal axis)\n gap : float or array of floats\n The list of gaps to be applied on each subdivision.\n If the length of the given array is less of the number\n of subcategories (or if it's a single number) it will extend\n it with exponentially decreasing gaps\n\n Returns\n ---------\n base_rect : dict\n A dictionary containing the result of the split.\n To each key is associated a 4-tuple of coordinates\n that are required to create the corresponding rectangle:\n\n 0 - x position of the lower left corner\n 1 - y position of the lower left corner\n 2 - width of the rectangle\n 3 - height of the rectangle\n \"\"\"\n # this is the unit square that we are going to divide\n base_rect = dict([(tuple(), (0, 0, 1, 1))])\n # get the list of each possible value for each level\n categories_levels = _categories_level(list(count_dict.keys()))\n L = len(categories_levels)\n\n # recreate the gaps vector starting from an int\n if not np.iterable(gap):\n gap = [gap / 1.5 ** idx for idx in range(L)]\n # extend if it's too short\n if len(gap) < L:\n last = gap[-1]\n gap = list(*gap) + [last / 1.5 ** idx for idx in range(L)]\n # trim if it's too long\n gap = gap[:L]\n # put the count dictionay in order for the keys\n # this will allow some code simplification\n count_ordered = dict([(k, count_dict[k])\n for k in list(product(*categories_levels))])\n for cat_idx, cat_enum in enumerate(categories_levels):\n # get the partial key up to the actual level\n base_keys = list(product(*categories_levels[:cat_idx]))\n for key in base_keys:\n # for each partial and 
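# --- hedged sketch (illustration only) -------------------------------------
# The partial-sum idea behind _reduce_dict, which the tiling loop below uses
# at each level: given a contingency table keyed by tuples, sum every cell
# whose key starts with a given prefix.
counts = {('a', 'x'): 1, ('a', 'y'): 2, ('b', 'x'): 3, ('b', 'y'): 4}

def reduce_dict(count_dict, partial_key):
    L = len(partial_key)
    return sum(v for k, v in count_dict.items() if k[:L] == partial_key)

print(reduce_dict(counts, ('a',)))   # 3  -> total of the 'a' row
print(reduce_dict(counts, ()))       # 10 -> grand total
# ---------------------------------------------------------------------------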
each value calculate how many\n # observation we have in the counting dictionary\n part_count = [_reduce_dict(count_ordered, key + (partial,))\n for partial in cat_enum]\n # reduce the gap for subsequents levels\n new_gap = gap[cat_idx]\n # split the given subkeys in the rectangle dictionary\n base_rect = _key_splitting(base_rect, cat_enum, part_count, key,\n horizontal, new_gap)\n horizontal = not horizontal\n return base_rect\n\n\ndef _single_hsv_to_rgb(hsv):\n \"\"\"Transform a color from the hsv space to the rgb.\"\"\"\n from matplotlib.colors import hsv_to_rgb\n return hsv_to_rgb(array(hsv).reshape(1, 1, 3)).reshape(3)\n\n\ndef _create_default_properties(data):\n \"\"\"\"Create the default properties of the mosaic given the data\n first it will varies the color hue (first category) then the color\n saturation (second category) and then the color value\n (third category). If a fourth category is found, it will put\n decoration on the rectangle. Does not manage more than four\n level of categories\n \"\"\"\n categories_levels = _categories_level(list(data.keys()))\n Nlevels = len(categories_levels)\n # first level, the hue\n L = len(categories_levels[0])\n # hue = np.linspace(1.0, 0.0, L+1)[:-1]\n hue = np.linspace(0.0, 1.0, L + 2)[:-2]\n # second level, the saturation\n L = len(categories_levels[1]) if Nlevels > 1 else 1\n saturation = np.linspace(0.5, 1.0, L + 1)[:-1]\n # third level, the value\n L = len(categories_levels[2]) if Nlevels > 2 else 1\n value = np.linspace(0.5, 1.0, L + 1)[:-1]\n # fourth level, the hatch\n L = len(categories_levels[3]) if Nlevels > 3 else 1\n hatch = ['', '/', '-', '|', '+'][:L + 1]\n # convert in list and merge with the levels\n hue = lzip(list(hue), categories_levels[0])\n saturation = lzip(list(saturation),\n categories_levels[1] if Nlevels > 1 else [''])\n value = lzip(list(value),\n categories_levels[2] if Nlevels > 2 else [''])\n hatch = lzip(list(hatch),\n categories_levels[3] if Nlevels > 3 else [''])\n # create the properties dictionary\n properties = {}\n for h, s, v, t in product(hue, saturation, value, hatch):\n hv, hn = h\n sv, sn = s\n vv, vn = v\n tv, tn = t\n level = (hn,) + ((sn,) if sn else tuple())\n level = level + ((vn,) if vn else tuple())\n level = level + ((tn,) if tn else tuple())\n hsv = array([hv, sv, vv])\n prop = {'color': _single_hsv_to_rgb(hsv), 'hatch': tv, 'lw': 0}\n properties[level] = prop\n return properties\n\n\ndef _normalize_data(data, index):\n \"\"\"normalize the data to a dict with tuples of strings as keys\n right now it works with:\n\n 0 - dictionary (or equivalent mappable)\n 1 - pandas.Series with simple or hierarchical indexes\n 2 - numpy.ndarrays\n 3 - everything that can be converted to a numpy array\n 4 - pandas.DataFrame (via the _normalize_dataframe function)\n \"\"\"\n # if data is a dataframe we need to take a completely new road\n # before coming back here. 
Use the hasattr to avoid importing\n # pandas explicitly\n if hasattr(data, 'pivot') and hasattr(data, 'groupby'):\n data = _normalize_dataframe(data, index)\n index = None\n # can it be used as a dictionary?\n try:\n items = list(data.items())\n except AttributeError:\n # ok, I cannot use the data as a dictionary\n # Try to convert it to a numpy array, or die trying\n data = np.asarray(data)\n temp = {}\n for idx in np.ndindex(data.shape):\n name = tuple(i for i in idx)\n temp[name] = data[idx]\n data = temp\n items = list(data.items())\n # make all the keys a tuple, even if simple numbers\n data = dict([_tuplify(k), v] for k, v in items)\n categories_levels = _categories_level(list(data.keys()))\n # fill the void in the counting dictionary\n indexes = product(*categories_levels)\n contingency = dict([(k, data.get(k, 0)) for k in indexes])\n data = contingency\n # reorder the keys order according to the one specified by the user\n # or if the index is None convert it into a simple list\n # right now it does not do any check, but can be modified in the future\n index = lrange(len(categories_levels)) if index is None else index\n contingency = {}\n for key, value in data.items():\n new_key = tuple(key[i] for i in index)\n contingency[new_key] = value\n data = contingency\n return data\n\n\ndef _normalize_dataframe(dataframe, index):\n \"\"\"Take a pandas DataFrame and count the element present in the\n given columns, return a hierarchical index on those columns\n \"\"\"\n #groupby the given keys, extract the same columns and count the element\n # then collapse them with a mean\n data = dataframe[index].dropna()\n grouped = data.groupby(index, sort=False)\n counted = grouped[index].count()\n averaged = counted.mean(axis=1)\n # Fill empty missing with 0, see GH5639\n averaged = averaged.fillna(0.0)\n return averaged\n\n\ndef _statistical_coloring(data):\n \"\"\"evaluate colors from the indipendence properties of the matrix\n It will encounter problem if one category has all zeros\n \"\"\"\n data = _normalize_data(data, None)\n categories_levels = _categories_level(list(data.keys()))\n Nlevels = len(categories_levels)\n total = 1.0 * sum(v for v in data.values())\n # count the proportion of observation\n # for each level that has the given name\n # at each level\n levels_count = []\n for level_idx in range(Nlevels):\n proportion = {}\n for level in categories_levels[level_idx]:\n proportion[level] = 0.0\n for key, value in data.items():\n if level == key[level_idx]:\n proportion[level] += value\n proportion[level] /= total\n levels_count.append(proportion)\n # for each key I obtain the expected value\n # and it's standard deviation from a binomial distribution\n # under the hipothesys of independence\n expected = {}\n for key, value in data.items():\n base = 1.0\n for i, k in enumerate(key):\n base *= levels_count[i][k]\n expected[key] = base * total, np.sqrt(total * base * (1.0 - base))\n # now we have the standard deviation of distance from the\n # expected value for each tile. 
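# --- hedged sketch (illustration only) -------------------------------------
# The coloring statistic computed by _statistical_coloring, on a tiny 2x2
# table: expected cell counts under independence plus a binomial standard
# deviation give a standardized deviation per tile (|dev| > 2 triggers the
# red/blue shading, |dev| > 3 adds hatching).
import numpy as np

data = {('m', 'cat'): 10, ('m', 'dog'): 30, ('f', 'cat'): 25, ('f', 'dog'): 15}
total = sum(data.values())
row = {k: sum(v for kk, v in data.items() if kk[0] == k) / total for k in ('m', 'f')}
col = {k: sum(v for kk, v in data.items() if kk[1] == k) / total for k in ('cat', 'dog')}

for key, observed in data.items():
    p = row[key[0]] * col[key[1]]            # cell probability under independence
    expected = p * total
    sd = np.sqrt(total * p * (1 - p))
    print(key, round((observed - expected) / sd, 2))
# ---------------------------------------------------------------------------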
We create the colors from this\n sigmas = dict((k, (data[k] - m) / s) for k, (m, s) in expected.items())\n props = {}\n for key, dev in sigmas.items():\n red = 0.0 if dev < 0 else (dev / (1 + dev))\n blue = 0.0 if dev > 0 else (dev / (-1 + dev))\n green = (1.0 - red - blue) / 2.0\n hatch = 'x' if dev > 2 else 'o' if dev < -2 else ''\n props[key] = {'color': [red, green, blue], 'hatch': hatch}\n return props\n\n\ndef _get_position(x, w, h, W):\n if W == 0:\n return x\n return (x + w / 2.0) * w * h / W\n\n\ndef _create_labels(rects, horizontal, ax, rotation):\n \"\"\"find the position of the label for each value of each category\n\n right now it supports only up to the four categories\n\n ax: the axis on which the label should be applied\n rotation: the rotation list for each side\n \"\"\"\n categories = _categories_level(list(rects.keys()))\n if len(categories) > 4:\n msg = (\"maximum of 4 level supported for axes labeling... and 4\"\n \"is already a lot of levels, are you sure you need them all?\")\n raise ValueError(msg)\n labels = {}\n #keep it fixed as will be used a lot of times\n items = list(rects.items())\n vertical = not horizontal\n\n #get the axis ticks and labels locator to put the correct values!\n ax2 = ax.twinx()\n ax3 = ax.twiny()\n #this is the order of execution for horizontal disposition\n ticks_pos = [ax.set_xticks, ax.set_yticks, ax3.set_xticks, ax2.set_yticks]\n ticks_lab = [ax.set_xticklabels, ax.set_yticklabels,\n ax3.set_xticklabels, ax2.set_yticklabels]\n #for the vertical one, rotate it by one\n if vertical:\n ticks_pos = ticks_pos[1:] + ticks_pos[:1]\n ticks_lab = ticks_lab[1:] + ticks_lab[:1]\n #clean them\n for pos, lab in zip(ticks_pos, ticks_lab):\n pos([])\n lab([])\n #for each level, for each value in the level, take the mean of all\n #the sublevel that correspond to that partial key\n for level_idx, level in enumerate(categories):\n #this dictionary keep the labels only for this level\n level_ticks = dict()\n for value in level:\n #to which level it should refer to get the preceding\n #values of labels? it's rather a tricky question...\n #this is dependent on the side. 
It's a very crude management\n #but I couldn't think a more general way...\n if horizontal:\n if level_idx == 3:\n index_select = [-1, -1, -1]\n else:\n index_select = [+0, -1, -1]\n else:\n if level_idx == 3:\n index_select = [+0, -1, +0]\n else:\n index_select = [-1, -1, -1]\n #now I create the base key name and append the current value\n #It will search on all the rects to find the corresponding one\n #and use them to evaluate the mean position\n basekey = tuple(categories[i][index_select[i]]\n for i in range(level_idx))\n basekey = basekey + (value,)\n subset = dict((k, v) for k, v in items\n if basekey == k[:level_idx + 1])\n #now I extract the center of all the tiles and make a weighted\n #mean of all these center on the area of the tile\n #this should give me the (more or less) correct position\n #of the center of the category\n\n vals = list(subset.values())\n W = sum(w * h for (x, y, w, h) in vals)\n x_lab = sum(_get_position(x, w, h, W) for (x, y, w, h) in vals)\n y_lab = sum(_get_position(y, h, w, W) for (x, y, w, h) in vals)\n #now base on the ordering, select which position to keep\n #needs to be written in a more general form of 4 level are enough?\n #should give also the horizontal and vertical alignment\n side = (level_idx + vertical) % 4\n level_ticks[value] = y_lab if side % 2 else x_lab\n #now we add the labels of this level to the correct axis\n\n ticks_pos[level_idx](list(level_ticks.values()))\n ticks_lab[level_idx](list(level_ticks.keys()),\n rotation=rotation[level_idx])\n return labels\n\n\ndef mosaic(data, index=None, ax=None, horizontal=True, gap=0.005,\n properties=lambda key: None, labelizer=None,\n title='', statistic=False, axes_label=True,\n label_rotation=0.0):\n \"\"\"Create a mosaic plot from a contingency table.\n\n It allows to visualize multivariate categorical data in a rigorous\n and informative way.\n\n Parameters\n ----------\n data : {dict, Series, ndarray, DataFrame}\n The contingency table that contains the data.\n Each category should contain a non-negative number\n with a tuple as index. It expects that all the combination\n of keys to be represents; if that is not true, will\n automatically consider the missing values as 0. The order\n of the keys will be the same as the one of insertion.\n If a dict of a Series (or any other dict like object)\n is used, it will take the keys as labels. If a\n np.ndarray is provided, it will generate a simple\n numerical labels.\n index : list, optional\n Gives the preferred order for the category ordering. If not specified\n will default to the given order. It does not support named indexes\n for hierarchical Series. If a DataFrame is provided, it expects\n a list with the name of the columns.\n ax : Axes, optional\n The graph where display the mosaic. 
If not given, will\n create a new figure\n horizontal : bool, optional\n The starting direction of the split (by default along\n the horizontal axis)\n gap : {float, sequence[float]}\n The list of gaps to be applied on each subdivision.\n If the length of the given array is less of the number\n of subcategories (or if it's a single number) it will extend\n it with exponentially decreasing gaps\n properties : dict[str, callable], optional\n A function that for each tile in the mosaic take the key\n of the tile and returns the dictionary of properties\n of the generated Rectangle, like color, hatch or similar.\n A default properties set will be provided fot the keys whose\n color has not been defined, and will use color variation to help\n visually separates the various categories. It should return None\n to indicate that it should use the default property for the tile.\n A dictionary of the properties for each key can be passed,\n and it will be internally converted to the correct function\n labelizer : dict[str, callable], optional\n A function that generate the text to display at the center of\n each tile base on the key of that tile\n title : str, optional\n The title of the axis\n statistic : bool, optional\n If true will use a crude statistical model to give colors to the plot.\n If the tile has a constraint that is more than 2 standard deviation\n from the expected value under independence hypothesis, it will\n go from green to red (for positive deviations, blue otherwise) and\n will acquire an hatching when crosses the 3 sigma.\n axes_label : bool, optional\n Show the name of each value of each category\n on the axis (default) or hide them.\n label_rotation : {float, list[float]}\n The rotation of the axis label (if present). If a list is given\n each axis can have a different rotation\n\n Returns\n ---------\n fig : Figure\n The figure containing the plot.\n rects : dict\n A dictionary that has the same keys of the original\n dataset, that holds a reference to the coordinates of the\n tile and the Rectangle that represent it.\n\n References\n ----------\n A Brief History of the Mosaic Display\n Michael Friendly, York University, Psychology Department\n Journal of Computational and Graphical Statistics, 2001\n\n Mosaic Displays for Loglinear Models.\n Michael Friendly, York University, Psychology Department\n Proceedings of the Statistical Graphics Section, 1992, 61-68.\n\n Mosaic displays for multi-way contingency tables.\n Michael Friendly, York University, Psychology Department\n Journal of the american statistical association\n March 1994, Vol. 89, No. 
425, Theory and Methods\n\n Examples\n ----------\n >>> import numpy as np\n >>> import pandas as pd\n >>> import matplotlib.pyplot as plt\n >>> from statsmodels.graphics.mosaicplot import mosaic\n\n The most simple use case is to take a dictionary and plot the result\n\n >>> data = {'a': 10, 'b': 15, 'c': 16}\n >>> mosaic(data, title='basic dictionary')\n >>> plt.show()\n\n A more useful example is given by a dictionary with multiple indices.\n In this case we use a wider gap to a better visual separation of the\n resulting plot\n\n >>> data = {('a', 'b'): 1, ('a', 'c'): 2, ('d', 'b'): 3, ('d', 'c'): 4}\n >>> mosaic(data, gap=0.05, title='complete dictionary')\n >>> plt.show()\n\n The same data can be given as a simple or hierarchical indexed Series\n\n >>> rand = np.random.random\n >>> from itertools import product\n >>> tuples = list(product(['bar', 'baz', 'foo', 'qux'], ['one', 'two']))\n >>> index = pd.MultiIndex.from_tuples(tuples, names=['first', 'second'])\n >>> data = pd.Series(rand(8), index=index)\n >>> mosaic(data, title='hierarchical index series')\n >>> plt.show()\n\n The third accepted data structure is the np array, for which a\n very simple index will be created.\n\n >>> rand = np.random.random\n >>> data = 1+rand((2,2))\n >>> mosaic(data, title='random non-labeled array')\n >>> plt.show()\n\n If you need to modify the labeling and the coloring you can give\n a function tocreate the labels and one with the graphical properties\n starting from the key tuple\n\n >>> data = {'a': 10, 'b': 15, 'c': 16}\n >>> props = lambda key: {'color': 'r' if 'a' in key else 'gray'}\n >>> labelizer = lambda k: {('a',): 'first', ('b',): 'second',\n ... ('c',): 'third'}[k]\n >>> mosaic(data, title='colored dictionary', properties=props,\n ... labelizer=labelizer)\n >>> plt.show()\n\n Using a DataFrame as source, specifying the name of the columns of interest\n\n >>> gender = ['male', 'male', 'male', 'female', 'female', 'female']\n >>> pet = ['cat', 'dog', 'dog', 'cat', 'dog', 'cat']\n >>> data = pd.DataFrame({'gender': gender, 'pet': pet})\n >>> mosaic(data, ['pet', 'gender'], title='DataFrame as Source')\n >>> plt.show()\n\n .. 
plot :: plots/graphics_mosaicplot_mosaic.py\n \"\"\"\n if isinstance(data, DataFrame) and index is None:\n raise ValueError(\"You must pass an index if data is a DataFrame.\"\n \" See examples.\")\n\n from matplotlib.patches import Rectangle\n #from pylab import Rectangle\n fig, ax = utils.create_mpl_ax(ax)\n # normalize the data to a dict with tuple of strings as keys\n data = _normalize_data(data, index)\n # split the graph into different areas\n rects = _hierarchical_split(data, horizontal=horizontal, gap=gap)\n # if there is no specified way to create the labels\n # create a default one\n if labelizer is None:\n labelizer = lambda k: \"\\n\".join(k)\n if statistic:\n default_props = _statistical_coloring(data)\n else:\n default_props = _create_default_properties(data)\n if isinstance(properties, dict):\n color_dict = properties\n properties = lambda key: color_dict.get(key, None)\n for k, v in rects.items():\n # create each rectangle and put a label on it\n x, y, w, h = v\n conf = properties(k)\n props = conf if conf else default_props[k]\n text = labelizer(k)\n Rect = Rectangle((x, y), w, h, label=text, **props)\n ax.add_patch(Rect)\n ax.text(x + w / 2, y + h / 2, text, ha='center',\n va='center', size='smaller')\n #creating the labels on the axis\n #o clearing it\n if axes_label:\n if np.iterable(label_rotation):\n rotation = label_rotation\n else:\n rotation = [label_rotation] * 4\n labels = _create_labels(rects, horizontal, ax, rotation)\n else:\n ax.set_xticks([])\n ax.set_xticklabels([])\n ax.set_yticks([])\n ax.set_yticklabels([])\n ax.set_title(title)\n return fig, rects\n", "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 12 13:18:12 2018\n\nAuthor: Josef Perktold\n\"\"\"\nfrom statsmodels.compat.pandas import testing as pdt\n\nimport os.path\nimport numpy as np\nfrom numpy.testing import assert_allclose\nimport pandas as pd\n\nimport pytest\n\nfrom statsmodels.regression.linear_model import OLS\nfrom statsmodels.genmod.generalized_linear_model import GLM\nfrom statsmodels.genmod import families\n\nfrom statsmodels.stats.outliers_influence import MLEInfluence\n\ncur_dir = os.path.abspath(os.path.dirname(__file__))\n\nfile_name = 'binary_constrict.csv'\nfile_path = os.path.join(cur_dir, 'results', file_name)\ndata_bin = pd.read_csv(file_path, index_col=0)\n\nfile_name = 'results_influence_logit.csv'\nfile_path = os.path.join(cur_dir, 'results', file_name)\nresults_sas_df = pd.read_csv(file_path, index_col=0)\n\n\ndef test_influence_glm_bernoulli():\n # example uses Finney's data and is used in Pregibon 1981\n\n df = data_bin\n results_sas = np.asarray(results_sas_df)\n\n res = GLM(df['constrict'], df[['const', 'log_rate', 'log_volumne']],\n family=families.Binomial()).fit(attach_wls=True, atol=1e-10)\n\n infl = res.get_influence(observed=False)\n\n k_vars = 3\n assert_allclose(infl.dfbetas, results_sas[:, 5:8], atol=1e-4)\n assert_allclose(infl.d_params, results_sas[:, 5:8] * res.bse.values, atol=1e-4)\n assert_allclose(infl.cooks_distance[0] * k_vars, results_sas[:, 8], atol=6e-5)\n assert_allclose(infl.hat_matrix_diag, results_sas[:, 4], atol=6e-5)\n\n c_bar = infl.cooks_distance[0] * 3 * (1 - infl.hat_matrix_diag)\n assert_allclose(c_bar, results_sas[:, 9], atol=6e-5)\n\n\nclass InfluenceCompareExact(object):\n # Mixin to compare and test two Influence instances\n\n def test_basics(self):\n infl1 = self.infl1\n infl0 = self.infl0\n\n assert_allclose(infl0.hat_matrix_diag, infl1.hat_matrix_diag,\n rtol=1e-12)\n\n assert_allclose(infl0.resid_studentized,\n 
infl1.resid_studentized, rtol=1e-12, atol=1e-7)\n\n cd_rtol = getattr(self, 'cd_rtol', 1e-7)\n assert_allclose(infl0.cooks_distance[0], infl1.cooks_distance[0],\n rtol=cd_rtol)\n assert_allclose(infl0.dfbetas, infl1.dfbetas, rtol=1e-9, atol=5e-9)\n assert_allclose(infl0.d_params, infl1.d_params, rtol=1e-9, atol=5e-9)\n assert_allclose(infl0.d_fittedvalues, infl1.d_fittedvalues, rtol=5e-9)\n assert_allclose(infl0.d_fittedvalues_scaled,\n infl1.d_fittedvalues_scaled, rtol=5e-9)\n\n @pytest.mark.smoke\n @pytest.mark.matplotlib\n def test_plots(self, close_figures):\n infl1 = self.infl1\n infl0 = self.infl0\n\n fig = infl0.plot_influence(external=False)\n fig = infl1.plot_influence(external=False)\n\n fig = infl0.plot_index('resid', threshold=0.2, title='')\n fig = infl1.plot_index('resid', threshold=0.2, title='')\n\n fig = infl0.plot_index('dfbeta', idx=1, threshold=0.2, title='')\n fig = infl1.plot_index('dfbeta', idx=1, threshold=0.2, title='')\n\n fig = infl0.plot_index('cook', idx=1, threshold=0.2, title='')\n fig = infl1.plot_index('cook', idx=1, threshold=0.2, title='')\n\n fig = infl0.plot_index('hat', idx=1, threshold=0.2, title='')\n fig = infl1.plot_index('hat', idx=1, threshold=0.2, title='')\n\n\n def test_summary(self):\n infl1 = self.infl1\n infl0 = self.infl0\n\n df0 = infl0.summary_frame()\n df1 = infl1.summary_frame()\n assert_allclose(df0.values, df1.values, rtol=5e-5)\n pdt.assert_index_equal(df0.index, df1.index)\n\n\ndef _check_looo(self):\n infl = self.infl1\n # unwrap if needed\n results = getattr(infl.results, '_results', infl.results)\n\n res_looo = infl._res_looo\n mask_infl = infl.cooks_distance[0] > 2 * infl.cooks_distance[0].std()\n mask_low = ~mask_infl\n diff_params = results.params - res_looo['params']\n assert_allclose(infl.d_params[mask_low], diff_params[mask_low], atol=0.05)\n assert_allclose(infl.params_one[mask_low], res_looo['params'][mask_low], rtol=0.01)\n\n\nclass TestInfluenceLogitGLMMLE(InfluenceCompareExact):\n\n @classmethod\n def setup_class(cls):\n df = data_bin\n res = GLM(df['constrict'], df[['const', 'log_rate', 'log_volumne']],\n family=families.Binomial()).fit(attach_wls=True, atol=1e-10)\n\n cls.infl1 = res.get_influence()\n cls.infl0 = MLEInfluence(res)\n\n def test_looo(self):\n _check_looo(self)\n\n\nclass TestInfluenceBinomialGLMMLE(InfluenceCompareExact):\n # example based on Williams and R docs\n\n @classmethod\n def setup_class(cls):\n yi = np.array([0, 2, 14, 19, 30])\n ni = 40 * np.ones(len(yi))\n xi = np.arange(1, len(yi) + 1)\n exog = np.column_stack((np.ones(len(yi)), xi))\n endog = np.column_stack((yi, ni - yi))\n\n res = GLM(endog, exog, family=families.Binomial()).fit()\n\n cls.infl1 = res.get_influence()\n cls.infl0 = MLEInfluence(res)\n cls.cd_rtol = 5e-5\n\n def test_looo(self):\n _check_looo(self)\n\n def test_r(self):\n # values from R,\n # > xi <- 1:5\n # > yi <- c(0,2,14,19,30) # number of mice responding to dose xi\n # > mi <- rep(40, 5) # number of mice exposed\n # > glmI <- glm(cbind(yi, mi -yi) ~ xi, family = binomial)\n # > imI <- influence.measures(glmI)\n # > t(imI$infmat)\n\n # dfbeta/dfbetas and dffits do not make sense to me and are furthe away from\n # looo than mine\n # resid seem to be resid_deviance based and not resid_pearson\n # I did not compare cov.r\n infl1 = self.infl1\n cooks_d = [0.25220202795934726, 0.26107981497746285, 1.28985614424132389,\n 0.08449722285516942, 0.36362110845918005]\n hat = [0.2594393406119333, 0.3696442663244837, 0.3535768402250521,\n 0.389209198535791057, 
0.6281303543027403]\n\n assert_allclose(infl1.hat_matrix_diag, hat, rtol=5e-6)\n assert_allclose(infl1.cooks_distance[0], cooks_d, rtol=1e-5)\n\n\nclass TestInfluenceGaussianGLMMLE(InfluenceCompareExact):\n\n @classmethod\n def setup_class(cls):\n from .test_diagnostic import get_duncan_data\n endog, exog, labels = get_duncan_data()\n data = pd.DataFrame(np.column_stack((endog, exog)),\n columns='y const var1 var2'.split(),\n index=labels)\n\n res = GLM.from_formula('y ~ const + var1 + var2 - 1', data).fit()\n #res = GLM(endog, exog).fit()\n\n cls.infl1 = res.get_influence()\n cls.infl0 = MLEInfluence(res)\n\n def test_looo(self):\n _check_looo(self)\n\n\nclass TestInfluenceGaussianGLMOLS(InfluenceCompareExact):\n\n @classmethod\n def setup_class(cls):\n from .test_diagnostic import get_duncan_data\n endog, exog, labels = get_duncan_data()\n data = pd.DataFrame(np.column_stack((endog, exog)),\n columns='y const var1 var2'.split(),\n index=labels)\n\n res0 = GLM.from_formula('y ~ const + var1 + var2 - 1', data).fit()\n res1 = OLS.from_formula('y ~ const + var1 + var2 - 1', data).fit()\n cls.infl1 = res1.get_influence()\n cls.infl0 = res0.get_influence()\n\n def test_basics(self):\n # needs to override attributes that are not equivalent,\n # i.e. not available or different definition like external vs internal\n infl1 = self.infl1\n infl0 = self.infl0\n\n assert_allclose(infl0.hat_matrix_diag, infl1.hat_matrix_diag,\n rtol=1e-12)\n assert_allclose(infl0.resid_studentized,\n infl1.resid_studentized, rtol=1e-12, atol=1e-7)\n assert_allclose(infl0.cooks_distance, infl1.cooks_distance, rtol=1e-7)\n assert_allclose(infl0.dfbetas, infl1.dfbetas, rtol=0.1) # changed\n # OLSInfluence only has looo dfbeta/d_params\n assert_allclose(infl0.d_params, infl1.dfbeta, rtol=1e-9, atol=1e-14)\n # d_fittedvalues is not available in OLSInfluence, i.e. 
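# --- Usage sketch (separate from the test suite) -------------------------------
# A hedged example of how the influence measures exercised above are obtained in
# practice: fit a GLM, call get_influence(), then inspect Cook's distance, the
# hat-matrix diagonal and the summary frame. The data below are synthetic and
# exist only to make the snippet runnable.
import numpy as np
import statsmodels.api as sm

rng = np.random.RandomState(0)
exog = sm.add_constant(rng.normal(size=(50, 2)))
endog = (exog @ [0.5, 1.0, -1.0] + rng.normal(size=50) > 0).astype(float)

res = sm.GLM(endog, exog, family=sm.families.Binomial()).fit()
infl = res.get_influence(observed=False)  # the tests above compare this to MLEInfluence(res)
print(infl.cooks_distance[0][:5])         # Cook's distance per observation
print(infl.hat_matrix_diag[:5])           # leverage values
print(infl.summary_frame().head())        # dfbetas, dffits and related columns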
only scaled dffits\n # assert_allclose(infl0.d_fittedvalues, infl1.d_fittedvalues, rtol=1e-9)\n assert_allclose(infl0.d_fittedvalues_scaled,\n infl1.dffits_internal[0], rtol=1e-9)\n\n # specific to linear link\n assert_allclose(infl0.d_linpred,\n infl0.d_fittedvalues, rtol=1e-12)\n assert_allclose(infl0.d_linpred_scaled,\n infl0.d_fittedvalues_scaled, rtol=1e-12)\n\n def test_summary(self):\n infl1 = self.infl1\n infl0 = self.infl0\n\n df0 = infl0.summary_frame()\n df1 = infl1.summary_frame()\n # just some basic check on overlap except for dfbetas\n cols = ['cooks_d', 'standard_resid', 'hat_diag', 'dffits_internal']\n assert_allclose(df0[cols].values, df1[cols].values, rtol=1e-5)\n pdt.assert_index_equal(df0.index, df1.index)\n", "import numpy as np\n\nrslt_binomial_0 = np.array([\n 0, 6.618737, 0.004032037, 0.01433665, 0.01265635, 0.006173346, 0.01067706])\n\nrslt_binomial_1 = np.array([\n 0, 1.029661, 0.02180239, 0.07769613, 0.06756466, 0.03156418, 0.05851878])\n\nrslt_binomial_2 = np.array([\n 0, 0.1601819, 0.07111087, 0.2544921, 0.2110318, 0.08577924, 0.1984383])\n\nrslt_binomial_3 = np.array([\n 0.5, 0.05343991, 0.004990061, 0.2838563, 0.2167881, 0.02370156, 0.2096612])\n\nrslt_binomial_4 = np.array([\n 0.5, 0.02313286, 0.0708914, 0.3791042, 0.2938332, 0.07506391, 0.2982251])\n\nrslt_binomial_5 = np.array([\n 0.5, 0.009124078, 0.106681, 0.4327268, 0.3362166, 0.1019452, 0.3479955])\n\nrslt_binomial_6 = np.array([\n 1, 0.02932512, 0, 0.3085764, 0.2300801, 0.01143652, 0.2291531])\n\nrslt_binomial_7 = np.array([\n 1, 0.01269414, 0.07022348, 0.396642, 0.3044255, 0.07151663, 0.31301])\n\nrslt_binomial_8 = np.array([\n 1, 0.005494992, 0.1049623, 0.4385186, 0.3391729, 0.09907393, 0.3527401])\n\nrslt_poisson_0 = np.array([\n 0, 23.5349, 0.009251658, 0.003730997, 0.01266164, 0.003439135, 0.0141719])\n\nrslt_poisson_1 = np.array([\n 0, 3.661269, 0.04842557, 0.02095708, 0.06550316, 0.02029514, 0.07300782])\n\nrslt_poisson_2 = np.array([\n 0, 0.5695749, 0.1440462, 0.07208017, 0.182649, 0.07511376, 0.2018242])\n\nrslt_poisson_3 = np.array([\n 0.5, 0.1577593, 0.1247603, 0.02857521, 0.185693, 0.03840622, 0.2200925])\n\nrslt_poisson_4 = np.array([\n 0.5, 0.05669575, 0.187629, 0.08842012, 0.2348627, 0.09736964, 0.2628845])\n\nrslt_poisson_5 = np.array([\n 0.5, 0.0185653, 0.2118078, 0.1121067, 0.2534181, 0.1204543, 0.2784761])\n\nrslt_poisson_6 = np.array([\n 1, 0.07887965, 0.1339927, 0.0322772, 0.1969884, 0.0439019, 0.2339252])\n\nrslt_poisson_7 = np.array([\n 1, 0.02834788, 0.1927163, 0.09160406, 0.2398164, 0.1010126, 0.2682158])\n\nrslt_poisson_8 = np.array([\n 1, 0.0101877, 0.2126847, 0.1123439, 0.2544153, 0.1208601, 0.2796794])\n", "# Note: The first part of this file can be modified in place, but the latter\n# part is autogenerated by the boilerplate.py script.\n\n\"\"\"\n`matplotlib.pyplot` is a state-based interface to matplotlib. 
It provides\na MATLAB-like way of plotting.\n\npyplot is mainly intended for interactive plots and simple cases of\nprogrammatic plot generation::\n\n import numpy as np\n import matplotlib.pyplot as plt\n\n x = np.arange(0, 5, 0.1)\n y = np.sin(x)\n plt.plot(x, y)\n\nThe object-oriented API is recommended for more complex plots.\n\"\"\"\n\nimport functools\nimport importlib\nimport inspect\nimport logging\nfrom numbers import Number\nimport re\nimport sys\nimport time\ntry:\n import threading\nexcept ImportError:\n import dummy_threading as threading\n\nfrom cycler import cycler\nimport matplotlib\nimport matplotlib.colorbar\nimport matplotlib.image\nfrom matplotlib import _api\nfrom matplotlib import rcsetup, style\nfrom matplotlib import _pylab_helpers, interactive\nfrom matplotlib import cbook\nfrom matplotlib import docstring\nfrom matplotlib.backend_bases import FigureCanvasBase, MouseButton\nfrom matplotlib.figure import Figure, figaspect\nfrom matplotlib.gridspec import GridSpec, SubplotSpec\nfrom matplotlib import rcParams, rcParamsDefault, get_backend, rcParamsOrig\nfrom matplotlib.rcsetup import interactive_bk as _interactive_bk\nfrom matplotlib.artist import Artist\nfrom matplotlib.axes import Axes, Subplot\nfrom matplotlib.projections import PolarAxes\nfrom matplotlib import mlab # for detrend_none, window_hanning\nfrom matplotlib.scale import get_scale_names\n\nfrom matplotlib import cm\nfrom matplotlib.cm import get_cmap, register_cmap\n\nimport numpy as np\n\n# We may not need the following imports here:\nfrom matplotlib.colors import Normalize\nfrom matplotlib.lines import Line2D\nfrom matplotlib.text import Text, Annotation\nfrom matplotlib.patches import Polygon, Rectangle, Circle, Arrow\nfrom matplotlib.widgets import SubplotTool, Button, Slider, Widget\n\nfrom .ticker import (\n TickHelper, Formatter, FixedFormatter, NullFormatter, FuncFormatter,\n FormatStrFormatter, ScalarFormatter, LogFormatter, LogFormatterExponent,\n LogFormatterMathtext, Locator, IndexLocator, FixedLocator, NullLocator,\n LinearLocator, LogLocator, AutoLocator, MultipleLocator, MaxNLocator)\n\n_log = logging.getLogger(__name__)\n\n\n_code_objs = {\n _api.rename_parameter:\n _api.rename_parameter(\"\", \"old\", \"new\", lambda new: None).__code__,\n _api.make_keyword_only:\n _api.make_keyword_only(\"\", \"p\", lambda p: None).__code__,\n}\n\n\ndef _copy_docstring_and_deprecators(method, func=None):\n if func is None:\n return functools.partial(_copy_docstring_and_deprecators, method)\n decorators = [docstring.copy(method)]\n # Check whether the definition of *method* includes @_api.rename_parameter\n # or @_api.make_keyword_only decorators; if so, propagate them to the\n # pyplot wrapper as well.\n while getattr(method, \"__wrapped__\", None) is not None:\n for decorator_maker, code in _code_objs.items():\n if method.__code__ is code:\n kwargs = {\n k: v.cell_contents\n for k, v in zip(code.co_freevars, method.__closure__)}\n assert kwargs[\"func\"] is method.__wrapped__\n kwargs.pop(\"func\")\n decorators.append(decorator_maker(**kwargs))\n method = method.__wrapped__\n for decorator in decorators[::-1]:\n func = decorator(func)\n return func\n\n\n## Global ##\n\n\n_IP_REGISTERED = None\n_INSTALL_FIG_OBSERVER = False\n\n\ndef install_repl_displayhook():\n \"\"\"\n Install a repl display hook so that any stale figure are automatically\n redrawn when control is returned to the repl.\n\n This works both with IPython and with vanilla python shells.\n \"\"\"\n global _IP_REGISTERED\n global 
_INSTALL_FIG_OBSERVER\n\n class _NotIPython(Exception):\n pass\n\n # see if we have IPython hooks around, if use them\n\n try:\n if 'IPython' in sys.modules:\n from IPython import get_ipython\n ip = get_ipython()\n if ip is None:\n raise _NotIPython()\n\n if _IP_REGISTERED:\n return\n\n def post_execute():\n if matplotlib.is_interactive():\n draw_all()\n\n # IPython >= 2\n try:\n ip.events.register('post_execute', post_execute)\n except AttributeError:\n # IPython 1.x\n ip.register_post_execute(post_execute)\n\n _IP_REGISTERED = post_execute\n _INSTALL_FIG_OBSERVER = False\n\n # trigger IPython's eventloop integration, if available\n from IPython.core.pylabtools import backend2gui\n\n ipython_gui_name = backend2gui.get(get_backend())\n if ipython_gui_name:\n ip.enable_gui(ipython_gui_name)\n else:\n _INSTALL_FIG_OBSERVER = True\n\n # import failed or ipython is not running\n except (ImportError, _NotIPython):\n _INSTALL_FIG_OBSERVER = True\n\n\ndef uninstall_repl_displayhook():\n \"\"\"\n Uninstall the matplotlib display hook.\n\n .. warning::\n\n Need IPython >= 2 for this to work. For IPython < 2 will raise a\n ``NotImplementedError``\n\n .. warning::\n\n If you are using vanilla python and have installed another\n display hook this will reset ``sys.displayhook`` to what ever\n function was there when matplotlib installed it's displayhook,\n possibly discarding your changes.\n \"\"\"\n global _IP_REGISTERED\n global _INSTALL_FIG_OBSERVER\n if _IP_REGISTERED:\n from IPython import get_ipython\n ip = get_ipython()\n try:\n ip.events.unregister('post_execute', _IP_REGISTERED)\n except AttributeError as err:\n raise NotImplementedError(\"Can not unregister events \"\n \"in IPython < 2.0\") from err\n _IP_REGISTERED = None\n\n if _INSTALL_FIG_OBSERVER:\n _INSTALL_FIG_OBSERVER = False\n\n\ndraw_all = _pylab_helpers.Gcf.draw_all\n\n\[email protected](matplotlib.set_loglevel)\ndef set_loglevel(*args, **kwargs): # Ensure this appears in the pyplot docs.\n return matplotlib.set_loglevel(*args, **kwargs)\n\n\n@_copy_docstring_and_deprecators(Artist.findobj)\ndef findobj(o=None, match=None, include_self=True):\n if o is None:\n o = gcf()\n return o.findobj(match, include_self=include_self)\n\n\ndef _get_required_interactive_framework(backend_mod):\n return getattr(\n backend_mod.FigureCanvas, \"required_interactive_framework\", None)\n\n\ndef switch_backend(newbackend):\n \"\"\"\n Close all open figures and set the Matplotlib backend.\n\n The argument is case-insensitive. 
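# --- Usage sketch (assumes a standalone script, not this module) ---------------
# Forcing the non-interactive Agg backend is a common way to render figures on
# a headless machine; "agg" is also the fallback that switch_backend() itself
# tries when no GUI framework is available.
import matplotlib.pyplot as plt

plt.switch_backend("agg")        # comparable to calling matplotlib.use("agg") early on
fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0, 1, 4])
fig.savefig("headless.png")      # rendered without any GUI event loop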
Switching to an interactive backend is\n possible only if no event loop for another interactive backend has started.\n Switching to and from non-interactive backends is always possible.\n\n Parameters\n ----------\n newbackend : str\n The name of the backend to use.\n \"\"\"\n global _backend_mod\n # make sure the init is pulled up so we can assign to it later\n import matplotlib.backends\n close(\"all\")\n\n if newbackend is rcsetup._auto_backend_sentinel:\n current_framework = cbook._get_running_interactive_framework()\n mapping = {'qt5': 'qt5agg',\n 'qt4': 'qt4agg',\n 'gtk3': 'gtk3agg',\n 'wx': 'wxagg',\n 'tk': 'tkagg',\n 'macosx': 'macosx',\n 'headless': 'agg'}\n\n best_guess = mapping.get(current_framework, None)\n if best_guess is not None:\n candidates = [best_guess]\n else:\n candidates = []\n candidates += [\"macosx\", \"qt5agg\", \"gtk3agg\", \"tkagg\", \"wxagg\"]\n\n # Don't try to fallback on the cairo-based backends as they each have\n # an additional dependency (pycairo) over the agg-based backend, and\n # are of worse quality.\n for candidate in candidates:\n try:\n switch_backend(candidate)\n except ImportError:\n continue\n else:\n rcParamsOrig['backend'] = candidate\n return\n else:\n # Switching to Agg should always succeed; if it doesn't, let the\n # exception propagate out.\n switch_backend(\"agg\")\n rcParamsOrig[\"backend\"] = \"agg\"\n return\n\n # Backends are implemented as modules, but \"inherit\" default method\n # implementations from backend_bases._Backend. This is achieved by\n # creating a \"class\" that inherits from backend_bases._Backend and whose\n # body is filled with the module's globals.\n\n backend_name = cbook._backend_module_name(newbackend)\n\n class backend_mod(matplotlib.backend_bases._Backend):\n locals().update(vars(importlib.import_module(backend_name)))\n\n required_framework = _get_required_interactive_framework(backend_mod)\n if required_framework is not None:\n current_framework = cbook._get_running_interactive_framework()\n if (current_framework and required_framework\n and current_framework != required_framework):\n raise ImportError(\n \"Cannot load backend {!r} which requires the {!r} interactive \"\n \"framework, as {!r} is currently running\".format(\n newbackend, required_framework, current_framework))\n\n _log.debug(\"Loaded backend %s version %s.\",\n newbackend, backend_mod.backend_version)\n\n rcParams['backend'] = rcParamsDefault['backend'] = newbackend\n _backend_mod = backend_mod\n for func_name in [\"new_figure_manager\", \"draw_if_interactive\", \"show\"]:\n globals()[func_name].__signature__ = inspect.signature(\n getattr(backend_mod, func_name))\n\n # Need to keep a global reference to the backend for compatibility reasons.\n # See https://github.com/matplotlib/matplotlib/issues/6092\n matplotlib.backends.backend = newbackend\n\n\ndef _warn_if_gui_out_of_main_thread():\n if (_get_required_interactive_framework(_backend_mod)\n and threading.current_thread() is not threading.main_thread()):\n _api.warn_external(\n \"Starting a Matplotlib GUI outside of the main thread will likely \"\n \"fail.\")\n\n\n# This function's signature is rewritten upon backend-load by switch_backend.\ndef new_figure_manager(*args, **kwargs):\n \"\"\"Create a new figure manager instance.\"\"\"\n _warn_if_gui_out_of_main_thread()\n return _backend_mod.new_figure_manager(*args, **kwargs)\n\n\n# This function's signature is rewritten upon backend-load by switch_backend.\ndef draw_if_interactive(*args, **kwargs):\n \"\"\"\n Redraw the current figure 
if in interactive mode.\n\n .. warning::\n\n End users will typically not have to call this function because the\n the interactive mode takes care of this.\n \"\"\"\n return _backend_mod.draw_if_interactive(*args, **kwargs)\n\n\n# This function's signature is rewritten upon backend-load by switch_backend.\ndef show(*args, **kwargs):\n \"\"\"\n Display all open figures.\n\n Parameters\n ----------\n block : bool, optional\n Whether to wait for all figures to be closed before returning.\n\n If `True` block and run the GUI main loop until all figure windows\n are closed.\n\n If `False` ensure that all figure windows are displayed and return\n immediately. In this case, you are responsible for ensuring\n that the event loop is running to have responsive figures.\n\n Defaults to True in non-interactive mode and to False in interactive\n mode (see `.pyplot.isinteractive`).\n\n See Also\n --------\n ion : Enable interactive mode, which shows / updates the figure after\n every plotting command, so that calling ``show()`` is not necessary.\n ioff : Disable interactive mode.\n savefig : Save the figure to an image file instead of showing it on screen.\n\n Notes\n -----\n **Saving figures to file and showing a window at the same time**\n\n If you want an image file as well as a user interface window, use\n `.pyplot.savefig` before `.pyplot.show`. At the end of (a blocking)\n ``show()`` the figure is closed and thus unregistered from pyplot. Calling\n `.pyplot.savefig` afterwards would save a new and thus empty figure. This\n limitation of command order does not apply if the show is non-blocking or\n if you keep a reference to the figure and use `.Figure.savefig`.\n\n **Auto-show in jupyter notebooks**\n\n The jupyter backends (activated via ``%matplotlib inline``,\n ``%matplotlib notebook``, or ``%matplotlib widget``), call ``show()`` at\n the end of every cell by default. Thus, you usually don't have to call it\n explicitly there.\n \"\"\"\n _warn_if_gui_out_of_main_thread()\n return _backend_mod.show(*args, **kwargs)\n\n\ndef isinteractive():\n \"\"\"\n Return whether plots are updated after every plotting command.\n\n The interactive mode is mainly useful if you build plots from the command\n line and want to see the effect of each command while you are building the\n figure.\n\n In interactive mode:\n\n - newly created figures will be shown immediately;\n - figures will automatically redraw on change;\n - `.pyplot.show` will not block by default.\n\n In non-interactive mode:\n\n - newly created figures and changes to figures will not be reflected until\n explicitly asked to be;\n - `.pyplot.show` will block by default.\n\n See Also\n --------\n ion : Enable interactive mode.\n ioff : Disable interactive mode.\n show : Show all figures (and maybe block).\n pause : Show all figures, and block for a time.\n \"\"\"\n return matplotlib.is_interactive()\n\n\nclass _IoffContext:\n \"\"\"\n Context manager for `.ioff`.\n\n The state is changed in ``__init__()`` instead of ``__enter__()``. The\n latter is a no-op. 
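# --- Usage sketch (assumes a standalone script, not this module) ---------------
# As the notes for show() explain, save the figure *before* a blocking show():
# once the window is closed the figure is unregistered from pyplot, so a later
# pyplot-level savefig() would write out a new, empty figure.
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([1, 2, 3])
fig.savefig("before_show.png")   # save first ...
plt.show()                       # ... then block until the window is closed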
This allows using `.ioff` both as a function and\n as a context.\n \"\"\"\n\n def __init__(self):\n self.wasinteractive = isinteractive()\n matplotlib.interactive(False)\n uninstall_repl_displayhook()\n\n def __enter__(self):\n pass\n\n def __exit__(self, exc_type, exc_value, traceback):\n if self.wasinteractive:\n matplotlib.interactive(True)\n install_repl_displayhook()\n else:\n matplotlib.interactive(False)\n uninstall_repl_displayhook()\n\n\nclass _IonContext:\n \"\"\"\n Context manager for `.ion`.\n\n The state is changed in ``__init__()`` instead of ``__enter__()``. The\n latter is a no-op. This allows using `.ion` both as a function and\n as a context.\n \"\"\"\n\n def __init__(self):\n self.wasinteractive = isinteractive()\n matplotlib.interactive(True)\n install_repl_displayhook()\n\n def __enter__(self):\n pass\n\n def __exit__(self, exc_type, exc_value, traceback):\n if not self.wasinteractive:\n matplotlib.interactive(False)\n uninstall_repl_displayhook()\n else:\n matplotlib.interactive(True)\n install_repl_displayhook()\n\n\ndef ioff():\n \"\"\"\n Disable interactive mode.\n\n See `.pyplot.isinteractive` for more details.\n\n See Also\n --------\n ion : Enable interactive mode.\n isinteractive : Whether interactive mode is enabled.\n show : Show all figures (and maybe block).\n pause : Show all figures, and block for a time.\n\n Notes\n -----\n For a temporary change, this can be used as a context manager::\n\n # if interactive mode is on\n # then figures will be shown on creation\n plt.ion()\n # This figure will be shown immediately\n fig = plt.figure()\n\n with plt.ioff():\n # interactive mode will be off\n # figures will not automatically be shown\n fig2 = plt.figure()\n # ...\n\n To enable usage as a context manager, this function returns an\n ``_IoffContext`` object. The return value is not intended to be stored\n or accessed by the user.\n \"\"\"\n return _IoffContext()\n\n\ndef ion():\n \"\"\"\n Enable interactive mode.\n\n See `.pyplot.isinteractive` for more details.\n\n See Also\n --------\n ioff : Disable interactive mode.\n isinteractive : Whether interactive mode is enabled.\n show : Show all figures (and maybe block).\n pause : Show all figures, and block for a time.\n\n Notes\n -----\n For a temporary change, this can be used as a context manager::\n\n # if interactive mode is off\n # then figures will not be shown on creation\n plt.ioff()\n # This figure will not be shown immediately\n fig = plt.figure()\n\n with plt.ion():\n # interactive mode will be on\n # figures will automatically be shown\n fig2 = plt.figure()\n # ...\n\n To enable usage as a context manager, this function returns an\n ``_IonContext`` object. The return value is not intended to be stored\n or accessed by the user.\n \"\"\"\n return _IonContext()\n\n\ndef pause(interval):\n \"\"\"\n Run the GUI event loop for *interval* seconds.\n\n If there is an active figure, it will be updated and displayed before the\n pause, and the GUI event loop (if any) will run during the pause.\n\n This can be used for crude animation. 
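# --- Usage sketch (assumes a standalone script, not this module) ---------------
# The "crude animation" pattern mentioned in the pause() docstring: mutate an
# artist in a loop and let pause() redraw and run the GUI event loop between
# frames. For anything more elaborate, matplotlib.animation is the better tool.
import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(0, 2 * np.pi, 200)
fig, ax = plt.subplots()
(line,) = ax.plot(x, np.sin(x))
for phase in np.linspace(0, 2 * np.pi, 30):
    line.set_ydata(np.sin(x + phase))   # update the line data in place
    plt.pause(0.05)                     # redraw and briefly run the event loop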
For more complex animation use\n :mod:`matplotlib.animation`.\n\n If there is no active figure, sleep for *interval* seconds instead.\n\n See Also\n --------\n matplotlib.animation : Proper animations\n show : Show all figures and optional block until all figures are closed.\n \"\"\"\n manager = _pylab_helpers.Gcf.get_active()\n if manager is not None:\n canvas = manager.canvas\n if canvas.figure.stale:\n canvas.draw_idle()\n show(block=False)\n canvas.start_event_loop(interval)\n else:\n time.sleep(interval)\n\n\n@_copy_docstring_and_deprecators(matplotlib.rc)\ndef rc(group, **kwargs):\n matplotlib.rc(group, **kwargs)\n\n\n@_copy_docstring_and_deprecators(matplotlib.rc_context)\ndef rc_context(rc=None, fname=None):\n return matplotlib.rc_context(rc, fname)\n\n\n@_copy_docstring_and_deprecators(matplotlib.rcdefaults)\ndef rcdefaults():\n matplotlib.rcdefaults()\n if matplotlib.is_interactive():\n draw_all()\n\n\n# getp/get/setp are explicitly reexported so that they show up in pyplot docs.\n\n\n@_copy_docstring_and_deprecators(matplotlib.artist.getp)\ndef getp(obj, *args, **kwargs):\n return matplotlib.artist.getp(obj, *args, **kwargs)\n\n\n@_copy_docstring_and_deprecators(matplotlib.artist.get)\ndef get(obj, *args, **kwargs):\n return matplotlib.artist.get(obj, *args, **kwargs)\n\n\n@_copy_docstring_and_deprecators(matplotlib.artist.setp)\ndef setp(obj, *args, **kwargs):\n return matplotlib.artist.setp(obj, *args, **kwargs)\n\n\ndef xkcd(scale=1, length=100, randomness=2):\n \"\"\"\n Turn on `xkcd <https://xkcd.com/>`_ sketch-style drawing mode. This will\n only have effect on things drawn after this function is called.\n\n For best results, the \"Humor Sans\" font should be installed: it is\n not included with Matplotlib.\n\n Parameters\n ----------\n scale : float, optional\n The amplitude of the wiggle perpendicular to the source line.\n length : float, optional\n The length of the wiggle along the line.\n randomness : float, optional\n The scale factor by which the length is shrunken or expanded.\n\n Notes\n -----\n This function works by a number of rcParams, so it will probably\n override others you have set before.\n\n If you want the effects of this function to be temporary, it can\n be used as a context manager, for example::\n\n with plt.xkcd():\n # This figure will be in XKCD-style\n fig1 = plt.figure()\n # ...\n\n # This figure will be in regular style\n fig2 = plt.figure()\n \"\"\"\n return _xkcd(scale, length, randomness)\n\n\nclass _xkcd:\n # This cannot be implemented in terms of rc_context() because this needs to\n # work as a non-contextmanager too.\n\n def __init__(self, scale, length, randomness):\n self._orig = rcParams.copy()\n\n if rcParams['text.usetex']:\n raise RuntimeError(\n \"xkcd mode is not compatible with text.usetex = True\")\n\n from matplotlib import patheffects\n rcParams.update({\n 'font.family': ['xkcd', 'xkcd Script', 'Humor Sans', 'Comic Neue',\n 'Comic Sans MS'],\n 'font.size': 14.0,\n 'path.sketch': (scale, length, randomness),\n 'path.effects': [\n patheffects.withStroke(linewidth=4, foreground=\"w\")],\n 'axes.linewidth': 1.5,\n 'lines.linewidth': 2.0,\n 'figure.facecolor': 'white',\n 'grid.linewidth': 0.0,\n 'axes.grid': False,\n 'axes.unicode_minus': False,\n 'axes.edgecolor': 'black',\n 'xtick.major.size': 8,\n 'xtick.major.width': 3,\n 'ytick.major.size': 8,\n 'ytick.major.width': 3,\n })\n\n def __enter__(self):\n return self\n\n def __exit__(self, *args):\n dict.update(rcParams, self._orig)\n\n\n## Figures ##\n\ndef figure(num=None, # 
autoincrement if None, else integer from 1-N\n figsize=None, # defaults to rc figure.figsize\n dpi=None, # defaults to rc figure.dpi\n facecolor=None, # defaults to rc figure.facecolor\n edgecolor=None, # defaults to rc figure.edgecolor\n frameon=True,\n FigureClass=Figure,\n clear=False,\n **kwargs\n ):\n \"\"\"\n Create a new figure, or activate an existing figure.\n\n Parameters\n ----------\n num : int or str or `.Figure`, optional\n A unique identifier for the figure.\n\n If a figure with that identifier already exists, this figure is made\n active and returned. An integer refers to the ``Figure.number``\n attribute, a string refers to the figure label.\n\n If there is no figure with the identifier or *num* is not given, a new\n figure is created, made active and returned. If *num* is an int, it\n will be used for the ``Figure.number`` attribute, otherwise, an\n auto-generated integer value is used (starting at 1 and incremented\n for each new figure). If *num* is a string, the figure label and the\n window title is set to this value.\n\n figsize : (float, float), default: :rc:`figure.figsize`\n Width, height in inches.\n\n dpi : float, default: :rc:`figure.dpi`\n The resolution of the figure in dots-per-inch.\n\n facecolor : color, default: :rc:`figure.facecolor`\n The background color.\n\n edgecolor : color, default: :rc:`figure.edgecolor`\n The border color.\n\n frameon : bool, default: True\n If False, suppress drawing the figure frame.\n\n FigureClass : subclass of `~matplotlib.figure.Figure`\n Optionally use a custom `.Figure` instance.\n\n clear : bool, default: False\n If True and the figure already exists, then it is cleared.\n\n tight_layout : bool or dict, default: :rc:`figure.autolayout`\n If ``False`` use *subplotpars*. If ``True`` adjust subplot\n parameters using `.tight_layout` with default padding.\n When providing a dict containing the keys ``pad``, ``w_pad``,\n ``h_pad``, and ``rect``, the default `.tight_layout` paddings\n will be overridden.\n\n constrained_layout : bool, default: :rc:`figure.constrained_layout.use`\n If ``True`` use constrained layout to adjust positioning of plot\n elements. Like ``tight_layout``, but designed to be more\n flexible. See\n :doc:`/tutorials/intermediate/constrainedlayout_guide`\n for examples. (Note: does not work with `add_subplot` or\n `~.pyplot.subplot2grid`.)\n\n\n **kwargs : optional\n See `~.matplotlib.figure.Figure` for other possible arguments.\n\n Returns\n -------\n `~matplotlib.figure.Figure`\n The `.Figure` instance returned will also be passed to\n new_figure_manager in the backends, which allows to hook custom\n `.Figure` classes into the pyplot interface. 
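# --- Usage sketch (assumes a standalone script, not this module) ---------------
# The num handling described above: passing the same string label re-activates
# the existing figure instead of creating a new one, and clear=True empties it
# on reuse. The label "diagnostics" is arbitrary.
import matplotlib.pyplot as plt

fig1 = plt.figure("diagnostics")               # created and labeled "diagnostics"
fig2 = plt.figure("diagnostics")               # same label -> same Figure object
assert fig1 is fig2
fig3 = plt.figure("diagnostics", clear=True)   # reused, but cleared of its axes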
Additional kwargs will be\n passed to the `.Figure` init function.\n\n Notes\n -----\n If you are creating many figures, make sure you explicitly call\n `.pyplot.close` on the figures you are not using, because this will\n enable pyplot to properly clean up the memory.\n\n `~matplotlib.rcParams` defines the default values, which can be modified\n in the matplotlibrc file.\n \"\"\"\n if isinstance(num, Figure):\n if num.canvas.manager is None:\n raise ValueError(\"The passed figure is not managed by pyplot\")\n _pylab_helpers.Gcf.set_active(num.canvas.manager)\n return num\n\n allnums = get_fignums()\n next_num = max(allnums) + 1 if allnums else 1\n fig_label = ''\n if num is None:\n num = next_num\n elif isinstance(num, str):\n fig_label = num\n all_labels = get_figlabels()\n if fig_label not in all_labels:\n if fig_label == 'all':\n _api.warn_external(\"close('all') closes all existing figures.\")\n num = next_num\n else:\n inum = all_labels.index(fig_label)\n num = allnums[inum]\n else:\n num = int(num) # crude validation of num argument\n\n manager = _pylab_helpers.Gcf.get_fig_manager(num)\n if manager is None:\n max_open_warning = rcParams['figure.max_open_warning']\n if len(allnums) == max_open_warning >= 1:\n _api.warn_external(\n f\"More than {max_open_warning} figures have been opened. \"\n f\"Figures created through the pyplot interface \"\n f\"(`matplotlib.pyplot.figure`) are retained until explicitly \"\n f\"closed and may consume too much memory. (To control this \"\n f\"warning, see the rcParam `figure.max_open_warning`).\",\n RuntimeWarning)\n\n manager = new_figure_manager(\n num, figsize=figsize, dpi=dpi,\n facecolor=facecolor, edgecolor=edgecolor, frameon=frameon,\n FigureClass=FigureClass, **kwargs)\n fig = manager.canvas.figure\n if fig_label:\n fig.set_label(fig_label)\n\n _pylab_helpers.Gcf._set_new_active_manager(manager)\n\n # make sure backends (inline) that we don't ship that expect this\n # to be called in plotting commands to make the figure call show\n # still work. There is probably a better way to do this in the\n # FigureManager base class.\n draw_if_interactive()\n\n if _INSTALL_FIG_OBSERVER:\n fig.stale_callback = _auto_draw_if_interactive\n\n if clear:\n manager.canvas.figure.clear()\n\n return manager.canvas.figure\n\n\ndef _auto_draw_if_interactive(fig, val):\n \"\"\"\n An internal helper function for making sure that auto-redrawing\n works as intended in the plain python repl.\n\n Parameters\n ----------\n fig : Figure\n A figure object which is assumed to be associated with a canvas\n \"\"\"\n if (val and matplotlib.is_interactive()\n and not fig.canvas.is_saving()\n and not fig.canvas._is_idle_drawing):\n # Some artists can mark themselves as stale in the middle of drawing\n # (e.g. 
axes position & tick labels being computed at draw time), but\n # this shouldn't trigger a redraw because the current redraw will\n # already take them into account.\n with fig.canvas._idle_draw_cntx():\n fig.canvas.draw_idle()\n\n\ndef gcf():\n \"\"\"\n Get the current figure.\n\n If no current figure exists, a new one is created using\n `~.pyplot.figure()`.\n \"\"\"\n manager = _pylab_helpers.Gcf.get_active()\n if manager is not None:\n return manager.canvas.figure\n else:\n return figure()\n\n\ndef fignum_exists(num):\n \"\"\"Return whether the figure with the given id exists.\"\"\"\n return _pylab_helpers.Gcf.has_fignum(num) or num in get_figlabels()\n\n\ndef get_fignums():\n \"\"\"Return a list of existing figure numbers.\"\"\"\n return sorted(_pylab_helpers.Gcf.figs)\n\n\ndef get_figlabels():\n \"\"\"Return a list of existing figure labels.\"\"\"\n managers = _pylab_helpers.Gcf.get_all_fig_managers()\n managers.sort(key=lambda m: m.num)\n return [m.canvas.figure.get_label() for m in managers]\n\n\ndef get_current_fig_manager():\n \"\"\"\n Return the figure manager of the current figure.\n\n The figure manager is a container for the actual backend-depended window\n that displays the figure on screen.\n\n If no current figure exists, a new one is created, and its figure\n manager is returned.\n\n Returns\n -------\n `.FigureManagerBase` or backend-dependent subclass thereof\n \"\"\"\n return gcf().canvas.manager\n\n\n@_copy_docstring_and_deprecators(FigureCanvasBase.mpl_connect)\ndef connect(s, func):\n return gcf().canvas.mpl_connect(s, func)\n\n\n@_copy_docstring_and_deprecators(FigureCanvasBase.mpl_disconnect)\ndef disconnect(cid):\n return gcf().canvas.mpl_disconnect(cid)\n\n\ndef close(fig=None):\n \"\"\"\n Close a figure window.\n\n Parameters\n ----------\n fig : None or int or str or `.Figure`\n The figure to close. There are a number of ways to specify this:\n\n - *None*: the current figure\n - `.Figure`: the given `.Figure` instance\n - ``int``: a figure number\n - ``str``: a figure name\n - 'all': all figures\n\n \"\"\"\n if fig is None:\n manager = _pylab_helpers.Gcf.get_active()\n if manager is None:\n return\n else:\n _pylab_helpers.Gcf.destroy(manager)\n elif fig == 'all':\n _pylab_helpers.Gcf.destroy_all()\n elif isinstance(fig, int):\n _pylab_helpers.Gcf.destroy(fig)\n elif hasattr(fig, 'int'):\n # if we are dealing with a type UUID, we\n # can use its integer representation\n _pylab_helpers.Gcf.destroy(fig.int)\n elif isinstance(fig, str):\n all_labels = get_figlabels()\n if fig in all_labels:\n num = get_fignums()[all_labels.index(fig)]\n _pylab_helpers.Gcf.destroy(num)\n elif isinstance(fig, Figure):\n _pylab_helpers.Gcf.destroy_fig(fig)\n else:\n raise TypeError(\"close() argument must be a Figure, an int, a string, \"\n \"or None, not %s\" % type(fig))\n\n\ndef clf():\n \"\"\"Clear the current figure.\"\"\"\n gcf().clf()\n\n\ndef draw():\n \"\"\"\n Redraw the current figure.\n\n This is used to update a figure that has been altered, but not\n automatically re-drawn. If interactive mode is on (via `.ion()`), this\n should be only rarely needed, but there may be ways to modify the state of\n a figure without marking it as \"stale\". 
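# --- Usage sketch (assumes a standalone script, not this module) ---------------
# The close() variants documented above; explicitly closing figures matters when
# many of them are created through pyplot, since they are retained until closed.
import matplotlib.pyplot as plt

fig_a = plt.figure("a")
fig_b = plt.figure("b")
plt.close(fig_a)            # close a specific Figure instance
plt.close("b")              # ... or close it by its label
plt.figure(); plt.figure()
plt.close("all")            # ... or close everything still open
print(plt.get_fignums())    # -> [] once all figures are closed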
Please report these cases as bugs.\n\n This is equivalent to calling ``fig.canvas.draw_idle()``, where ``fig`` is\n the current figure.\n \"\"\"\n gcf().canvas.draw_idle()\n\n\n@_copy_docstring_and_deprecators(Figure.savefig)\ndef savefig(*args, **kwargs):\n fig = gcf()\n res = fig.savefig(*args, **kwargs)\n fig.canvas.draw_idle() # need this if 'transparent=True' to reset colors\n return res\n\n\n## Putting things in figures ##\n\n\ndef figlegend(*args, **kwargs):\n return gcf().legend(*args, **kwargs)\nif Figure.legend.__doc__:\n figlegend.__doc__ = Figure.legend.__doc__.replace(\"legend(\", \"figlegend(\")\n\n\n## Axes ##\n\[email protected]_interpd\ndef axes(arg=None, **kwargs):\n \"\"\"\n Add an axes to the current figure and make it the current axes.\n\n Call signatures::\n\n plt.axes()\n plt.axes(rect, projection=None, polar=False, **kwargs)\n plt.axes(ax)\n\n Parameters\n ----------\n arg : None or 4-tuple\n The exact behavior of this function depends on the type:\n\n - *None*: A new full window axes is added using\n ``subplot(**kwargs)``.\n - 4-tuple of floats *rect* = ``[left, bottom, width, height]``.\n A new axes is added with dimensions *rect* in normalized\n (0, 1) units using `~.Figure.add_axes` on the current figure.\n\n projection : {None, 'aitoff', 'hammer', 'lambert', 'mollweide', \\\n'polar', 'rectilinear', str}, optional\n The projection type of the `~.axes.Axes`. *str* is the name of\n a custom projection, see `~matplotlib.projections`. The default\n None results in a 'rectilinear' projection.\n\n polar : bool, default: False\n If True, equivalent to projection='polar'.\n\n sharex, sharey : `~.axes.Axes`, optional\n Share the x or y `~matplotlib.axis` with sharex and/or sharey.\n The axis will have the same limits, ticks, and scale as the axis\n of the shared axes.\n\n label : str\n A label for the returned axes.\n\n Returns\n -------\n `~.axes.Axes`, or a subclass of `~.axes.Axes`\n The returned axes class depends on the projection used. It is\n `~.axes.Axes` if rectilinear projection is used and\n `.projections.polar.PolarAxes` if polar projection is used.\n\n Other Parameters\n ----------------\n **kwargs\n This method also takes the keyword arguments for\n the returned axes class. The keyword arguments for the\n rectilinear axes class `~.axes.Axes` can be found in\n the following table but there might also be other keyword\n arguments if another projection is used, see the actual axes\n class.\n\n %(Axes_kwdoc)s\n\n Notes\n -----\n If the figure already has a axes with key (*args*,\n *kwargs*) then it will simply make that axes current and\n return it. This behavior is deprecated. Meanwhile, if you do\n not want this behavior (i.e., you want to force the creation of a\n new axes), you must use a unique set of args and kwargs. 
The axes\n *label* attribute has been exposed for this purpose: if you want\n two axes that are otherwise identical to be added to the figure,\n make sure you give them unique labels.\n\n See Also\n --------\n .Figure.add_axes\n .pyplot.subplot\n .Figure.add_subplot\n .Figure.subplots\n .pyplot.subplots\n\n Examples\n --------\n ::\n\n # Creating a new full window axes\n plt.axes()\n\n # Creating a new axes with specified dimensions and some kwargs\n plt.axes((left, bottom, width, height), facecolor='w')\n \"\"\"\n fig = gcf()\n if arg is None:\n return fig.add_subplot(**kwargs)\n else:\n return fig.add_axes(arg, **kwargs)\n\n\ndef delaxes(ax=None):\n \"\"\"\n Remove an `~.axes.Axes` (defaulting to the current axes) from its figure.\n \"\"\"\n if ax is None:\n ax = gca()\n ax.remove()\n\n\ndef sca(ax):\n \"\"\"\n Set the current Axes to *ax* and the current Figure to the parent of *ax*.\n \"\"\"\n figure(ax.figure)\n ax.figure.sca(ax)\n\n\ndef cla():\n \"\"\"Clear the current axes.\"\"\"\n # Not generated via boilerplate.py to allow a different docstring.\n return gca().cla()\n\n\n## More ways of creating axes ##\n\[email protected]_interpd\ndef subplot(*args, **kwargs):\n \"\"\"\n Add an Axes to the current figure or retrieve an existing Axes.\n\n This is a wrapper of `.Figure.add_subplot` which provides additional\n behavior when working with the implicit API (see the notes section).\n\n Call signatures::\n\n subplot(nrows, ncols, index, **kwargs)\n subplot(pos, **kwargs)\n subplot(**kwargs)\n subplot(ax)\n\n Parameters\n ----------\n *args : int, (int, int, *index*), or `.SubplotSpec`, default: (1, 1, 1)\n The position of the subplot described by one of\n\n - Three integers (*nrows*, *ncols*, *index*). The subplot will take the\n *index* position on a grid with *nrows* rows and *ncols* columns.\n *index* starts at 1 in the upper left corner and increases to the\n right. *index* can also be a two-tuple specifying the (*first*,\n *last*) indices (1-based, and including *last*) of the subplot, e.g.,\n ``fig.add_subplot(3, 1, (1, 2))`` makes a subplot that spans the\n upper 2/3 of the figure.\n - A 3-digit integer. The digits are interpreted as if given separately\n as three single-digit integers, i.e. ``fig.add_subplot(235)`` is the\n same as ``fig.add_subplot(2, 3, 5)``. Note that this can only be used\n if there are no more than 9 subplots.\n - A `.SubplotSpec`.\n\n projection : {None, 'aitoff', 'hammer', 'lambert', 'mollweide', \\\n'polar', 'rectilinear', str}, optional\n The projection type of the subplot (`~.axes.Axes`). *str* is the name\n of a custom projection, see `~matplotlib.projections`. The default\n None results in a 'rectilinear' projection.\n\n polar : bool, default: False\n If True, equivalent to projection='polar'.\n\n sharex, sharey : `~.axes.Axes`, optional\n Share the x or y `~matplotlib.axis` with sharex and/or sharey. The\n axis will have the same limits, ticks, and scale as the axis of the\n shared axes.\n\n label : str\n A label for the returned axes.\n\n Returns\n -------\n `.axes.SubplotBase`, or another subclass of `~.axes.Axes`\n\n The axes of the subplot. The returned axes base class depends on\n the projection used. It is `~.axes.Axes` if rectilinear projection\n is used and `.projections.polar.PolarAxes` if polar projection\n is used. 
The returned axes is then a subplot subclass of the\n base class.\n\n Other Parameters\n ----------------\n **kwargs\n This method also takes the keyword arguments for the returned axes\n base class; except for the *figure* argument. The keyword arguments\n for the rectilinear base class `~.axes.Axes` can be found in\n the following table but there might also be other keyword\n arguments if another projection is used.\n\n %(Axes_kwdoc)s\n\n Notes\n -----\n Creating a new Axes will delete any pre-existing Axes that\n overlaps with it beyond sharing a boundary::\n\n import matplotlib.pyplot as plt\n # plot a line, implicitly creating a subplot(111)\n plt.plot([1, 2, 3])\n # now create a subplot which represents the top plot of a grid\n # with 2 rows and 1 column. Since this subplot will overlap the\n # first, the plot (and its axes) previously created, will be removed\n plt.subplot(211)\n\n If you do not want this behavior, use the `.Figure.add_subplot` method\n or the `.pyplot.axes` function instead.\n\n If no *kwargs* are passed and there exists an Axes in the location\n specified by *args* then that Axes will be returned rather than a new\n Axes being created.\n\n If *kwargs* are passed and there exists an Axes in the location\n specified by *args*, the projection type is the same, and the\n *kwargs* match with the existing Axes, then the existing Axes is\n returned. Otherwise a new Axes is created with the specified\n parameters. We save a reference to the *kwargs* which we use\n for this comparison. If any of the values in *kwargs* are\n mutable we will not detect the case where they are mutated.\n In these cases we suggest using `.Figure.add_subplot` and the\n explicit Axes API rather than the implicit pyplot API.\n\n See Also\n --------\n .Figure.add_subplot\n .pyplot.subplots\n .pyplot.axes\n .Figure.subplots\n\n Examples\n --------\n ::\n\n plt.subplot(221)\n\n # equivalent but more general\n ax1 = plt.subplot(2, 2, 1)\n\n # add a subplot with no frame\n ax2 = plt.subplot(222, frameon=False)\n\n # add a polar subplot\n plt.subplot(223, projection='polar')\n\n # add a red subplot that shares the x-axis with ax1\n plt.subplot(224, sharex=ax1, facecolor='red')\n\n # delete ax2 from the figure\n plt.delaxes(ax2)\n\n # add ax2 to the figure again\n plt.subplot(ax2)\n\n # make the first axes \"current\" again\n plt.subplot(221)\n\n \"\"\"\n # Here we will only normalize `polar=True` vs `projection='polar'` and let\n # downstream code deal with the rest.\n unset = object()\n projection = kwargs.get('projection', unset)\n polar = kwargs.pop('polar', unset)\n if polar is not unset and polar:\n # if we got mixed messages from the user, raise\n if projection is not unset and projection != 'polar':\n raise ValueError(\n f\"polar={polar}, yet projection={projection!r}. \"\n \"Only one of these arguments should be supplied.\"\n )\n kwargs['projection'] = projection = 'polar'\n\n # if subplot called without arguments, create subplot(1, 1, 1)\n if len(args) == 0:\n args = (1, 1, 1)\n\n # This check was added because it is very easy to type subplot(1, 2, False)\n # when subplots(1, 2, False) was intended (sharex=False, that is). In most\n # cases, no error will ever occur, but mysterious behavior can result\n # because what was intended to be the sharex argument is instead treated as\n # a subplot index for subplot()\n if len(args) >= 3 and isinstance(args[2], bool):\n _api.warn_external(\"The subplot index argument to subplot() appears \"\n \"to be a boolean. 
Did you intend to use \"\n \"subplots()?\")\n # Check for nrows and ncols, which are not valid subplot args:\n if 'nrows' in kwargs or 'ncols' in kwargs:\n raise TypeError(\"subplot() got an unexpected keyword argument 'ncols' \"\n \"and/or 'nrows'. Did you intend to call subplots()?\")\n\n fig = gcf()\n\n # First, search for an existing subplot with a matching spec.\n key = SubplotSpec._from_subplot_args(fig, args)\n\n for ax in fig.axes:\n # if we found an axes at the position sort out if we can re-use it\n if hasattr(ax, 'get_subplotspec') and ax.get_subplotspec() == key:\n # if the user passed no kwargs, re-use\n if kwargs == {}:\n break\n # if the axes class and kwargs are identical, reuse\n elif ax._projection_init == fig._process_projection_requirements(\n *args, **kwargs\n ):\n break\n else:\n # we have exhausted the known Axes and none match, make a new one!\n ax = fig.add_subplot(*args, **kwargs)\n\n fig.sca(ax)\n\n bbox = ax.bbox\n axes_to_delete = []\n for other_ax in fig.axes:\n if other_ax == ax:\n continue\n if bbox.fully_overlaps(other_ax.bbox):\n axes_to_delete.append(other_ax)\n for ax_to_del in axes_to_delete:\n delaxes(ax_to_del)\n\n return ax\n\n\n@_api.make_keyword_only(\"3.3\", \"sharex\")\ndef subplots(nrows=1, ncols=1, sharex=False, sharey=False, squeeze=True,\n subplot_kw=None, gridspec_kw=None, **fig_kw):\n \"\"\"\n Create a figure and a set of subplots.\n\n This utility wrapper makes it convenient to create common layouts of\n subplots, including the enclosing figure object, in a single call.\n\n Parameters\n ----------\n nrows, ncols : int, default: 1\n Number of rows/columns of the subplot grid.\n\n sharex, sharey : bool or {'none', 'all', 'row', 'col'}, default: False\n Controls sharing of properties among x (*sharex*) or y (*sharey*)\n axes:\n\n - True or 'all': x- or y-axis will be shared among all subplots.\n - False or 'none': each subplot x- or y-axis will be independent.\n - 'row': each subplot row will share an x- or y-axis.\n - 'col': each subplot column will share an x- or y-axis.\n\n When subplots have a shared x-axis along a column, only the x tick\n labels of the bottom subplot are created. Similarly, when subplots\n have a shared y-axis along a row, only the y tick labels of the first\n column subplot are created. 
To later turn other subplots' ticklabels\n on, use `~matplotlib.axes.Axes.tick_params`.\n\n When subplots have a shared axis that has units, calling\n `~matplotlib.axis.Axis.set_units` will update each axis with the\n new units.\n\n squeeze : bool, default: True\n - If True, extra dimensions are squeezed out from the returned\n array of `~matplotlib.axes.Axes`:\n\n - if only one subplot is constructed (nrows=ncols=1), the\n resulting single Axes object is returned as a scalar.\n - for Nx1 or 1xM subplots, the returned object is a 1D numpy\n object array of Axes objects.\n - for NxM, subplots with N>1 and M>1 are returned as a 2D array.\n\n - If False, no squeezing at all is done: the returned Axes object is\n always a 2D array containing Axes instances, even if it ends up\n being 1x1.\n\n subplot_kw : dict, optional\n Dict with keywords passed to the\n `~matplotlib.figure.Figure.add_subplot` call used to create each\n subplot.\n\n gridspec_kw : dict, optional\n Dict with keywords passed to the `~matplotlib.gridspec.GridSpec`\n constructor used to create the grid the subplots are placed on.\n\n **fig_kw\n All additional keyword arguments are passed to the\n `.pyplot.figure` call.\n\n Returns\n -------\n fig : `~.figure.Figure`\n\n ax : `.axes.Axes` or array of Axes\n *ax* can be either a single `~matplotlib.axes.Axes` object or an\n array of Axes objects if more than one subplot was created. The\n dimensions of the resulting array can be controlled with the squeeze\n keyword, see above.\n\n Typical idioms for handling the return value are::\n\n # using the variable ax for single a Axes\n fig, ax = plt.subplots()\n\n # using the variable axs for multiple Axes\n fig, axs = plt.subplots(2, 2)\n\n # using tuple unpacking for multiple Axes\n fig, (ax1, ax2) = plt.subplots(1, 2)\n fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)\n\n The names ``ax`` and pluralized ``axs`` are preferred over ``axes``\n because for the latter it's not clear if it refers to a single\n `~.axes.Axes` instance or a collection of these.\n\n See Also\n --------\n .pyplot.figure\n .pyplot.subplot\n .pyplot.axes\n .Figure.subplots\n .Figure.add_subplot\n\n Examples\n --------\n ::\n\n # First create some toy data:\n x = np.linspace(0, 2*np.pi, 400)\n y = np.sin(x**2)\n\n # Create just a figure and only one subplot\n fig, ax = plt.subplots()\n ax.plot(x, y)\n ax.set_title('Simple plot')\n\n # Create two subplots and unpack the output array immediately\n f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)\n ax1.plot(x, y)\n ax1.set_title('Sharing Y axis')\n ax2.scatter(x, y)\n\n # Create four polar axes and access them through the returned array\n fig, axs = plt.subplots(2, 2, subplot_kw=dict(projection=\"polar\"))\n axs[0, 0].plot(x, y)\n axs[1, 1].scatter(x, y)\n\n # Share a X axis with each column of subplots\n plt.subplots(2, 2, sharex='col')\n\n # Share a Y axis with each row of subplots\n plt.subplots(2, 2, sharey='row')\n\n # Share both X and Y axes with all subplots\n plt.subplots(2, 2, sharex='all', sharey='all')\n\n # Note that this is the same as\n plt.subplots(2, 2, sharex=True, sharey=True)\n\n # Create figure number 10 with a single subplot\n # and clears it if it already exists.\n fig, ax = plt.subplots(num=10, clear=True)\n\n \"\"\"\n fig = figure(**fig_kw)\n axs = fig.subplots(nrows=nrows, ncols=ncols, sharex=sharex, sharey=sharey,\n squeeze=squeeze, subplot_kw=subplot_kw,\n gridspec_kw=gridspec_kw)\n return fig, axs\n\n\ndef subplot_mosaic(mosaic, *, subplot_kw=None, gridspec_kw=None,\n empty_sentinel='.', 
**fig_kw):\n \"\"\"\n Build a layout of Axes based on ASCII art or nested lists.\n\n This is a helper function to build complex GridSpec layouts visually.\n\n .. note ::\n\n This API is provisional and may be revised in the future based on\n early user feedback.\n\n\n Parameters\n ----------\n mosaic : list of list of {hashable or nested} or str\n\n A visual layout of how you want your Axes to be arranged\n labeled as strings. For example ::\n\n x = [['A panel', 'A panel', 'edge'],\n ['C panel', '.', 'edge']]\n\n Produces 4 axes:\n\n - 'A panel' which is 1 row high and spans the first two columns\n - 'edge' which is 2 rows high and is on the right edge\n - 'C panel' which in 1 row and 1 column wide in the bottom left\n - a blank space 1 row and 1 column wide in the bottom center\n\n Any of the entries in the layout can be a list of lists\n of the same form to create nested layouts.\n\n If input is a str, then it must be of the form ::\n\n '''\n AAE\n C.E\n '''\n\n where each character is a column and each line is a row.\n This only allows only single character Axes labels and does\n not allow nesting but is very terse.\n\n subplot_kw : dict, optional\n Dictionary with keywords passed to the `.Figure.add_subplot` call\n used to create each subplot.\n\n gridspec_kw : dict, optional\n Dictionary with keywords passed to the `.GridSpec` constructor used\n to create the grid the subplots are placed on.\n\n empty_sentinel : object, optional\n Entry in the layout to mean \"leave this space empty\". Defaults\n to ``'.'``. Note, if *layout* is a string, it is processed via\n `inspect.cleandoc` to remove leading white space, which may\n interfere with using white-space as the empty sentinel.\n\n **fig_kw\n All additional keyword arguments are passed to the\n `.pyplot.figure` call.\n\n Returns\n -------\n fig : `~.figure.Figure`\n The new figure\n\n dict[label, Axes]\n A dictionary mapping the labels to the Axes objects. The order of\n the axes is left-to-right and top-to-bottom of their position in the\n total layout.\n\n \"\"\"\n fig = figure(**fig_kw)\n ax_dict = fig.subplot_mosaic(\n mosaic,\n subplot_kw=subplot_kw,\n gridspec_kw=gridspec_kw,\n empty_sentinel=empty_sentinel\n )\n return fig, ax_dict\n\n\ndef subplot2grid(shape, loc, rowspan=1, colspan=1, fig=None, **kwargs):\n \"\"\"\n Create a subplot at a specific location inside a regular grid.\n\n Parameters\n ----------\n shape : (int, int)\n Number of rows and of columns of the grid in which to place axis.\n loc : (int, int)\n Row number and column number of the axis location within the grid.\n rowspan : int, default: 1\n Number of rows for the axis to span downwards.\n colspan : int, default: 1\n Number of columns for the axis to span to the right.\n fig : `.Figure`, optional\n Figure to place the subplot in. Defaults to the current figure.\n **kwargs\n Additional keyword arguments are handed to `~.Figure.add_subplot`.\n\n Returns\n -------\n `.axes.SubplotBase`, or another subclass of `~.axes.Axes`\n\n The axes of the subplot. The returned axes base class depends on the\n projection used. It is `~.axes.Axes` if rectilinear projection is used\n and `.projections.polar.PolarAxes` if polar projection is used. 
The\n returned axes is then a subplot subclass of the base class.\n\n Notes\n -----\n The following call ::\n\n ax = subplot2grid((nrows, ncols), (row, col), rowspan, colspan)\n\n is identical to ::\n\n fig = gcf()\n gs = fig.add_gridspec(nrows, ncols)\n ax = fig.add_subplot(gs[row:row+rowspan, col:col+colspan])\n \"\"\"\n\n if fig is None:\n fig = gcf()\n\n rows, cols = shape\n gs = GridSpec._check_gridspec_exists(fig, rows, cols)\n\n subplotspec = gs.new_subplotspec(loc, rowspan=rowspan, colspan=colspan)\n ax = fig.add_subplot(subplotspec, **kwargs)\n bbox = ax.bbox\n axes_to_delete = []\n for other_ax in fig.axes:\n if other_ax == ax:\n continue\n if bbox.fully_overlaps(other_ax.bbox):\n axes_to_delete.append(other_ax)\n for ax_to_del in axes_to_delete:\n delaxes(ax_to_del)\n\n return ax\n\n\ndef twinx(ax=None):\n \"\"\"\n Make and return a second axes that shares the *x*-axis. The new axes will\n overlay *ax* (or the current axes if *ax* is *None*), and its ticks will be\n on the right.\n\n Examples\n --------\n :doc:`/gallery/subplots_axes_and_figures/two_scales`\n \"\"\"\n if ax is None:\n ax = gca()\n ax1 = ax.twinx()\n return ax1\n\n\ndef twiny(ax=None):\n \"\"\"\n Make and return a second axes that shares the *y*-axis. The new axes will\n overlay *ax* (or the current axes if *ax* is *None*), and its ticks will be\n on the top.\n\n Examples\n --------\n :doc:`/gallery/subplots_axes_and_figures/two_scales`\n \"\"\"\n if ax is None:\n ax = gca()\n ax1 = ax.twiny()\n return ax1\n\n\ndef subplot_tool(targetfig=None):\n \"\"\"\n Launch a subplot tool window for a figure.\n\n A `matplotlib.widgets.SubplotTool` instance is returned. You must maintain\n a reference to the instance to keep the associated callbacks alive.\n \"\"\"\n if targetfig is None:\n targetfig = gcf()\n with rc_context({\"toolbar\": \"none\"}): # No navbar for the toolfig.\n # Use new_figure_manager() instead of figure() so that the figure\n # doesn't get registered with pyplot.\n manager = new_figure_manager(-1, (6, 3))\n manager.set_window_title(\"Subplot configuration tool\")\n tool_fig = manager.canvas.figure\n tool_fig.subplots_adjust(top=0.9)\n manager.show()\n return SubplotTool(targetfig, tool_fig)\n\n\n# After deprecation elapses, this can be autogenerated by boilerplate.py.\n@_api.make_keyword_only(\"3.3\", \"pad\")\ndef tight_layout(pad=1.08, h_pad=None, w_pad=None, rect=None):\n \"\"\"\n Adjust the padding between and around subplots.\n\n Parameters\n ----------\n pad : float, default: 1.08\n Padding between the figure edge and the edges of subplots,\n as a fraction of the font size.\n h_pad, w_pad : float, default: *pad*\n Padding (height/width) between edges of adjacent subplots,\n as a fraction of the font size.\n rect : tuple (left, bottom, right, top), default: (0, 0, 1, 1)\n A rectangle in normalized figure coordinates into which the whole\n subplots area (including labels) will fit.\n \"\"\"\n gcf().tight_layout(pad=pad, h_pad=h_pad, w_pad=w_pad, rect=rect)\n\n\ndef box(on=None):\n \"\"\"\n Turn the axes box on or off on the current axes.\n\n Parameters\n ----------\n on : bool or None\n The new `~matplotlib.axes.Axes` box state. 
If ``None``, toggle\n the state.\n\n See Also\n --------\n :meth:`matplotlib.axes.Axes.set_frame_on`\n :meth:`matplotlib.axes.Axes.get_frame_on`\n \"\"\"\n ax = gca()\n if on is None:\n on = not ax.get_frame_on()\n ax.set_frame_on(on)\n\n## Axis ##\n\n\ndef xlim(*args, **kwargs):\n \"\"\"\n Get or set the x limits of the current axes.\n\n Call signatures::\n\n left, right = xlim() # return the current xlim\n xlim((left, right)) # set the xlim to left, right\n xlim(left, right) # set the xlim to left, right\n\n If you do not specify args, you can pass *left* or *right* as kwargs,\n i.e.::\n\n xlim(right=3) # adjust the right leaving left unchanged\n xlim(left=1) # adjust the left leaving right unchanged\n\n Setting limits turns autoscaling off for the x-axis.\n\n Returns\n -------\n left, right\n A tuple of the new x-axis limits.\n\n Notes\n -----\n Calling this function with no arguments (e.g. ``xlim()``) is the pyplot\n equivalent of calling `~.Axes.get_xlim` on the current axes.\n Calling this function with arguments is the pyplot equivalent of calling\n `~.Axes.set_xlim` on the current axes. All arguments are passed though.\n \"\"\"\n ax = gca()\n if not args and not kwargs:\n return ax.get_xlim()\n ret = ax.set_xlim(*args, **kwargs)\n return ret\n\n\ndef ylim(*args, **kwargs):\n \"\"\"\n Get or set the y-limits of the current axes.\n\n Call signatures::\n\n bottom, top = ylim() # return the current ylim\n ylim((bottom, top)) # set the ylim to bottom, top\n ylim(bottom, top) # set the ylim to bottom, top\n\n If you do not specify args, you can alternatively pass *bottom* or\n *top* as kwargs, i.e.::\n\n ylim(top=3) # adjust the top leaving bottom unchanged\n ylim(bottom=1) # adjust the bottom leaving top unchanged\n\n Setting limits turns autoscaling off for the y-axis.\n\n Returns\n -------\n bottom, top\n A tuple of the new y-axis limits.\n\n Notes\n -----\n Calling this function with no arguments (e.g. ``ylim()``) is the pyplot\n equivalent of calling `~.Axes.get_ylim` on the current axes.\n Calling this function with arguments is the pyplot equivalent of calling\n `~.Axes.set_ylim` on the current axes. All arguments are passed though.\n \"\"\"\n ax = gca()\n if not args and not kwargs:\n return ax.get_ylim()\n ret = ax.set_ylim(*args, **kwargs)\n return ret\n\n\ndef xticks(ticks=None, labels=None, **kwargs):\n \"\"\"\n Get or set the current tick locations and labels of the x-axis.\n\n Pass no arguments to return the current values without modifying them.\n\n Parameters\n ----------\n ticks : array-like, optional\n The list of xtick locations. Passing an empty list removes all xticks.\n labels : array-like, optional\n The labels to place at the given *ticks* locations. This argument can\n only be passed if *ticks* is passed as well.\n **kwargs\n `.Text` properties can be used to control the appearance of the labels.\n\n Returns\n -------\n locs\n The list of xtick locations.\n labels\n The list of xlabel `.Text` objects.\n\n Notes\n -----\n Calling this function with no arguments (e.g. 
``xticks()``) is the pyplot\n equivalent of calling `~.Axes.get_xticks` and `~.Axes.get_xticklabels` on\n the current axes.\n Calling this function with arguments is the pyplot equivalent of calling\n `~.Axes.set_xticks` and `~.Axes.set_xticklabels` on the current axes.\n\n Examples\n --------\n >>> locs, labels = xticks() # Get the current locations and labels.\n >>> xticks(np.arange(0, 1, step=0.2)) # Set label locations.\n >>> xticks(np.arange(3), ['Tom', 'Dick', 'Sue']) # Set text labels.\n >>> xticks([0, 1, 2], ['January', 'February', 'March'],\n ... rotation=20) # Set text labels and properties.\n >>> xticks([]) # Disable xticks.\n \"\"\"\n ax = gca()\n\n if ticks is None:\n locs = ax.get_xticks()\n if labels is not None:\n raise TypeError(\"xticks(): Parameter 'labels' can't be set \"\n \"without setting 'ticks'\")\n else:\n locs = ax.set_xticks(ticks)\n\n if labels is None:\n labels = ax.get_xticklabels()\n else:\n labels = ax.set_xticklabels(labels, **kwargs)\n for l in labels:\n l.update(kwargs)\n\n return locs, labels\n\n\ndef yticks(ticks=None, labels=None, **kwargs):\n \"\"\"\n Get or set the current tick locations and labels of the y-axis.\n\n Pass no arguments to return the current values without modifying them.\n\n Parameters\n ----------\n ticks : array-like, optional\n The list of ytick locations. Passing an empty list removes all yticks.\n labels : array-like, optional\n The labels to place at the given *ticks* locations. This argument can\n only be passed if *ticks* is passed as well.\n **kwargs\n `.Text` properties can be used to control the appearance of the labels.\n\n Returns\n -------\n locs\n The list of ytick locations.\n labels\n The list of ylabel `.Text` objects.\n\n Notes\n -----\n Calling this function with no arguments (e.g. ``yticks()``) is the pyplot\n equivalent of calling `~.Axes.get_yticks` and `~.Axes.get_yticklabels` on\n the current axes.\n Calling this function with arguments is the pyplot equivalent of calling\n `~.Axes.set_yticks` and `~.Axes.set_yticklabels` on the current axes.\n\n Examples\n --------\n >>> locs, labels = yticks() # Get the current locations and labels.\n >>> yticks(np.arange(0, 1, step=0.2)) # Set label locations.\n >>> yticks(np.arange(3), ['Tom', 'Dick', 'Sue']) # Set text labels.\n >>> yticks([0, 1, 2], ['January', 'February', 'March'],\n ... rotation=45) # Set text labels and properties.\n >>> yticks([]) # Disable yticks.\n \"\"\"\n ax = gca()\n\n if ticks is None:\n locs = ax.get_yticks()\n if labels is not None:\n raise TypeError(\"yticks(): Parameter 'labels' can't be set \"\n \"without setting 'ticks'\")\n else:\n locs = ax.set_yticks(ticks)\n\n if labels is None:\n labels = ax.get_yticklabels()\n else:\n labels = ax.set_yticklabels(labels, **kwargs)\n for l in labels:\n l.update(kwargs)\n\n return locs, labels\n\n\ndef rgrids(radii=None, labels=None, angle=None, fmt=None, **kwargs):\n \"\"\"\n Get or set the radial gridlines on the current polar plot.\n\n Call signatures::\n\n lines, labels = rgrids()\n lines, labels = rgrids(radii, labels=None, angle=22.5, fmt=None, **kwargs)\n\n When called with no arguments, `.rgrids` simply returns the tuple\n (*lines*, *labels*). When called with arguments, the labels will\n appear at the specified radial distances and angle.\n\n Parameters\n ----------\n radii : tuple with floats\n The radii for the radial gridlines\n\n labels : tuple with strings or None\n The labels to use at each radial gridline. 
The\n `matplotlib.ticker.ScalarFormatter` will be used if None.\n\n angle : float\n The angular position of the radius labels in degrees.\n\n fmt : str or None\n Format string used in `matplotlib.ticker.FormatStrFormatter`.\n For example '%f'.\n\n Returns\n -------\n lines : list of `.lines.Line2D`\n The radial gridlines.\n\n labels : list of `.text.Text`\n The tick labels.\n\n Other Parameters\n ----------------\n **kwargs\n *kwargs* are optional `~.Text` properties for the labels.\n\n See Also\n --------\n .pyplot.thetagrids\n .projections.polar.PolarAxes.set_rgrids\n .Axis.get_gridlines\n .Axis.get_ticklabels\n\n Examples\n --------\n ::\n\n # set the locations of the radial gridlines\n lines, labels = rgrids( (0.25, 0.5, 1.0) )\n\n # set the locations and labels of the radial gridlines\n lines, labels = rgrids( (0.25, 0.5, 1.0), ('Tom', 'Dick', 'Harry' ))\n \"\"\"\n ax = gca()\n if not isinstance(ax, PolarAxes):\n raise RuntimeError('rgrids only defined for polar axes')\n if all(p is None for p in [radii, labels, angle, fmt]) and not kwargs:\n lines = ax.yaxis.get_gridlines()\n labels = ax.yaxis.get_ticklabels()\n else:\n lines, labels = ax.set_rgrids(\n radii, labels=labels, angle=angle, fmt=fmt, **kwargs)\n return lines, labels\n\n\ndef thetagrids(angles=None, labels=None, fmt=None, **kwargs):\n \"\"\"\n Get or set the theta gridlines on the current polar plot.\n\n Call signatures::\n\n lines, labels = thetagrids()\n lines, labels = thetagrids(angles, labels=None, fmt=None, **kwargs)\n\n When called with no arguments, `.thetagrids` simply returns the tuple\n (*lines*, *labels*). When called with arguments, the labels will\n appear at the specified angles.\n\n Parameters\n ----------\n angles : tuple with floats, degrees\n The angles of the theta gridlines.\n\n labels : tuple with strings or None\n The labels to use at each radial gridline. The\n `.projections.polar.ThetaFormatter` will be used if None.\n\n fmt : str or None\n Format string used in `matplotlib.ticker.FormatStrFormatter`.\n For example '%f'. 
Note that the angle in radians will be used.\n\n Returns\n -------\n lines : list of `.lines.Line2D`\n The theta gridlines.\n\n labels : list of `.text.Text`\n The tick labels.\n\n Other Parameters\n ----------------\n **kwargs\n *kwargs* are optional `~.Text` properties for the labels.\n\n See Also\n --------\n .pyplot.rgrids\n .projections.polar.PolarAxes.set_thetagrids\n .Axis.get_gridlines\n .Axis.get_ticklabels\n\n Examples\n --------\n ::\n\n # set the locations of the angular gridlines\n lines, labels = thetagrids(range(45, 360, 90))\n\n # set the locations and labels of the angular gridlines\n lines, labels = thetagrids(range(45, 360, 90), ('NE', 'NW', 'SW', 'SE'))\n \"\"\"\n ax = gca()\n if not isinstance(ax, PolarAxes):\n raise RuntimeError('thetagrids only defined for polar axes')\n if all(param is None for param in [angles, labels, fmt]) and not kwargs:\n lines = ax.xaxis.get_ticklines()\n labels = ax.xaxis.get_ticklabels()\n else:\n lines, labels = ax.set_thetagrids(angles,\n labels=labels, fmt=fmt, **kwargs)\n return lines, labels\n\n\n## Plotting Info ##\n\n\ndef plotting():\n pass\n\n\ndef get_plot_commands():\n \"\"\"\n Get a sorted list of all of the plotting commands.\n \"\"\"\n # This works by searching for all functions in this module and removing\n # a few hard-coded exclusions, as well as all of the colormap-setting\n # functions, and anything marked as private with a preceding underscore.\n exclude = {'colormaps', 'colors', 'connect', 'disconnect',\n 'get_plot_commands', 'get_current_fig_manager', 'ginput',\n 'plotting', 'waitforbuttonpress'}\n exclude |= set(colormaps())\n this_module = inspect.getmodule(get_plot_commands)\n return sorted(\n name for name, obj in globals().items()\n if not name.startswith('_') and name not in exclude\n and inspect.isfunction(obj)\n and inspect.getmodule(obj) is this_module)\n\n\ndef colormaps():\n \"\"\"\n Matplotlib provides a number of colormaps, and others can be added using\n :func:`~matplotlib.cm.register_cmap`. 
This function documents the built-in\n colormaps, and will also return a list of all registered colormaps if\n called.\n\n You can set the colormap for an image, pcolor, scatter, etc,\n using a keyword argument::\n\n imshow(X, cmap=cm.hot)\n\n or using the :func:`set_cmap` function::\n\n imshow(X)\n pyplot.set_cmap('hot')\n pyplot.set_cmap('jet')\n\n In interactive mode, :func:`set_cmap` will update the colormap post-hoc,\n allowing you to see which one works best for your data.\n\n All built-in colormaps can be reversed by appending ``_r``: For instance,\n ``gray_r`` is the reverse of ``gray``.\n\n There are several common color schemes used in visualization:\n\n Sequential schemes\n for unipolar data that progresses from low to high\n Diverging schemes\n for bipolar data that emphasizes positive or negative deviations from a\n central value\n Cyclic schemes\n for plotting values that wrap around at the endpoints, such as phase\n angle, wind direction, or time of day\n Qualitative schemes\n for nominal data that has no inherent ordering, where color is used\n only to distinguish categories\n\n Matplotlib ships with 4 perceptually uniform colormaps which are\n the recommended colormaps for sequential data:\n\n ========= ===================================================\n Colormap Description\n ========= ===================================================\n inferno perceptually uniform shades of black-red-yellow\n magma perceptually uniform shades of black-red-white\n plasma perceptually uniform shades of blue-red-yellow\n viridis perceptually uniform shades of blue-green-yellow\n ========= ===================================================\n\n The following colormaps are based on the `ColorBrewer\n <https://colorbrewer2.org>`_ color specifications and designs developed by\n Cynthia Brewer:\n\n ColorBrewer Diverging (luminance is highest at the midpoint, and\n decreases towards differently-colored endpoints):\n\n ======== ===================================\n Colormap Description\n ======== ===================================\n BrBG brown, white, blue-green\n PiYG pink, white, yellow-green\n PRGn purple, white, green\n PuOr orange, white, purple\n RdBu red, white, blue\n RdGy red, white, gray\n RdYlBu red, yellow, blue\n RdYlGn red, yellow, green\n Spectral red, orange, yellow, green, blue\n ======== ===================================\n\n ColorBrewer Sequential (luminance decreases monotonically):\n\n ======== ====================================\n Colormap Description\n ======== ====================================\n Blues white to dark blue\n BuGn white, light blue, dark green\n BuPu white, light blue, dark purple\n GnBu white, light green, dark blue\n Greens white to dark green\n Greys white to black (not linear)\n Oranges white, orange, dark brown\n OrRd white, orange, dark red\n PuBu white, light purple, dark blue\n PuBuGn white, light purple, dark green\n PuRd white, light purple, dark red\n Purples white to dark purple\n RdPu white, pink, dark purple\n Reds white to dark red\n YlGn light yellow, dark green\n YlGnBu light yellow, light green, dark blue\n YlOrBr light yellow, orange, dark brown\n YlOrRd light yellow, orange, dark red\n ======== ====================================\n\n ColorBrewer Qualitative:\n\n (For plotting nominal data, `.ListedColormap` is used,\n not `.LinearSegmentedColormap`. 
Different sets of colors are\n recommended for different numbers of categories.)\n\n * Accent\n * Dark2\n * Paired\n * Pastel1\n * Pastel2\n * Set1\n * Set2\n * Set3\n\n A set of colormaps derived from those of the same name provided\n with Matlab are also included:\n\n ========= =======================================================\n Colormap Description\n ========= =======================================================\n autumn sequential linearly-increasing shades of red-orange-yellow\n bone sequential increasing black-white colormap with\n a tinge of blue, to emulate X-ray film\n cool linearly-decreasing shades of cyan-magenta\n copper sequential increasing shades of black-copper\n flag repetitive red-white-blue-black pattern (not cyclic at\n endpoints)\n gray sequential linearly-increasing black-to-white\n grayscale\n hot sequential black-red-yellow-white, to emulate blackbody\n radiation from an object at increasing temperatures\n jet a spectral map with dark endpoints, blue-cyan-yellow-red;\n based on a fluid-jet simulation by NCSA [#]_\n pink sequential increasing pastel black-pink-white, meant\n for sepia tone colorization of photographs\n prism repetitive red-yellow-green-blue-purple-...-green pattern\n (not cyclic at endpoints)\n spring linearly-increasing shades of magenta-yellow\n summer sequential linearly-increasing shades of green-yellow\n winter linearly-increasing shades of blue-green\n ========= =======================================================\n\n A set of palettes from the `Yorick scientific visualisation\n package <https://dhmunro.github.io/yorick-doc/>`_, an evolution of\n the GIST package, both by David H. Munro are included:\n\n ============ =======================================================\n Colormap Description\n ============ =======================================================\n gist_earth mapmaker's colors from dark blue deep ocean to green\n lowlands to brown highlands to white mountains\n gist_heat sequential increasing black-red-orange-white, to emulate\n blackbody radiation from an iron bar as it grows hotter\n gist_ncar pseudo-spectral black-blue-green-yellow-red-purple-white\n colormap from National Center for Atmospheric\n Research [#]_\n gist_rainbow runs through the colors in spectral order from red to\n violet at full saturation (like *hsv* but not cyclic)\n gist_stern \"Stern special\" color table from Interactive Data\n Language software\n ============ =======================================================\n\n A set of cyclic colormaps:\n\n ================ =================================================\n Colormap Description\n ================ =================================================\n hsv red-yellow-green-cyan-blue-magenta-red, formed by\n changing the hue component in the HSV color space\n twilight perceptually uniform shades of\n white-blue-black-red-white\n twilight_shifted perceptually uniform shades of\n black-blue-white-red-black\n ================ =================================================\n\n Other miscellaneous schemes:\n\n ============= =======================================================\n Colormap Description\n ============= =======================================================\n afmhot sequential black-orange-yellow-white blackbody\n spectrum, commonly used in atomic force microscopy\n brg blue-red-green\n bwr diverging blue-white-red\n coolwarm diverging blue-gray-red, meant to avoid issues with 3D\n shading, color blindness, and ordering of colors [#]_\n CMRmap \"Default colormaps on color 
images often reproduce to\n confusing grayscale images. The proposed colormap\n maintains an aesthetically pleasing color image that\n automatically reproduces to a monotonic grayscale with\n discrete, quantifiable saturation levels.\" [#]_\n cubehelix Unlike most other color schemes cubehelix was designed\n by D.A. Green to be monotonically increasing in terms\n of perceived brightness. Also, when printed on a black\n and white postscript printer, the scheme results in a\n greyscale with monotonically increasing brightness.\n This color scheme is named cubehelix because the (r, g, b)\n values produced can be visualised as a squashed helix\n around the diagonal in the (r, g, b) color cube.\n gnuplot gnuplot's traditional pm3d scheme\n (black-blue-red-yellow)\n gnuplot2 sequential color printable as gray\n (black-blue-violet-yellow-white)\n ocean green-blue-white\n rainbow spectral purple-blue-green-yellow-orange-red colormap\n with diverging luminance\n seismic diverging blue-white-red\n nipy_spectral black-purple-blue-green-yellow-red-white spectrum,\n originally from the Neuroimaging in Python project\n terrain mapmaker's colors, blue-green-yellow-brown-white,\n originally from IGOR Pro\n turbo Spectral map (purple-blue-green-yellow-orange-red) with\n a bright center and darker endpoints. A smoother\n alternative to jet.\n ============= =======================================================\n\n The following colormaps are redundant and may be removed in future\n versions. It's recommended to use the names in the descriptions\n instead, which produce identical output:\n\n ========= =======================================================\n Colormap Description\n ========= =======================================================\n gist_gray identical to *gray*\n gist_yarg identical to *gray_r*\n binary identical to *gray_r*\n ========= =======================================================\n\n .. rubric:: Footnotes\n\n .. [#] Rainbow colormaps, ``jet`` in particular, are considered a poor\n choice for scientific visualization by many researchers: `Rainbow Color\n Map (Still) Considered Harmful\n <https://ieeexplore.ieee.org/document/4118486/?arnumber=4118486>`_\n\n .. [#] Resembles \"BkBlAqGrYeOrReViWh200\" from NCAR Command\n Language. See `Color Table Gallery\n <https://www.ncl.ucar.edu/Document/Graphics/color_table_gallery.shtml>`_\n\n .. [#] See `Diverging Color Maps for Scientific Visualization\n <http://www.kennethmoreland.com/color-maps/>`_ by Kenneth Moreland.\n\n .. 
[#] See `A Color Map for Effective Black-and-White Rendering of\n Color-Scale Images\n <https://www.mathworks.com/matlabcentral/fileexchange/2662-cmrmap-m>`_\n by Carey Rappaport\n \"\"\"\n return sorted(cm._cmap_registry)\n\n\ndef _setup_pyplot_info_docstrings():\n \"\"\"\n Setup the docstring of `plotting` and of the colormap-setting functions.\n\n These must be done after the entire module is imported, so it is called\n from the end of this module, which is generated by boilerplate.py.\n \"\"\"\n commands = get_plot_commands()\n\n first_sentence = re.compile(r\"(?:\\s*).+?\\.(?:\\s+|$)\", flags=re.DOTALL)\n\n # Collect the first sentence of the docstring for all of the\n # plotting commands.\n rows = []\n max_name = len(\"Function\")\n max_summary = len(\"Description\")\n for name in commands:\n doc = globals()[name].__doc__\n summary = ''\n if doc is not None:\n match = first_sentence.match(doc)\n if match is not None:\n summary = inspect.cleandoc(match.group(0)).replace('\\n', ' ')\n name = '`%s`' % name\n rows.append([name, summary])\n max_name = max(max_name, len(name))\n max_summary = max(max_summary, len(summary))\n\n separator = '=' * max_name + ' ' + '=' * max_summary\n lines = [\n separator,\n '{:{}} {:{}}'.format('Function', max_name, 'Description', max_summary),\n separator,\n ] + [\n '{:{}} {:{}}'.format(name, max_name, summary, max_summary)\n for name, summary in rows\n ] + [\n separator,\n ]\n plotting.__doc__ = '\\n'.join(lines)\n\n for cm_name in colormaps():\n if cm_name in globals():\n globals()[cm_name].__doc__ = f\"\"\"\n Set the colormap to {cm_name!r}.\n\n This changes the default colormap as well as the colormap of the current\n image if there is one. See ``help(colormaps)`` for more information.\n \"\"\"\n\n\n## Plotting part 1: manually generated functions and wrappers ##\n\n\n@_copy_docstring_and_deprecators(Figure.colorbar)\ndef colorbar(mappable=None, cax=None, ax=None, **kw):\n if mappable is None:\n mappable = gci()\n if mappable is None:\n raise RuntimeError('No mappable was found to use for colorbar '\n 'creation. 
First define a mappable such as '\n 'an image (with imshow) or a contour set ('\n 'with contourf).')\n ret = gcf().colorbar(mappable, cax=cax, ax=ax, **kw)\n return ret\n\n\ndef clim(vmin=None, vmax=None):\n \"\"\"\n Set the color limits of the current image.\n\n If either *vmin* or *vmax* is None, the image min/max respectively\n will be used for color scaling.\n\n If you want to set the clim of multiple images, use\n `~.ScalarMappable.set_clim` on every image, for example::\n\n for im in gca().get_images():\n im.set_clim(0, 0.5)\n\n \"\"\"\n im = gci()\n if im is None:\n raise RuntimeError('You must first define an image, e.g., with imshow')\n\n im.set_clim(vmin, vmax)\n\n\ndef set_cmap(cmap):\n \"\"\"\n Set the default colormap, and applies it to the current image if any.\n\n Parameters\n ----------\n cmap : `~matplotlib.colors.Colormap` or str\n A colormap instance or the name of a registered colormap.\n\n See Also\n --------\n colormaps\n matplotlib.cm.register_cmap\n matplotlib.cm.get_cmap\n \"\"\"\n cmap = cm.get_cmap(cmap)\n\n rc('image', cmap=cmap.name)\n im = gci()\n\n if im is not None:\n im.set_cmap(cmap)\n\n\n@_copy_docstring_and_deprecators(matplotlib.image.imread)\ndef imread(fname, format=None):\n return matplotlib.image.imread(fname, format)\n\n\n@_copy_docstring_and_deprecators(matplotlib.image.imsave)\ndef imsave(fname, arr, **kwargs):\n return matplotlib.image.imsave(fname, arr, **kwargs)\n\n\ndef matshow(A, fignum=None, **kwargs):\n \"\"\"\n Display an array as a matrix in a new figure window.\n\n The origin is set at the upper left hand corner and rows (first\n dimension of the array) are displayed horizontally. The aspect\n ratio of the figure window is that of the array, unless this would\n make an excessively short or narrow figure.\n\n Tick labels for the xaxis are placed on top.\n\n Parameters\n ----------\n A : 2D array-like\n The matrix to be displayed.\n\n fignum : None or int or False\n If *None*, create a new figure window with automatic numbering.\n\n If a nonzero integer, draw into the figure with the given number\n (create it if it does not exist).\n\n If 0, use the current axes (or create one if it does not exist).\n\n .. 
note::\n\n Because of how `.Axes.matshow` tries to set the figure aspect\n ratio to be the one of the array, strange things may happen if you\n reuse an existing figure.\n\n Returns\n -------\n `~matplotlib.image.AxesImage`\n\n Other Parameters\n ----------------\n **kwargs : `~matplotlib.axes.Axes.imshow` arguments\n\n \"\"\"\n A = np.asanyarray(A)\n if fignum == 0:\n ax = gca()\n else:\n # Extract actual aspect ratio of array and make appropriately sized\n # figure.\n fig = figure(fignum, figsize=figaspect(A))\n ax = fig.add_axes([0.15, 0.09, 0.775, 0.775])\n im = ax.matshow(A, **kwargs)\n sci(im)\n return im\n\n\ndef polar(*args, **kwargs):\n \"\"\"\n Make a polar plot.\n\n call signature::\n\n polar(theta, r, **kwargs)\n\n Multiple *theta*, *r* arguments are supported, with format strings, as in\n `plot`.\n \"\"\"\n # If an axis already exists, check if it has a polar projection\n if gcf().get_axes():\n ax = gca()\n if isinstance(ax, PolarAxes):\n return ax\n else:\n _api.warn_external('Trying to create polar plot on an Axes '\n 'that does not have a polar projection.')\n ax = axes(projection=\"polar\")\n ret = ax.plot(*args, **kwargs)\n return ret\n\n\n# If rcParams['backend_fallback'] is true, and an interactive backend is\n# requested, ignore rcParams['backend'] and force selection of a backend that\n# is compatible with the current running interactive framework.\nif (rcParams[\"backend_fallback\"]\n and dict.__getitem__(rcParams, \"backend\") in (\n set(_interactive_bk) - {'WebAgg', 'nbAgg'})\n and cbook._get_running_interactive_framework()):\n dict.__setitem__(rcParams, \"backend\", rcsetup._auto_backend_sentinel)\n# Set up the backend.\nswitch_backend(rcParams[\"backend\"])\n\n# Just to be safe. Interactive mode can be turned on without\n# calling `plt.ion()` so register it again here.\n# This is safe because multiple calls to `install_repl_displayhook`\n# are no-ops and the registered function respect `mpl.is_interactive()`\n# to determine if they should trigger a draw.\ninstall_repl_displayhook()\n\n\n################# REMAINING CONTENT GENERATED BY boilerplate.py ##############\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Figure.figimage)\ndef figimage(\n X, xo=0, yo=0, alpha=None, norm=None, cmap=None, vmin=None,\n vmax=None, origin=None, resize=False, **kwargs):\n return gcf().figimage(\n X, xo=xo, yo=yo, alpha=alpha, norm=norm, cmap=cmap, vmin=vmin,\n vmax=vmax, origin=origin, resize=resize, **kwargs)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Figure.text)\ndef figtext(x, y, s, fontdict=None, **kwargs):\n return gcf().text(x, y, s, fontdict=fontdict, **kwargs)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Figure.gca)\ndef gca(**kwargs):\n return gcf().gca(**kwargs)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Figure._gci)\ndef gci():\n return gcf()._gci()\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Figure.ginput)\ndef ginput(\n n=1, timeout=30, show_clicks=True,\n mouse_add=MouseButton.LEFT, mouse_pop=MouseButton.RIGHT,\n mouse_stop=MouseButton.MIDDLE):\n return gcf().ginput(\n n=n, timeout=timeout, show_clicks=show_clicks,\n mouse_add=mouse_add, mouse_pop=mouse_pop,\n mouse_stop=mouse_stop)\n\n\n# Autogenerated by boilerplate.py. 
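Do not edit as changes will be lost.\n\n\n# Hedged usage sketch: the helper name _example_implicit_figure is hypothetical and\n# not part of the pyplot API; it only shows how the wrappers in this section draw on\n# the implicit current figure/axes maintained through gcf() and gca().\ndef _example_implicit_figure():\n    fig, axs = subplot_mosaic([['left', 'right']])  # named Axes from a mosaic layout\n    sca(axs['left'])  # make the 'left' Axes current for subsequent wrappers\n    figtext(0.5, 0.01, 'current-figure demo', ha='center')  # drawn on gcf()\n    return fig, axs\n\n\n# Autogenerated by boilerplate.py. 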
Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Figure.subplots_adjust)\ndef subplots_adjust(\n left=None, bottom=None, right=None, top=None, wspace=None,\n hspace=None):\n return gcf().subplots_adjust(\n left=left, bottom=bottom, right=right, top=top, wspace=wspace,\n hspace=hspace)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Figure.suptitle)\ndef suptitle(t, **kwargs):\n return gcf().suptitle(t, **kwargs)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Figure.waitforbuttonpress)\ndef waitforbuttonpress(timeout=-1):\n return gcf().waitforbuttonpress(timeout=timeout)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.acorr)\ndef acorr(x, *, data=None, **kwargs):\n return gca().acorr(\n x, **({\"data\": data} if data is not None else {}), **kwargs)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.angle_spectrum)\ndef angle_spectrum(\n x, Fs=None, Fc=None, window=None, pad_to=None, sides=None, *,\n data=None, **kwargs):\n return gca().angle_spectrum(\n x, Fs=Fs, Fc=Fc, window=window, pad_to=pad_to, sides=sides,\n **({\"data\": data} if data is not None else {}), **kwargs)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.annotate)\ndef annotate(text, xy, *args, **kwargs):\n return gca().annotate(text, xy, *args, **kwargs)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.arrow)\ndef arrow(x, y, dx, dy, **kwargs):\n return gca().arrow(x, y, dx, dy, **kwargs)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.autoscale)\ndef autoscale(enable=True, axis='both', tight=None):\n return gca().autoscale(enable=enable, axis=axis, tight=tight)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.axhline)\ndef axhline(y=0, xmin=0, xmax=1, **kwargs):\n return gca().axhline(y=y, xmin=xmin, xmax=xmax, **kwargs)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.axhspan)\ndef axhspan(ymin, ymax, xmin=0, xmax=1, **kwargs):\n return gca().axhspan(ymin, ymax, xmin=xmin, xmax=xmax, **kwargs)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.axis)\ndef axis(*args, emit=True, **kwargs):\n return gca().axis(*args, emit=emit, **kwargs)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.axline)\ndef axline(xy1, xy2=None, *, slope=None, **kwargs):\n return gca().axline(xy1, xy2=xy2, slope=slope, **kwargs)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.axvline)\ndef axvline(x=0, ymin=0, ymax=1, **kwargs):\n return gca().axvline(x=x, ymin=ymin, ymax=ymax, **kwargs)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.axvspan)\ndef axvspan(xmin, xmax, ymin=0, ymax=1, **kwargs):\n return gca().axvspan(xmin, xmax, ymin=ymin, ymax=ymax, **kwargs)\n\n\n# Autogenerated by boilerplate.py. 
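Do not edit as changes will be lost.\n\n\n# Hedged usage sketch: _example_reference_lines is a hypothetical helper, not a\n# pyplot API; it combines the line/span wrappers defined above on the current Axes.\ndef _example_reference_lines():\n    fig, ax = subplots()\n    ax.plot([0, 1, 2, 3], [3, 1, 4, 1])\n    axhline(y=2, color='gray', linestyle='--')  # horizontal reference line\n    axvline(x=1.5, color='gray', linestyle=':')  # vertical reference line\n    axvspan(2, 3, alpha=0.2)  # shaded vertical band between x=2 and x=3\n    return fig\n\n\n# Autogenerated by boilerplate.py. 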
Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.bar)\ndef bar(\n x, height, width=0.8, bottom=None, *, align='center',\n data=None, **kwargs):\n return gca().bar(\n x, height, width=width, bottom=bottom, align=align,\n **({\"data\": data} if data is not None else {}), **kwargs)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.barbs)\ndef barbs(*args, data=None, **kw):\n return gca().barbs(\n *args, **({\"data\": data} if data is not None else {}), **kw)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.barh)\ndef barh(y, width, height=0.8, left=None, *, align='center', **kwargs):\n return gca().barh(\n y, width, height=height, left=left, align=align, **kwargs)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.bar_label)\ndef bar_label(\n container, labels=None, *, fmt='%g', label_type='edge',\n padding=0, **kwargs):\n return gca().bar_label(\n container, labels=labels, fmt=fmt, label_type=label_type,\n padding=padding, **kwargs)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.boxplot)\ndef boxplot(\n x, notch=None, sym=None, vert=None, whis=None,\n positions=None, widths=None, patch_artist=None,\n bootstrap=None, usermedians=None, conf_intervals=None,\n meanline=None, showmeans=None, showcaps=None, showbox=None,\n showfliers=None, boxprops=None, labels=None, flierprops=None,\n medianprops=None, meanprops=None, capprops=None,\n whiskerprops=None, manage_ticks=True, autorange=False,\n zorder=None, *, data=None):\n return gca().boxplot(\n x, notch=notch, sym=sym, vert=vert, whis=whis,\n positions=positions, widths=widths, patch_artist=patch_artist,\n bootstrap=bootstrap, usermedians=usermedians,\n conf_intervals=conf_intervals, meanline=meanline,\n showmeans=showmeans, showcaps=showcaps, showbox=showbox,\n showfliers=showfliers, boxprops=boxprops, labels=labels,\n flierprops=flierprops, medianprops=medianprops,\n meanprops=meanprops, capprops=capprops,\n whiskerprops=whiskerprops, manage_ticks=manage_ticks,\n autorange=autorange, zorder=zorder,\n **({\"data\": data} if data is not None else {}))\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.broken_barh)\ndef broken_barh(xranges, yrange, *, data=None, **kwargs):\n return gca().broken_barh(\n xranges, yrange,\n **({\"data\": data} if data is not None else {}), **kwargs)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.clabel)\ndef clabel(CS, levels=None, **kwargs):\n return gca().clabel(CS, levels=levels, **kwargs)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.cohere)\ndef cohere(\n x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,\n window=mlab.window_hanning, noverlap=0, pad_to=None,\n sides='default', scale_by_freq=None, *, data=None, **kwargs):\n return gca().cohere(\n x, y, NFFT=NFFT, Fs=Fs, Fc=Fc, detrend=detrend, window=window,\n noverlap=noverlap, pad_to=pad_to, sides=sides,\n scale_by_freq=scale_by_freq,\n **({\"data\": data} if data is not None else {}), **kwargs)\n\n\n# Autogenerated by boilerplate.py. 
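Do not edit as changes will be lost.\n\n\n# Hedged usage sketch: _example_bar_labels is a hypothetical helper, not a pyplot\n# API; bar() returns a BarContainer that bar_label() annotates in one call.\ndef _example_bar_labels():\n    fig, ax = subplots()\n    container = bar(['a', 'b', 'c'], [3, 7, 5])\n    bar_label(container, fmt='%g', label_type='edge')  # value text at the top of each bar\n    return fig\n\n\n# Autogenerated by boilerplate.py. 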
Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.contour)\ndef contour(*args, data=None, **kwargs):\n __ret = gca().contour(\n *args, **({\"data\": data} if data is not None else {}),\n **kwargs)\n if __ret._A is not None: sci(__ret) # noqa\n return __ret\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.contourf)\ndef contourf(*args, data=None, **kwargs):\n __ret = gca().contourf(\n *args, **({\"data\": data} if data is not None else {}),\n **kwargs)\n if __ret._A is not None: sci(__ret) # noqa\n return __ret\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.csd)\ndef csd(\n x, y, NFFT=None, Fs=None, Fc=None, detrend=None, window=None,\n noverlap=None, pad_to=None, sides=None, scale_by_freq=None,\n return_line=None, *, data=None, **kwargs):\n return gca().csd(\n x, y, NFFT=NFFT, Fs=Fs, Fc=Fc, detrend=detrend, window=window,\n noverlap=noverlap, pad_to=pad_to, sides=sides,\n scale_by_freq=scale_by_freq, return_line=return_line,\n **({\"data\": data} if data is not None else {}), **kwargs)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.errorbar)\ndef errorbar(\n x, y, yerr=None, xerr=None, fmt='', ecolor=None,\n elinewidth=None, capsize=None, barsabove=False, lolims=False,\n uplims=False, xlolims=False, xuplims=False, errorevery=1,\n capthick=None, *, data=None, **kwargs):\n return gca().errorbar(\n x, y, yerr=yerr, xerr=xerr, fmt=fmt, ecolor=ecolor,\n elinewidth=elinewidth, capsize=capsize, barsabove=barsabove,\n lolims=lolims, uplims=uplims, xlolims=xlolims,\n xuplims=xuplims, errorevery=errorevery, capthick=capthick,\n **({\"data\": data} if data is not None else {}), **kwargs)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.eventplot)\ndef eventplot(\n positions, orientation='horizontal', lineoffsets=1,\n linelengths=1, linewidths=None, colors=None,\n linestyles='solid', *, data=None, **kwargs):\n return gca().eventplot(\n positions, orientation=orientation, lineoffsets=lineoffsets,\n linelengths=linelengths, linewidths=linewidths, colors=colors,\n linestyles=linestyles,\n **({\"data\": data} if data is not None else {}), **kwargs)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.fill)\ndef fill(*args, data=None, **kwargs):\n return gca().fill(\n *args, **({\"data\": data} if data is not None else {}),\n **kwargs)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.fill_between)\ndef fill_between(\n x, y1, y2=0, where=None, interpolate=False, step=None, *,\n data=None, **kwargs):\n return gca().fill_between(\n x, y1, y2=y2, where=where, interpolate=interpolate, step=step,\n **({\"data\": data} if data is not None else {}), **kwargs)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.fill_betweenx)\ndef fill_betweenx(\n y, x1, x2=0, where=None, step=None, interpolate=False, *,\n data=None, **kwargs):\n return gca().fill_betweenx(\n y, x1, x2=x2, where=where, step=step, interpolate=interpolate,\n **({\"data\": data} if data is not None else {}), **kwargs)\n\n\n# Autogenerated by boilerplate.py. 
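Do not edit as changes will be lost.\n\n\n# Hedged usage sketch: _example_fill_between is a hypothetical helper, not a pyplot\n# API; it shades the region between two curves with the wrapper defined above.\ndef _example_fill_between():\n    x = np.linspace(0, 2 * np.pi, 100)\n    fig, ax = subplots()\n    plot(x, np.sin(x), label='sin')\n    plot(x, np.cos(x), label='cos')\n    fill_between(x, np.sin(x), np.cos(x), where=np.sin(x) > np.cos(x), alpha=0.3)\n    legend()\n    return fig\n\n\n# Autogenerated by boilerplate.py. 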
Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.grid)\ndef grid(b=None, which='major', axis='both', **kwargs):\n return gca().grid(b=b, which=which, axis=axis, **kwargs)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.hexbin)\ndef hexbin(\n x, y, C=None, gridsize=100, bins=None, xscale='linear',\n yscale='linear', extent=None, cmap=None, norm=None, vmin=None,\n vmax=None, alpha=None, linewidths=None, edgecolors='face',\n reduce_C_function=np.mean, mincnt=None, marginals=False, *,\n data=None, **kwargs):\n __ret = gca().hexbin(\n x, y, C=C, gridsize=gridsize, bins=bins, xscale=xscale,\n yscale=yscale, extent=extent, cmap=cmap, norm=norm, vmin=vmin,\n vmax=vmax, alpha=alpha, linewidths=linewidths,\n edgecolors=edgecolors, reduce_C_function=reduce_C_function,\n mincnt=mincnt, marginals=marginals,\n **({\"data\": data} if data is not None else {}), **kwargs)\n sci(__ret)\n return __ret\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.hist)\ndef hist(\n x, bins=None, range=None, density=False, weights=None,\n cumulative=False, bottom=None, histtype='bar', align='mid',\n orientation='vertical', rwidth=None, log=False, color=None,\n label=None, stacked=False, *, data=None, **kwargs):\n return gca().hist(\n x, bins=bins, range=range, density=density, weights=weights,\n cumulative=cumulative, bottom=bottom, histtype=histtype,\n align=align, orientation=orientation, rwidth=rwidth, log=log,\n color=color, label=label, stacked=stacked,\n **({\"data\": data} if data is not None else {}), **kwargs)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.stairs)\ndef stairs(\n values, edges=None, *, orientation='vertical', baseline=0,\n fill=False, data=None, **kwargs):\n return gca().stairs(\n values, edges=edges, orientation=orientation,\n baseline=baseline, fill=fill,\n **({\"data\": data} if data is not None else {}), **kwargs)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.hist2d)\ndef hist2d(\n x, y, bins=10, range=None, density=False, weights=None,\n cmin=None, cmax=None, *, data=None, **kwargs):\n __ret = gca().hist2d(\n x, y, bins=bins, range=range, density=density,\n weights=weights, cmin=cmin, cmax=cmax,\n **({\"data\": data} if data is not None else {}), **kwargs)\n sci(__ret[-1])\n return __ret\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.hlines)\ndef hlines(\n y, xmin, xmax, colors=None, linestyles='solid', label='', *,\n data=None, **kwargs):\n return gca().hlines(\n y, xmin, xmax, colors=colors, linestyles=linestyles,\n label=label, **({\"data\": data} if data is not None else {}),\n **kwargs)\n\n\n# Autogenerated by boilerplate.py. 
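Do not edit as changes will be lost.\n\n\n# Hedged usage sketch: _example_hist is a hypothetical helper, not a pyplot API;\n# hist() returns the bin counts, the bin edges and the drawn patches.\ndef _example_hist(data=None):\n    data = np.random.default_rng(0).normal(size=500) if data is None else data\n    fig, ax = subplots()\n    counts, edges, patches = hist(data, bins=20, density=True)\n    grid(True, axis='y', alpha=0.5)  # light horizontal grid behind the bars\n    return counts, edges\n\n\n# Autogenerated by boilerplate.py. 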
Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.imshow)\ndef imshow(\n X, cmap=None, norm=None, aspect=None, interpolation=None,\n alpha=None, vmin=None, vmax=None, origin=None, extent=None, *,\n filternorm=True, filterrad=4.0, resample=None, url=None,\n data=None, **kwargs):\n __ret = gca().imshow(\n X, cmap=cmap, norm=norm, aspect=aspect,\n interpolation=interpolation, alpha=alpha, vmin=vmin,\n vmax=vmax, origin=origin, extent=extent,\n filternorm=filternorm, filterrad=filterrad, resample=resample,\n url=url, **({\"data\": data} if data is not None else {}),\n **kwargs)\n sci(__ret)\n return __ret\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.legend)\ndef legend(*args, **kwargs):\n return gca().legend(*args, **kwargs)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.locator_params)\ndef locator_params(axis='both', tight=None, **kwargs):\n return gca().locator_params(axis=axis, tight=tight, **kwargs)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.loglog)\ndef loglog(*args, **kwargs):\n return gca().loglog(*args, **kwargs)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.magnitude_spectrum)\ndef magnitude_spectrum(\n x, Fs=None, Fc=None, window=None, pad_to=None, sides=None,\n scale=None, *, data=None, **kwargs):\n return gca().magnitude_spectrum(\n x, Fs=Fs, Fc=Fc, window=window, pad_to=pad_to, sides=sides,\n scale=scale, **({\"data\": data} if data is not None else {}),\n **kwargs)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.margins)\ndef margins(*margins, x=None, y=None, tight=True):\n return gca().margins(*margins, x=x, y=y, tight=tight)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.minorticks_off)\ndef minorticks_off():\n return gca().minorticks_off()\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.minorticks_on)\ndef minorticks_on():\n return gca().minorticks_on()\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.pcolor)\ndef pcolor(\n *args, shading=None, alpha=None, norm=None, cmap=None,\n vmin=None, vmax=None, data=None, **kwargs):\n __ret = gca().pcolor(\n *args, shading=shading, alpha=alpha, norm=norm, cmap=cmap,\n vmin=vmin, vmax=vmax,\n **({\"data\": data} if data is not None else {}), **kwargs)\n sci(__ret)\n return __ret\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.pcolormesh)\ndef pcolormesh(\n *args, alpha=None, norm=None, cmap=None, vmin=None,\n vmax=None, shading=None, antialiased=False, data=None,\n **kwargs):\n __ret = gca().pcolormesh(\n *args, alpha=alpha, norm=norm, cmap=cmap, vmin=vmin,\n vmax=vmax, shading=shading, antialiased=antialiased,\n **({\"data\": data} if data is not None else {}), **kwargs)\n sci(__ret)\n return __ret\n\n\n# Autogenerated by boilerplate.py. 
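Do not edit as changes will be lost.\n\n\n# Hedged usage sketch: _example_image is a hypothetical helper, not a pyplot API;\n# the imshow() wrapper registers its image as the current mappable via sci(), so a\n# bare colorbar() call afterwards knows which artist to describe.\ndef _example_image():\n    img = np.arange(100).reshape(10, 10)\n    fig, ax = subplots()\n    imshow(img, cmap='viridis', origin='lower')\n    colorbar()  # picks up the image through gci()\n    return fig\n\n\n# Autogenerated by boilerplate.py. 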
Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.phase_spectrum)\ndef phase_spectrum(\n x, Fs=None, Fc=None, window=None, pad_to=None, sides=None, *,\n data=None, **kwargs):\n return gca().phase_spectrum(\n x, Fs=Fs, Fc=Fc, window=window, pad_to=pad_to, sides=sides,\n **({\"data\": data} if data is not None else {}), **kwargs)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.pie)\ndef pie(\n x, explode=None, labels=None, colors=None, autopct=None,\n pctdistance=0.6, shadow=False, labeldistance=1.1,\n startangle=0, radius=1, counterclock=True, wedgeprops=None,\n textprops=None, center=(0, 0), frame=False,\n rotatelabels=False, *, normalize=None, data=None):\n return gca().pie(\n x, explode=explode, labels=labels, colors=colors,\n autopct=autopct, pctdistance=pctdistance, shadow=shadow,\n labeldistance=labeldistance, startangle=startangle,\n radius=radius, counterclock=counterclock,\n wedgeprops=wedgeprops, textprops=textprops, center=center,\n frame=frame, rotatelabels=rotatelabels, normalize=normalize,\n **({\"data\": data} if data is not None else {}))\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.plot)\ndef plot(*args, scalex=True, scaley=True, data=None, **kwargs):\n return gca().plot(\n *args, scalex=scalex, scaley=scaley,\n **({\"data\": data} if data is not None else {}), **kwargs)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.plot_date)\ndef plot_date(\n x, y, fmt='o', tz=None, xdate=True, ydate=False, *,\n data=None, **kwargs):\n return gca().plot_date(\n x, y, fmt=fmt, tz=tz, xdate=xdate, ydate=ydate,\n **({\"data\": data} if data is not None else {}), **kwargs)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.psd)\ndef psd(\n x, NFFT=None, Fs=None, Fc=None, detrend=None, window=None,\n noverlap=None, pad_to=None, sides=None, scale_by_freq=None,\n return_line=None, *, data=None, **kwargs):\n return gca().psd(\n x, NFFT=NFFT, Fs=Fs, Fc=Fc, detrend=detrend, window=window,\n noverlap=noverlap, pad_to=pad_to, sides=sides,\n scale_by_freq=scale_by_freq, return_line=return_line,\n **({\"data\": data} if data is not None else {}), **kwargs)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.quiver)\ndef quiver(*args, data=None, **kw):\n __ret = gca().quiver(\n *args, **({\"data\": data} if data is not None else {}), **kw)\n sci(__ret)\n return __ret\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.quiverkey)\ndef quiverkey(Q, X, Y, U, label, **kw):\n return gca().quiverkey(Q, X, Y, U, label, **kw)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.scatter)\ndef scatter(\n x, y, s=None, c=None, marker=None, cmap=None, norm=None,\n vmin=None, vmax=None, alpha=None, linewidths=None, *,\n edgecolors=None, plotnonfinite=False, data=None, **kwargs):\n __ret = gca().scatter(\n x, y, s=s, c=c, marker=marker, cmap=cmap, norm=norm,\n vmin=vmin, vmax=vmax, alpha=alpha, linewidths=linewidths,\n edgecolors=edgecolors, plotnonfinite=plotnonfinite,\n **({\"data\": data} if data is not None else {}), **kwargs)\n sci(__ret)\n return __ret\n\n\n# Autogenerated by boilerplate.py. 
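Do not edit as changes will be lost.\n\n\n# Hedged usage sketch: _example_scatter is a hypothetical helper, not a pyplot API;\n# the c argument is mapped through a colormap and described with a colorbar.\ndef _example_scatter():\n    rng = np.random.default_rng(1)\n    x, y = rng.random(50), rng.random(50)\n    fig, ax = subplots()\n    scatter(x, y, c=y, s=40, cmap='plasma', edgecolors='none')\n    colorbar(label='y value')\n    return fig\n\n\n# Autogenerated by boilerplate.py. 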
Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.semilogx)\ndef semilogx(*args, **kwargs):\n return gca().semilogx(*args, **kwargs)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.semilogy)\ndef semilogy(*args, **kwargs):\n return gca().semilogy(*args, **kwargs)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.specgram)\ndef specgram(\n x, NFFT=None, Fs=None, Fc=None, detrend=None, window=None,\n noverlap=None, cmap=None, xextent=None, pad_to=None,\n sides=None, scale_by_freq=None, mode=None, scale=None,\n vmin=None, vmax=None, *, data=None, **kwargs):\n __ret = gca().specgram(\n x, NFFT=NFFT, Fs=Fs, Fc=Fc, detrend=detrend, window=window,\n noverlap=noverlap, cmap=cmap, xextent=xextent, pad_to=pad_to,\n sides=sides, scale_by_freq=scale_by_freq, mode=mode,\n scale=scale, vmin=vmin, vmax=vmax,\n **({\"data\": data} if data is not None else {}), **kwargs)\n sci(__ret[-1])\n return __ret\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.spy)\ndef spy(\n Z, precision=0, marker=None, markersize=None, aspect='equal',\n origin='upper', **kwargs):\n __ret = gca().spy(\n Z, precision=precision, marker=marker, markersize=markersize,\n aspect=aspect, origin=origin, **kwargs)\n if isinstance(__ret, cm.ScalarMappable): sci(__ret) # noqa\n return __ret\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.stackplot)\ndef stackplot(\n x, *args, labels=(), colors=None, baseline='zero', data=None,\n **kwargs):\n return gca().stackplot(\n x, *args, labels=labels, colors=colors, baseline=baseline,\n **({\"data\": data} if data is not None else {}), **kwargs)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.stem)\ndef stem(\n *args, linefmt=None, markerfmt=None, basefmt=None, bottom=0,\n label=None, use_line_collection=True, orientation='vertical',\n data=None):\n return gca().stem(\n *args, linefmt=linefmt, markerfmt=markerfmt, basefmt=basefmt,\n bottom=bottom, label=label,\n use_line_collection=use_line_collection,\n orientation=orientation,\n **({\"data\": data} if data is not None else {}))\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.step)\ndef step(x, y, *args, where='pre', data=None, **kwargs):\n return gca().step(\n x, y, *args, where=where,\n **({\"data\": data} if data is not None else {}), **kwargs)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.streamplot)\ndef streamplot(\n x, y, u, v, density=1, linewidth=None, color=None, cmap=None,\n norm=None, arrowsize=1, arrowstyle='-|>', minlength=0.1,\n transform=None, zorder=None, start_points=None, maxlength=4.0,\n integration_direction='both', *, data=None):\n __ret = gca().streamplot(\n x, y, u, v, density=density, linewidth=linewidth, color=color,\n cmap=cmap, norm=norm, arrowsize=arrowsize,\n arrowstyle=arrowstyle, minlength=minlength,\n transform=transform, zorder=zorder, start_points=start_points,\n maxlength=maxlength,\n integration_direction=integration_direction,\n **({\"data\": data} if data is not None else {}))\n sci(__ret.lines)\n return __ret\n\n\n# Autogenerated by boilerplate.py. 
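Do not edit as changes will be lost.\n\n\n# Hedged usage sketch: _example_step is a hypothetical helper, not a pyplot API;\n# step() draws piecewise-constant data and the where argument picks the jump position.\ndef _example_step():\n    x = np.arange(10)\n    fig, ax = subplots()\n    step(x, x ** 2, where='mid')\n    return fig\n\n\n# Autogenerated by boilerplate.py. 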
Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.table)\ndef table(\n cellText=None, cellColours=None, cellLoc='right',\n colWidths=None, rowLabels=None, rowColours=None,\n rowLoc='left', colLabels=None, colColours=None,\n colLoc='center', loc='bottom', bbox=None, edges='closed',\n **kwargs):\n return gca().table(\n cellText=cellText, cellColours=cellColours, cellLoc=cellLoc,\n colWidths=colWidths, rowLabels=rowLabels,\n rowColours=rowColours, rowLoc=rowLoc, colLabels=colLabels,\n colColours=colColours, colLoc=colLoc, loc=loc, bbox=bbox,\n edges=edges, **kwargs)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.text)\ndef text(x, y, s, fontdict=None, **kwargs):\n return gca().text(x, y, s, fontdict=fontdict, **kwargs)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.tick_params)\ndef tick_params(axis='both', **kwargs):\n return gca().tick_params(axis=axis, **kwargs)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.ticklabel_format)\ndef ticklabel_format(\n *, axis='both', style='', scilimits=None, useOffset=None,\n useLocale=None, useMathText=None):\n return gca().ticklabel_format(\n axis=axis, style=style, scilimits=scilimits,\n useOffset=useOffset, useLocale=useLocale,\n useMathText=useMathText)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.tricontour)\ndef tricontour(*args, **kwargs):\n __ret = gca().tricontour(*args, **kwargs)\n if __ret._A is not None: sci(__ret) # noqa\n return __ret\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.tricontourf)\ndef tricontourf(*args, **kwargs):\n __ret = gca().tricontourf(*args, **kwargs)\n if __ret._A is not None: sci(__ret) # noqa\n return __ret\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.tripcolor)\ndef tripcolor(\n *args, alpha=1.0, norm=None, cmap=None, vmin=None, vmax=None,\n shading='flat', facecolors=None, **kwargs):\n __ret = gca().tripcolor(\n *args, alpha=alpha, norm=norm, cmap=cmap, vmin=vmin,\n vmax=vmax, shading=shading, facecolors=facecolors, **kwargs)\n sci(__ret)\n return __ret\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.triplot)\ndef triplot(*args, **kwargs):\n return gca().triplot(*args, **kwargs)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.violinplot)\ndef violinplot(\n dataset, positions=None, vert=True, widths=0.5,\n showmeans=False, showextrema=True, showmedians=False,\n quantiles=None, points=100, bw_method=None, *, data=None):\n return gca().violinplot(\n dataset, positions=positions, vert=vert, widths=widths,\n showmeans=showmeans, showextrema=showextrema,\n showmedians=showmedians, quantiles=quantiles, points=points,\n bw_method=bw_method,\n **({\"data\": data} if data is not None else {}))\n\n\n# Autogenerated by boilerplate.py. 
Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.vlines)\ndef vlines(\n x, ymin, ymax, colors=None, linestyles='solid', label='', *,\n data=None, **kwargs):\n return gca().vlines(\n x, ymin, ymax, colors=colors, linestyles=linestyles,\n label=label, **({\"data\": data} if data is not None else {}),\n **kwargs)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.xcorr)\ndef xcorr(\n x, y, normed=True, detrend=mlab.detrend_none, usevlines=True,\n maxlags=10, *, data=None, **kwargs):\n return gca().xcorr(\n x, y, normed=normed, detrend=detrend, usevlines=usevlines,\n maxlags=maxlags,\n **({\"data\": data} if data is not None else {}), **kwargs)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes._sci)\ndef sci(im):\n return gca()._sci(im)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.set_title)\ndef title(label, fontdict=None, loc=None, pad=None, *, y=None, **kwargs):\n return gca().set_title(\n label, fontdict=fontdict, loc=loc, pad=pad, y=y, **kwargs)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.set_xlabel)\ndef xlabel(xlabel, fontdict=None, labelpad=None, *, loc=None, **kwargs):\n return gca().set_xlabel(\n xlabel, fontdict=fontdict, labelpad=labelpad, loc=loc,\n **kwargs)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.set_ylabel)\ndef ylabel(ylabel, fontdict=None, labelpad=None, *, loc=None, **kwargs):\n return gca().set_ylabel(\n ylabel, fontdict=fontdict, labelpad=labelpad, loc=loc,\n **kwargs)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.set_xscale)\ndef xscale(value, **kwargs):\n return gca().set_xscale(value, **kwargs)\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.\n@_copy_docstring_and_deprecators(Axes.set_yscale)\ndef yscale(value, **kwargs):\n return gca().set_yscale(value, **kwargs)\n\n\n# Autogenerated by boilerplate.py. 
Do not edit as changes will be lost.\ndef autumn(): set_cmap('autumn')\ndef bone(): set_cmap('bone')\ndef cool(): set_cmap('cool')\ndef copper(): set_cmap('copper')\ndef flag(): set_cmap('flag')\ndef gray(): set_cmap('gray')\ndef hot(): set_cmap('hot')\ndef hsv(): set_cmap('hsv')\ndef jet(): set_cmap('jet')\ndef pink(): set_cmap('pink')\ndef prism(): set_cmap('prism')\ndef spring(): set_cmap('spring')\ndef summer(): set_cmap('summer')\ndef winter(): set_cmap('winter')\ndef magma(): set_cmap('magma')\ndef inferno(): set_cmap('inferno')\ndef plasma(): set_cmap('plasma')\ndef viridis(): set_cmap('viridis')\ndef nipy_spectral(): set_cmap('nipy_spectral')\n\n\n_setup_pyplot_info_docstrings()\n", "\"\"\"\nARIMA model class.\n\nAuthor: Chad Fulton\nLicense: BSD-3\n\"\"\"\nfrom statsmodels.compat.pandas import Appender\n\nimport warnings\n\nimport numpy as np\n\nfrom statsmodels.tools.data import _is_using_pandas\nfrom statsmodels.tsa.statespace import sarimax\nfrom statsmodels.tsa.statespace.kalman_filter import MEMORY_CONSERVE\nfrom statsmodels.tsa.statespace.tools import diff\nimport statsmodels.base.wrapper as wrap\n\nfrom statsmodels.tsa.arima.estimators.yule_walker import yule_walker\nfrom statsmodels.tsa.arima.estimators.burg import burg\nfrom statsmodels.tsa.arima.estimators.hannan_rissanen import hannan_rissanen\nfrom statsmodels.tsa.arima.estimators.innovations import (\n innovations, innovations_mle)\nfrom statsmodels.tsa.arima.estimators.gls import gls as estimate_gls\n\nfrom statsmodels.tsa.arima.specification import SARIMAXSpecification\n\n\nclass ARIMA(sarimax.SARIMAX):\n \"\"\"\n Autoregressive Integrated Moving Average (ARIMA) model, and extensions\n\n This model is the basic interface for ARIMA-type models, including those\n with exogenous regressors and those with seasonal components. The most\n general form of the model is SARIMAX(p, d, q)x(P, D, Q, s). It also allows\n all specialized cases, including\n\n - autoregressive models: AR(p)\n - moving average models: MA(q)\n - mixed autoregressive moving average models: ARMA(p, q)\n - integration models: ARIMA(p, d, q)\n - seasonal models: SARIMA(P, D, Q, s)\n - regression with errors that follow one of the above ARIMA-type models\n\n Parameters\n ----------\n endog : array_like, optional\n The observed time-series process :math:`y`.\n exog : array_like, optional\n Array of exogenous regressors.\n order : tuple, optional\n The (p,d,q) order of the model for the autoregressive, differences, and\n moving average components. d is always an integer, while p and q may\n either be integers or lists of integers.\n seasonal_order : tuple, optional\n The (P,D,Q,s) order of the seasonal component of the model for the\n AR parameters, differences, MA parameters, and periodicity. Default\n is (0, 0, 0, 0). D and s are always integers, while P and Q\n may either be integers or lists of positive integers.\n trend : str{'n','c','t','ct'} or iterable, optional\n Parameter controlling the deterministic trend. Can be specified as a\n string where 'c' indicates a constant term, 't' indicates a\n linear trend in time, and 'ct' includes both. Can also be specified as\n an iterable defining a polynomial, as in `numpy.poly1d`, where\n `[1,1,0,1]` would denote :math:`a + bt + ct^3`. 
Default is 'c' for\n models without integration, and no trend for models with integration.\n enforce_stationarity : bool, optional\n Whether or not to require the autoregressive parameters to correspond\n to a stationarity process.\n enforce_invertibility : bool, optional\n Whether or not to require the moving average parameters to correspond\n to an invertible process.\n concentrate_scale : bool, optional\n Whether or not to concentrate the scale (variance of the error term)\n out of the likelihood. This reduces the number of parameters by one.\n This is only applicable when considering estimation by numerical\n maximum likelihood.\n trend_offset : int, optional\n The offset at which to start time trend values. Default is 1, so that\n if `trend='t'` the trend is equal to 1, 2, ..., nobs. Typically is only\n set when the model created by extending a previous dataset.\n dates : array_like of datetime, optional\n If no index is given by `endog` or `exog`, an array-like object of\n datetime objects can be provided.\n freq : str, optional\n If no index is given by `endog` or `exog`, the frequency of the\n time-series may be specified here as a Pandas offset or offset string.\n missing : str\n Available options are 'none', 'drop', and 'raise'. If 'none', no nan\n checking is done. If 'drop', any observations with nans are dropped.\n If 'raise', an error is raised. Default is 'none'.\n\n Notes\n -----\n This model incorporates both exogenous regressors and trend components\n through \"regression with ARIMA errors\".\n\n `enforce_stationarity` and `enforce_invertibility` are specified in the\n constructor because they affect loglikelihood computations, and so should\n not be changed on the fly. This is why they are not instead included as\n arguments to the `fit` method.\n\n TODO: should we use concentrate_scale=True by default?\n\n Examples\n --------\n >>> mod = sm.tsa.arima.ARIMA(endog, order=(1, 0, 0))\n >>> res = mod.fit()\n >>> print(res.summary())\n \"\"\"\n def __init__(self, endog, exog=None, order=(0, 0, 0),\n seasonal_order=(0, 0, 0, 0), trend=None,\n enforce_stationarity=True, enforce_invertibility=True,\n concentrate_scale=False, trend_offset=1, dates=None,\n freq=None, missing='none', validate_specification=True):\n # Default for trend\n # 'c' if there is no integration and 'n' otherwise\n # TODO: if trend='c', then we could alternatively use `demean=True` in\n # the estimation methods rather than setting up `exog` and using GLS.\n # Not sure if it's worth the trouble though.\n integrated = order[1] > 0 or seasonal_order[1] > 0\n if trend is None and not integrated:\n trend = 'c'\n elif trend is None:\n trend = 'n'\n\n # Construct the specification\n # (don't pass specific values of enforce stationarity/invertibility,\n # because we don't actually want to restrict the estimators based on\n # this criteria. 
Instead, we'll just make sure that the parameter\n # estimates from those methods satisfy the criteria.)\n self._spec_arima = SARIMAXSpecification(\n endog, exog=exog, order=order, seasonal_order=seasonal_order,\n trend=trend, enforce_stationarity=None, enforce_invertibility=None,\n concentrate_scale=concentrate_scale, trend_offset=trend_offset,\n dates=dates, freq=freq, missing=missing,\n validate_specification=validate_specification)\n exog = self._spec_arima._model.data.orig_exog\n\n # Raise an error if we have a constant in an integrated model\n\n has_trend = len(self._spec_arima.trend_terms) > 0\n if has_trend:\n lowest_trend = np.min(self._spec_arima.trend_terms)\n if lowest_trend < order[1] + seasonal_order[1]:\n raise ValueError(\n 'In models with integration (`d > 0`) or seasonal'\n ' integration (`D > 0`), trend terms of lower order than'\n ' `d + D` cannot be (as they would be eliminated due to'\n ' the differencing operation). For example, a constant'\n ' cannot be included in an ARIMA(1, 1, 1) model, but'\n ' including a linear trend, which would have the same'\n ' effect as fitting a constant to the differenced data,'\n ' is allowed.')\n\n # Keep the given `exog` by removing the prepended trend variables\n input_exog = None\n if exog is not None:\n if _is_using_pandas(exog, None):\n input_exog = exog.iloc[:, self._spec_arima.k_trend:]\n else:\n input_exog = exog[:, self._spec_arima.k_trend:]\n\n # Initialize the base SARIMAX class\n # Note: we don't pass in a trend value to the base class, since ARIMA\n # standardizes the trend to always be part of exog, while the base\n # SARIMAX class puts it in the transition equation.\n super(ARIMA, self).__init__(\n endog, exog, trend=None, order=order,\n seasonal_order=seasonal_order,\n enforce_stationarity=enforce_stationarity,\n enforce_invertibility=enforce_invertibility,\n concentrate_scale=concentrate_scale, dates=dates, freq=freq,\n missing=missing, validate_specification=validate_specification)\n self.trend = trend\n\n # Save the input exog and input exog names, so that we can refer to\n # them later (see especially `ARIMAResults.append`)\n self._input_exog = input_exog\n if exog is not None:\n self._input_exog_names = self.exog_names[self._spec_arima.k_trend:]\n else:\n self._input_exog_names = None\n\n # Override the public attributes for k_exog and k_trend to reflect the\n # distinction here (for the purpose of the superclass, these are both\n # combined as `k_exog`)\n self.k_exog = self._spec_arima.k_exog\n self.k_trend = self._spec_arima.k_trend\n\n # Remove some init kwargs that aren't used in this model\n unused = ['measurement_error', 'time_varying_regression',\n 'mle_regression', 'simple_differencing',\n 'hamilton_representation']\n self._init_keys = [key for key in self._init_keys if key not in unused]\n\n @property\n def _res_classes(self):\n return {'fit': (ARIMAResults, ARIMAResultsWrapper)}\n\n def fit(self, start_params=None, transformed=True, includes_fixed=False,\n method=None, method_kwargs=None, gls=None, gls_kwargs=None,\n cov_type=None, cov_kwds=None, return_params=False,\n low_memory=False):\n \"\"\"\n Fit (estimate) the parameters of the model.\n\n Parameters\n ----------\n start_params : array_like, optional\n Initial guess of the solution for the loglikelihood maximization.\n If None, the default is given by Model.start_params.\n transformed : bool, optional\n Whether or not `start_params` is already transformed. 
Default is\n True.\n includes_fixed : bool, optional\n If parameters were previously fixed with the `fix_params` method,\n this argument describes whether or not `start_params` also includes\n the fixed parameters, in addition to the free parameters. Default\n is False.\n method : str, optional\n The method used for estimating the parameters of the model. Valid\n options include 'statespace', 'innovations_mle', 'hannan_rissanen',\n 'burg', 'innovations', and 'yule_walker'. Not all options are\n available for every specification (for example 'yule_walker' can\n only be used with AR(p) models).\n method_kwargs : dict, optional\n Arguments to pass to the fit function for the parameter estimator\n described by the `method` argument.\n gls : bool, optional\n Whether or not to use generalized least squares (GLS) to estimate\n regression effects. The default is False if `method='statespace'`\n and is True otherwise.\n gls_kwargs : dict, optional\n Arguments to pass to the GLS estimation fit method. Only applicable\n if GLS estimation is used (see `gls` argument for details).\n cov_type : str, optional\n The `cov_type` keyword governs the method for calculating the\n covariance matrix of parameter estimates. Can be one of:\n\n - 'opg' for the outer product of gradient estimator\n - 'oim' for the observed information matrix estimator, calculated\n using the method of Harvey (1989)\n - 'approx' for the observed information matrix estimator,\n calculated using a numerical approximation of the Hessian matrix.\n - 'robust' for an approximate (quasi-maximum likelihood) covariance\n matrix that may be valid even in the presence of some\n misspecifications. Intermediate calculations use the 'oim'\n method.\n - 'robust_approx' is the same as 'robust' except that the\n intermediate calculations use the 'approx' method.\n - 'none' for no covariance matrix calculation.\n\n Default is 'opg' unless memory conservation is used to avoid\n computing the loglikelihood values for each observation, in which\n case the default is 'oim'.\n cov_kwds : dict or None, optional\n A dictionary of arguments affecting covariance matrix computation.\n\n **opg, oim, approx, robust, robust_approx**\n\n - 'approx_complex_step' : bool, optional - If True, numerical\n approximations are computed using complex-step methods. If False,\n numerical approximations are computed using finite difference\n methods. Default is True.\n - 'approx_centered' : bool, optional - If True, numerical\n approximations computed using finite difference methods use a\n centered approximation. Default is False.\n return_params : bool, optional\n Whether or not to return only the array of maximizing parameters.\n Default is False.\n low_memory : bool, optional\n If set to True, techniques are applied to substantially reduce\n memory usage. If used, some features of the results object will\n not be available (including smoothed results and in-sample\n prediction), although out-of-sample forecasting is possible.\n Default is False.\n\n Returns\n -------\n ARIMAResults\n\n Examples\n --------\n >>> mod = sm.tsa.arima.ARIMA(endog, order=(1, 0, 0))\n >>> res = mod.fit()\n >>> print(res.summary())\n \"\"\"\n # Determine which method to use\n # 1. If method is specified, make sure it is valid\n if method is not None:\n self._spec_arima.validate_estimator(method)\n # 2. 
Otherwise, use state space\n # TODO: may want to consider using innovations (MLE) if possible here,\n # (since in some cases it may be faster than state space), but it is\n # less tested.\n else:\n method = 'statespace'\n\n # Can only use fixed parameters with method='statespace'\n if self._has_fixed_params and method != 'statespace':\n raise ValueError('When parameters have been fixed, only the method'\n ' \"statespace\" can be used; got \"%s\".' % method)\n\n # Handle kwargs related to the fit method\n if method_kwargs is None:\n method_kwargs = {}\n required_kwargs = []\n if method == 'statespace':\n required_kwargs = ['enforce_stationarity', 'enforce_invertibility',\n 'concentrate_scale']\n elif method == 'innovations_mle':\n required_kwargs = ['enforce_invertibility']\n for name in required_kwargs:\n if name in method_kwargs:\n raise ValueError('Cannot override model level value for \"%s\"'\n ' when method=\"%s\".' % (name, method))\n method_kwargs[name] = getattr(self, name)\n\n # Handle kwargs related to GLS estimation\n if gls_kwargs is None:\n gls_kwargs = {}\n\n # Handle starting parameters\n # TODO: maybe should have standard way of computing starting\n # parameters in this class?\n if start_params is not None:\n if method not in ['statespace', 'innovations_mle']:\n raise ValueError('Estimation method \"%s\" does not use starting'\n ' parameters, but `start_params` argument was'\n ' given.' % method)\n\n method_kwargs['start_params'] = start_params\n method_kwargs['transformed'] = transformed\n method_kwargs['includes_fixed'] = includes_fixed\n\n # Perform estimation, depending on whether we have exog or not\n p = None\n fit_details = None\n has_exog = self._spec_arima.exog is not None\n if has_exog or method == 'statespace':\n # Use GLS if it was explicitly requested (`gls = True`) or if it\n # was left at the default (`gls = None`) and the ARMA estimator is\n # anything but statespace.\n # Note: both GLS and statespace are able to handle models with\n # integration, so we don't need to difference endog or exog here.\n if has_exog and (gls or (gls is None and method != 'statespace')):\n p, fit_details = estimate_gls(\n self.endog, exog=self.exog, order=self.order,\n seasonal_order=self.seasonal_order, include_constant=False,\n arma_estimator=method, arma_estimator_kwargs=method_kwargs,\n **gls_kwargs)\n elif method != 'statespace':\n raise ValueError('If `exog` is given and GLS is disabled'\n ' (`gls=False`), then the only valid'\n \" method is 'statespace'. Got '%s'.\"\n % method)\n else:\n method_kwargs.setdefault('disp', 0)\n\n res = super(ARIMA, self).fit(\n return_params=return_params, low_memory=low_memory,\n cov_type=cov_type, cov_kwds=cov_kwds, **method_kwargs)\n if not return_params:\n res.fit_details = res.mlefit\n else:\n # Handle differencing if we have an integrated model\n # (these methods do not support handling integration internally,\n # so we need to manually do the differencing)\n endog = self.endog\n order = self._spec_arima.order\n seasonal_order = self._spec_arima.seasonal_order\n if self._spec_arima.is_integrated:\n warnings.warn('Provided `endog` series has been differenced'\n ' to eliminate integration prior to parameter'\n ' estimation by method \"%s\".' 
% method)\n endog = diff(\n endog, k_diff=self._spec_arima.diff,\n k_seasonal_diff=self._spec_arima.seasonal_diff,\n seasonal_periods=self._spec_arima.seasonal_periods)\n if order[1] > 0:\n order = (order[0], 0, order[2])\n if seasonal_order[1] > 0:\n seasonal_order = (seasonal_order[0], 0, seasonal_order[2],\n seasonal_order[3])\n\n # Now, estimate parameters\n if method == 'yule_walker':\n p, fit_details = yule_walker(\n endog, ar_order=order[0], demean=False,\n **method_kwargs)\n elif method == 'burg':\n p, fit_details = burg(endog, ar_order=order[0],\n demean=False, **method_kwargs)\n elif method == 'hannan_rissanen':\n p, fit_details = hannan_rissanen(\n endog, ar_order=order[0],\n ma_order=order[2], demean=False, **method_kwargs)\n elif method == 'innovations':\n p, fit_details = innovations(\n endog, ma_order=order[2], demean=False,\n **method_kwargs)\n # innovations computes estimates through the given order, so\n # we want to take the estimate associated with the given order\n p = p[-1]\n elif method == 'innovations_mle':\n p, fit_details = innovations_mle(\n endog, order=order,\n seasonal_order=seasonal_order,\n demean=False, **method_kwargs)\n\n # In all cases except method='statespace', we now need to extract the\n # parameters and, optionally, create a new results object\n if p is not None:\n # Need to check that fitted parameters satisfy given restrictions\n if (self.enforce_stationarity\n and self._spec_arima.max_reduced_ar_order > 0\n and not p.is_stationary):\n raise ValueError('Non-stationary autoregressive parameters'\n ' found with `enforce_stationarity=True`.'\n ' Consider setting it to False or using a'\n ' different estimation method, such as'\n ' method=\"statespace\".')\n\n if (self.enforce_invertibility\n and self._spec_arima.max_reduced_ma_order > 0\n and not p.is_invertible):\n raise ValueError('Non-invertible moving average parameters'\n ' found with `enforce_invertibility=True`.'\n ' Consider setting it to False or using a'\n ' different estimation method, such as'\n ' method=\"statespace\".')\n\n # Build the requested results\n if return_params:\n res = p.params\n else:\n # Handle memory conservation option\n if low_memory:\n conserve_memory = self.ssm.conserve_memory\n self.ssm.set_conserve_memory(MEMORY_CONSERVE)\n\n # Perform filtering / smoothing\n if (self.ssm.memory_no_predicted or self.ssm.memory_no_gain\n or self.ssm.memory_no_smoothing):\n func = self.filter\n else:\n func = self.smooth\n res = func(p.params, transformed=True, includes_fixed=True,\n cov_type=cov_type, cov_kwds=cov_kwds)\n\n # Save any details from the fit method\n res.fit_details = fit_details\n\n # Reset memory conservation\n if low_memory:\n self.ssm.set_conserve_memory(conserve_memory)\n\n return res\n\n\n@Appender(sarimax.SARIMAXResults.__doc__)\nclass ARIMAResults(sarimax.SARIMAXResults):\n\n @Appender(sarimax.SARIMAXResults.append.__doc__)\n def append(self, endog, exog=None, refit=False, fit_kwargs=None, **kwargs):\n # MLEResults.append will concatenate the given `exog` here with\n # `data.orig_exog`. However, `data.orig_exog` already has had any\n # trend variables prepended to it, while the `exog` given here should\n # not. 
Instead, we need to temporarily replace `orig_exog` and\n # `exog_names` with the ones that correspond to those that were input\n # by the user.\n if exog is not None:\n orig_exog = self.model.data.orig_exog\n exog_names = self.model.exog_names\n self.model.data.orig_exog = self.model._input_exog\n self.model.exog_names = self.model._input_exog_names\n\n # Perform the appending procedure\n out = super().append(endog, exog=exog, refit=refit,\n fit_kwargs=fit_kwargs, **kwargs)\n\n # Now we reverse the temporary change made above\n if exog is not None:\n self.model.data.orig_exog = orig_exog\n self.model.exog_names = exog_names\n return out\n\n\nclass ARIMAResultsWrapper(sarimax.SARIMAXResultsWrapper):\n _attrs = {}\n _wrap_attrs = wrap.union_dicts(\n sarimax.SARIMAXResultsWrapper._wrap_attrs, _attrs)\n _methods = {}\n _wrap_methods = wrap.union_dicts(\n sarimax.SARIMAXResultsWrapper._wrap_methods, _methods)\nwrap.populate_wrapper(ARIMAResultsWrapper, ARIMAResults) # noqa:E305\n", "import platform\n\nfrom distutils.unixccompiler import UnixCCompiler\nfrom numpy.distutils.exec_command import find_executable\nfrom numpy.distutils.ccompiler import simple_version_match\nif platform.system() == 'Windows':\n from numpy.distutils.msvc9compiler import MSVCCompiler\n\n\nclass IntelCCompiler(UnixCCompiler):\n \"\"\"A modified Intel compiler compatible with a GCC-built Python.\"\"\"\n compiler_type = 'intel'\n cc_exe = 'icc'\n cc_args = 'fPIC'\n\n def __init__(self, verbose=0, dry_run=0, force=0):\n UnixCCompiler.__init__(self, verbose, dry_run, force)\n\n v = self.get_version()\n mpopt = 'openmp' if v and v < '15' else 'qopenmp'\n self.cc_exe = ('icc -fPIC -fp-model strict -O3 '\n '-fomit-frame-pointer -{}').format(mpopt)\n compiler = self.cc_exe\n\n if platform.system() == 'Darwin':\n shared_flag = '-Wl,-undefined,dynamic_lookup'\n else:\n shared_flag = '-shared'\n self.set_executables(compiler=compiler,\n compiler_so=compiler,\n compiler_cxx=compiler,\n archiver='xiar' + ' cru',\n linker_exe=compiler + ' -shared-intel',\n linker_so=compiler + ' ' + shared_flag +\n ' -shared-intel')\n\n\nclass IntelItaniumCCompiler(IntelCCompiler):\n compiler_type = 'intele'\n\n # On Itanium, the Intel Compiler used to be called ecc, let's search for\n # it (now it's also icc, so ecc is last in the search).\n for cc_exe in map(find_executable, ['icc', 'ecc']):\n if cc_exe:\n break\n\n\nclass IntelEM64TCCompiler(UnixCCompiler):\n \"\"\"\n A modified Intel x86_64 compiler compatible with a 64bit GCC-built Python.\n \"\"\"\n compiler_type = 'intelem'\n cc_exe = 'icc -m64'\n cc_args = '-fPIC'\n\n def __init__(self, verbose=0, dry_run=0, force=0):\n UnixCCompiler.__init__(self, verbose, dry_run, force)\n\n v = self.get_version()\n mpopt = 'openmp' if v and v < '15' else 'qopenmp'\n self.cc_exe = ('icc -m64 -fPIC -fp-model strict -O3 '\n '-fomit-frame-pointer -{}').format(mpopt)\n compiler = self.cc_exe\n\n if platform.system() == 'Darwin':\n shared_flag = '-Wl,-undefined,dynamic_lookup'\n else:\n shared_flag = '-shared'\n self.set_executables(compiler=compiler,\n compiler_so=compiler,\n compiler_cxx=compiler,\n archiver='xiar' + ' cru',\n linker_exe=compiler + ' -shared-intel',\n linker_so=compiler + ' ' + shared_flag +\n ' -shared-intel')\n\n\nif platform.system() == 'Windows':\n class IntelCCompilerW(MSVCCompiler):\n \"\"\"\n A modified Intel compiler compatible with an MSVC-built Python.\n \"\"\"\n compiler_type = 'intelw'\n compiler_cxx = 'icl'\n\n def __init__(self, verbose=0, dry_run=0, force=0):\n 
MSVCCompiler.__init__(self, verbose, dry_run, force)\n version_match = simple_version_match(start=r'Intel\\(R\\).*?32,')\n self.__version = version_match\n\n def initialize(self, plat_name=None):\n MSVCCompiler.initialize(self, plat_name)\n self.cc = self.find_exe('icl.exe')\n self.lib = self.find_exe('xilib')\n self.linker = self.find_exe('xilink')\n self.compile_options = ['/nologo', '/O3', '/MD', '/W3',\n '/Qstd=c99']\n self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3',\n '/Qstd=c99', '/Z7', '/D_DEBUG']\n\n class IntelEM64TCCompilerW(IntelCCompilerW):\n \"\"\"\n A modified Intel x86_64 compiler compatible with\n a 64bit MSVC-built Python.\n \"\"\"\n compiler_type = 'intelemw'\n\n def __init__(self, verbose=0, dry_run=0, force=0):\n MSVCCompiler.__init__(self, verbose, dry_run, force)\n version_match = simple_version_match(start=r'Intel\\(R\\).*?64,')\n self.__version = version_match\n", "# -*- coding: utf-8 -*-\n\"\"\"\n\nCreated on Fri Mar 01 14:56:56 2013\n\nAuthor: Josef Perktold\n\"\"\"\nimport warnings\n\nimport pytest\nimport numpy as np\nimport pandas as pd\nfrom numpy.testing import (assert_almost_equal, assert_equal,\n assert_array_less, assert_raises, assert_allclose)\n\nfrom statsmodels.stats.proportion import (proportion_confint,\n confint_proportions_2indep,\n multinomial_proportions_confint,\n score_test_proportions_2indep,\n power_proportions_2indep,\n samplesize_proportions_2indep_onetail,\n )\nimport statsmodels.stats.proportion as smprop\nfrom statsmodels.tools.sm_exceptions import HypothesisTestWarning\nfrom statsmodels.tools.testing import Holder\n\n\nprobci_methods = {'agresti_coull': 'agresti-coull',\n 'normal': 'asymptotic',\n 'beta': 'exact',\n 'wilson': 'wilson',\n 'jeffreys': 'bayes'\n }\n\n\ndef test_confint_proportion():\n from .results.results_proportion import res_binom, res_binom_methods\n\n\n for case in res_binom:\n count, nobs = case\n for method in probci_methods:\n idx = res_binom_methods.index(probci_methods[method])\n res_low = res_binom[case].ci_low[idx]\n res_upp = res_binom[case].ci_upp[idx]\n if np.isnan(res_low) or np.isnan(res_upp):\n continue\n if (count == 0 or count == nobs) and method == 'jeffreys':\n # maybe a bug or different corner case definition\n continue\n if method == 'jeffreys' and nobs == 30:\n # something is strange in extreme case e.g 0/30 or 1/30\n continue\n ci = proportion_confint(count, nobs, alpha=0.05, method=method)\n # we impose that confint is in [0, 1]\n res_low = max(res_low, 0)\n res_upp = min(res_upp, 1)\n assert_almost_equal(ci, [res_low, res_upp], decimal=6,\n err_msg=repr(case) + method)\n\n\[email protected]('method', probci_methods)\ndef test_confint_proportion_ndim(method):\n # check that it works with 1-D, 2-D and pandas\n\n count = np.arange(6).reshape(2, 3)\n nobs = 10 * np.ones((2, 3))\n\n count_pd = pd.DataFrame(count)\n nobs_pd = pd.DataFrame(nobs)\n\n ci_arr = proportion_confint(count, nobs, alpha=0.05, method=method)\n ci_pd = proportion_confint(count_pd, nobs_pd, alpha=0.05,\n method=method)\n assert_allclose(ci_arr, (ci_pd[0].values, ci_pd[1].values), rtol=1e-13)\n # spot checking one value\n ci12 = proportion_confint(count[1, 2], nobs[1, 2], alpha=0.05,\n method=method)\n assert_allclose((ci_pd[0].values[1, 2], ci_pd[1].values[1, 2]), ci12,\n rtol=1e-13)\n assert_allclose((ci_arr[0][1, 2], ci_arr[1][1, 2]), ci12, rtol=1e-13)\n\n # check that lists work as input\n ci_li = proportion_confint(count.tolist(), nobs.tolist(), alpha=0.05,\n method=method)\n assert_allclose(ci_arr, (ci_li[0], 
ci_li[1]), rtol=1e-13)\n\n # check pandas Series, 1-D\n ci_pds = proportion_confint(count_pd.iloc[0], nobs_pd.iloc[0],\n alpha=0.05, method=method)\n assert_allclose((ci_pds[0].values, ci_pds[1].values),\n (ci_pd[0].values[0], ci_pd[1].values[0]), rtol=1e-13)\n\n # check scalar nobs, verifying one value\n ci_arr2 = proportion_confint(count, nobs[1, 2], alpha=0.05,\n method=method)\n assert_allclose((ci_arr2[0][1, 2], ci_arr[1][1, 2]), ci12, rtol=1e-13)\n\n\ndef test_samplesize_confidenceinterval_prop():\n #consistency test for samplesize to achieve confidence_interval\n nobs = 20\n ci = smprop.proportion_confint(12, nobs, alpha=0.05, method='normal')\n res = smprop.samplesize_confint_proportion(12./nobs, (ci[1] - ci[0]) / 2)\n assert_almost_equal(res, nobs, decimal=13)\n\ndef test_proportion_effect_size():\n # example from blog\n es = smprop.proportion_effectsize(0.5, 0.4)\n assert_almost_equal(es, 0.2013579207903309, decimal=13)\n\ndef test_confint_multinomial_proportions():\n from .results.results_multinomial_proportions import res_multinomial\n\n for ((method, description), values) in res_multinomial.items():\n cis = multinomial_proportions_confint(values.proportions, 0.05,\n method=method)\n assert_almost_equal(\n values.cis, cis, decimal=values.precision,\n err_msg='\"%s\" method, %s' % (method, description))\n\ndef test_multinomial_proportions_errors():\n # Out-of-bounds values for alpha raise a ValueError\n for alpha in [-.1, 0, 1, 1.1]:\n assert_raises(ValueError, multinomial_proportions_confint,\n [5] * 50, alpha=alpha)\n\n assert_raises(ValueError, multinomial_proportions_confint,\n np.arange(50) - 1)\n # Any unknown method is reported.\n for method in ['unknown_method', 'sisok_method', 'unknown-glaz']:\n assert_raises(NotImplementedError, multinomial_proportions_confint,\n [5] * 50, method=method)\n\ndef test_confint_multinomial_proportions_zeros():\n # test when a count is zero or close to zero\n # values from R MultinomialCI\n ci01 = np.array([\n 0.09364718, 0.1898413,\n 0.00000000, 0.0483581,\n 0.13667426, 0.2328684,\n 0.10124019, 0.1974343,\n 0.10883321, 0.2050273,\n 0.17210833, 0.2683024,\n 0.09870919, 0.1949033]).reshape(-1,2)\n\n ci0 = np.array([\n 0.09620253, 0.19238867,\n 0.00000000, 0.05061652,\n 0.13924051, 0.23542664,\n 0.10379747, 0.19998360,\n 0.11139241, 0.20757854,\n 0.17468354, 0.27086968,\n 0.10126582, 0.19745196]).reshape(-1,2)\n\n # the shifts are the differences between \"LOWER(SG)\" \"UPPER(SG)\" and\n # \"LOWER(C+1)\" \"UPPER(C+1)\" in verbose printout\n # ci01_shift = np.array([0.002531008, -0.002515122]) # not needed\n ci0_shift = np.array([0.002531642, 0.002515247])\n\n p = [56, 0.1, 73, 59, 62, 87, 58]\n ci_01 = smprop.multinomial_proportions_confint(p, 0.05,\n method='sison_glaz')\n p = [56, 0, 73, 59, 62, 87, 58]\n ci_0 = smprop.multinomial_proportions_confint(p, 0.05,\n method='sison_glaz')\n\n assert_allclose(ci_01, ci01, atol=1e-5)\n assert_allclose(ci_0, np.maximum(ci0 - ci0_shift, 0), atol=1e-5)\n assert_allclose(ci_01, ci_0, atol=5e-4)\n\n\nclass CheckProportionMixin(object):\n def test_proptest(self):\n # equality of k-samples\n pt = smprop.proportions_chisquare(self.n_success, self.nobs, value=None)\n assert_almost_equal(pt[0], self.res_prop_test.statistic, decimal=13)\n assert_almost_equal(pt[1], self.res_prop_test.p_value, decimal=13)\n\n # several against value\n pt = smprop.proportions_chisquare(self.n_success, self.nobs,\n value=self.res_prop_test_val.null_value[0])\n assert_almost_equal(pt[0], self.res_prop_test_val.statistic, 
decimal=13)\n assert_almost_equal(pt[1], self.res_prop_test_val.p_value, decimal=13)\n\n # one proportion against value\n pt = smprop.proportions_chisquare(self.n_success[0], self.nobs[0],\n value=self.res_prop_test_1.null_value)\n assert_almost_equal(pt[0], self.res_prop_test_1.statistic, decimal=13)\n assert_almost_equal(pt[1], self.res_prop_test_1.p_value, decimal=13)\n\n def test_pairwiseproptest(self):\n ppt = smprop.proportions_chisquare_allpairs(self.n_success, self.nobs,\n multitest_method=None)\n assert_almost_equal(ppt.pvals_raw, self.res_ppt_pvals_raw)\n ppt = smprop.proportions_chisquare_allpairs(self.n_success, self.nobs,\n multitest_method='h')\n assert_almost_equal(ppt.pval_corrected(), self.res_ppt_pvals_holm)\n\n pptd = smprop.proportions_chisquare_pairscontrol(self.n_success,\n self.nobs, multitest_method='hommel')\n assert_almost_equal(pptd.pvals_raw, ppt.pvals_raw[:len(self.nobs) - 1],\n decimal=13)\n\n\n def test_number_pairs_1493(self):\n ppt = smprop.proportions_chisquare_allpairs(self.n_success[:3],\n self.nobs[:3],\n multitest_method=None)\n\n assert_equal(len(ppt.pvals_raw), 3)\n idx = [0, 1, 3]\n assert_almost_equal(ppt.pvals_raw, self.res_ppt_pvals_raw[idx])\n\n\nclass TestProportion(CheckProportionMixin):\n def setup(self):\n self.n_success = np.array([ 73, 90, 114, 75])\n self.nobs = np.array([ 86, 93, 136, 82])\n\n self.res_ppt_pvals_raw = np.array([\n 0.00533824886503131, 0.8327574849753566, 0.1880573726722516,\n 0.002026764254350234, 0.1309487516334318, 0.1076118730631731\n ])\n self.res_ppt_pvals_holm = np.array([\n 0.02669124432515654, 0.8327574849753566, 0.4304474922526926,\n 0.0121605855261014, 0.4304474922526926, 0.4304474922526926\n ])\n\n res_prop_test = Holder()\n res_prop_test.statistic = 11.11938768628861\n res_prop_test.parameter = 3\n res_prop_test.p_value = 0.011097511366581344\n res_prop_test.estimate = np.array([\n 0.848837209302326, 0.967741935483871, 0.838235294117647,\n 0.9146341463414634\n ]).reshape(4,1, order='F')\n res_prop_test.null_value = '''NULL'''\n res_prop_test.conf_int = '''NULL'''\n res_prop_test.alternative = 'two.sided'\n res_prop_test.method = '4-sample test for equality of proportions ' + \\\n 'without continuity correction'\n res_prop_test.data_name = 'smokers2 out of patients'\n self.res_prop_test = res_prop_test\n\n #> pt = prop.test(smokers2, patients, p=rep(c(0.9), 4), correct=FALSE)\n #> cat_items(pt, \"res_prop_test_val.\")\n res_prop_test_val = Holder()\n res_prop_test_val.statistic = np.array([\n 13.20305530710751\n ]).reshape(1,1, order='F')\n res_prop_test_val.parameter = np.array([\n 4\n ]).reshape(1,1, order='F')\n res_prop_test_val.p_value = 0.010325090041836\n res_prop_test_val.estimate = np.array([\n 0.848837209302326, 0.967741935483871, 0.838235294117647,\n 0.9146341463414634\n ]).reshape(4,1, order='F')\n res_prop_test_val.null_value = np.array([\n 0.9, 0.9, 0.9, 0.9\n ]).reshape(4,1, order='F')\n res_prop_test_val.conf_int = '''NULL'''\n res_prop_test_val.alternative = 'two.sided'\n res_prop_test_val.method = '4-sample test for given proportions without continuity correction'\n res_prop_test_val.data_name = 'smokers2 out of patients, null probabilities rep(c(0.9), 4)'\n self.res_prop_test_val = res_prop_test_val\n\n #> pt = prop.test(smokers2[1], patients[1], p=0.9, correct=FALSE)\n #> cat_items(pt, \"res_prop_test_1.\")\n res_prop_test_1 = Holder()\n res_prop_test_1.statistic = 2.501291989664086\n res_prop_test_1.parameter = 1\n res_prop_test_1.p_value = 0.113752943640092\n res_prop_test_1.estimate = 
0.848837209302326\n res_prop_test_1.null_value = 0.9\n res_prop_test_1.conf_int = np.array([0.758364348004061,\n 0.9094787701686766])\n res_prop_test_1.alternative = 'two.sided'\n res_prop_test_1.method = '1-sample proportions test without continuity correction'\n res_prop_test_1.data_name = 'smokers2[1] out of patients[1], null probability 0.9'\n self.res_prop_test_1 = res_prop_test_1\n\n # GH 2969\n def test_default_values(self):\n count = np.array([5, 12])\n nobs = np.array([83, 99])\n stat, pval = smprop.proportions_ztest(count, nobs, value=None)\n assert_almost_equal(stat, -1.4078304151258787)\n assert_almost_equal(pval, 0.15918129181156992)\n\n # GH 2779\n def test_scalar(self):\n count = 5\n nobs = 83\n value = 0.05\n stat, pval = smprop.proportions_ztest(count, nobs, value=value)\n assert_almost_equal(stat, 0.392126026314)\n assert_almost_equal(pval, 0.694965098115)\n\n assert_raises(ValueError, smprop.proportions_ztest, count, nobs, value=None)\n\n\ndef test_binom_test():\n #> bt = binom.test(51,235,(1/6),alternative=\"less\")\n #> cat_items(bt, \"binom_test_less.\")\n binom_test_less = Holder()\n binom_test_less.statistic = 51\n binom_test_less.parameter = 235\n binom_test_less.p_value = 0.982022657605858\n binom_test_less.conf_int = [0, 0.2659460862574313]\n binom_test_less.estimate = 0.2170212765957447\n binom_test_less.null_value = 1. / 6\n binom_test_less.alternative = 'less'\n binom_test_less.method = 'Exact binomial test'\n binom_test_less.data_name = '51 and 235'\n\n #> bt = binom.test(51,235,(1/6),alternative=\"greater\")\n #> cat_items(bt, \"binom_test_greater.\")\n binom_test_greater = Holder()\n binom_test_greater.statistic = 51\n binom_test_greater.parameter = 235\n binom_test_greater.p_value = 0.02654424571169085\n binom_test_greater.conf_int = [0.1735252778065201, 1]\n binom_test_greater.estimate = 0.2170212765957447\n binom_test_greater.null_value = 1. / 6\n binom_test_greater.alternative = 'greater'\n binom_test_greater.method = 'Exact binomial test'\n binom_test_greater.data_name = '51 and 235'\n\n #> bt = binom.test(51,235,(1/6),alternative=\"t\")\n #> cat_items(bt, \"binom_test_2sided.\")\n binom_test_2sided = Holder()\n binom_test_2sided.statistic = 51\n binom_test_2sided.parameter = 235\n binom_test_2sided.p_value = 0.0437479701823997\n binom_test_2sided.conf_int = [0.1660633298083073, 0.2752683640289254]\n binom_test_2sided.estimate = 0.2170212765957447\n binom_test_2sided.null_value = 1. / 6\n binom_test_2sided.alternative = 'two.sided'\n binom_test_2sided.method = 'Exact binomial test'\n binom_test_2sided.data_name = '51 and 235'\n\n alltests = [('larger', binom_test_greater),\n ('smaller', binom_test_less),\n ('two-sided', binom_test_2sided)]\n\n for alt, res0 in alltests:\n # only p-value is returned\n res = smprop.binom_test(51, 235, prop=1. 
/ 6, alternative=alt)\n #assert_almost_equal(res[0], res0.statistic)\n assert_almost_equal(res, res0.p_value, decimal=13)\n\n # R binom_test returns Copper-Pearson confint\n ci_2s = smprop.proportion_confint(51, 235, alpha=0.05, method='beta')\n ci_low, ci_upp = smprop.proportion_confint(51, 235, alpha=0.1,\n method='beta')\n assert_almost_equal(ci_2s, binom_test_2sided.conf_int, decimal=13)\n assert_almost_equal(ci_upp, binom_test_less.conf_int[1], decimal=13)\n assert_almost_equal(ci_low, binom_test_greater.conf_int[0], decimal=13)\n\n\ndef test_binom_rejection_interval():\n # consistency check with binom_test\n # some code duplication but limit checks are different\n alpha = 0.05\n nobs = 200\n prop = 12./20\n alternative='smaller'\n ci_low, ci_upp = smprop.binom_test_reject_interval(prop, nobs, alpha=alpha,\n alternative=alternative)\n assert_equal(ci_upp, nobs)\n pval = smprop.binom_test(ci_low, nobs, prop=prop,\n alternative=alternative)\n assert_array_less(pval, alpha)\n pval = smprop.binom_test(ci_low + 1, nobs, prop=prop,\n alternative=alternative)\n assert_array_less(alpha, pval)\n\n alternative='larger'\n ci_low, ci_upp = smprop.binom_test_reject_interval(prop, nobs, alpha=alpha,\n alternative=alternative)\n assert_equal(ci_low, 0)\n pval = smprop.binom_test(ci_upp, nobs, prop=prop,\n alternative=alternative)\n assert_array_less(pval, alpha)\n pval = smprop.binom_test(ci_upp - 1, nobs, prop=prop,\n alternative=alternative)\n assert_array_less(alpha, pval)\n\n alternative='two-sided'\n ci_low, ci_upp = smprop.binom_test_reject_interval(prop, nobs, alpha=alpha,\n alternative=alternative)\n pval = smprop.binom_test(ci_upp, nobs, prop=prop,\n alternative=alternative)\n assert_array_less(pval, alpha)\n pval = smprop.binom_test(ci_upp - 1, nobs, prop=prop,\n alternative=alternative)\n assert_array_less(alpha, pval)\n pval = smprop.binom_test(ci_upp, nobs, prop=prop,\n alternative=alternative)\n assert_array_less(pval, alpha)\n\n pval = smprop.binom_test(ci_upp - 1, nobs, prop=prop,\n alternative=alternative)\n assert_array_less(alpha, pval)\n\n\n\ndef test_binom_tost():\n # consistency check with two different implementation,\n # proportion_confint is tested against R\n # no reference case from other package available\n ci = smprop.proportion_confint(10, 20, method='beta', alpha=0.1)\n bt = smprop.binom_tost(10, 20, *ci)\n assert_almost_equal(bt, [0.05] * 3, decimal=12)\n\n ci = smprop.proportion_confint(5, 20, method='beta', alpha=0.1)\n bt = smprop.binom_tost(5, 20, *ci)\n assert_almost_equal(bt, [0.05] * 3, decimal=12)\n\n # vectorized, TODO: observed proportion = 0 returns nan\n ci = smprop.proportion_confint(np.arange(1, 20), 20, method='beta',\n alpha=0.05)\n bt = smprop.binom_tost(np.arange(1, 20), 20, *ci)\n bt = np.asarray(bt)\n assert_almost_equal(bt, 0.025 * np.ones(bt.shape), decimal=12)\n\ndef test_power_binom_tost():\n # comparison numbers from PASS manual\n p_alt = 0.6 + np.linspace(0, 0.09, 10)\n power = smprop.power_binom_tost(0.5, 0.7, 500, p_alt=p_alt, alpha=0.05)\n res_power = np.array([0.9965, 0.9940, 0.9815, 0.9482, 0.8783, 0.7583,\n 0.5914, 0.4041, 0.2352, 0.1139])\n assert_almost_equal(power, res_power, decimal=4)\n\n rej_int = smprop.binom_tost_reject_interval(0.5, 0.7, 500)\n res_rej_int = (269, 332)\n assert_equal(rej_int, res_rej_int)\n\n # TODO: actual alpha=0.0489 for all p_alt above\n\n # another case\n nobs = np.arange(20, 210, 20)\n power = smprop.power_binom_tost(0.4, 0.6, nobs, p_alt=0.5, alpha=0.05)\n res_power = np.array([ 0., 0., 0., 0.0889, 
0.2356, 0.3517, 0.4457,\n 0.6154, 0.6674, 0.7708])\n # TODO: I currently do not impose power>=0, i.e np.maximum(power, 0)\n assert_almost_equal(np.maximum(power, 0), res_power, decimal=4)\n\ndef test_power_ztost_prop():\n power = smprop.power_ztost_prop(0.1, 0.9, 10, p_alt=0.6, alpha=0.05,\n discrete=True, dist='binom')[0]\n assert_almost_equal(power, 0.8204, decimal=4) # PASS example\n\n with warnings.catch_warnings(): # python >= 2.6\n warnings.simplefilter(\"ignore\", HypothesisTestWarning)\n power = smprop.power_ztost_prop(0.4, 0.6, np.arange(20, 210, 20),\n p_alt=0.5, alpha=0.05, discrete=False,\n dist='binom')[0]\n\n res_power = np.array([ 0., 0., 0., 0.0889, 0.2356, 0.4770, 0.5530,\n 0.6154, 0.7365, 0.7708])\n # TODO: I currently do not impose power>=0, i.e np.maximum(power, 0)\n assert_almost_equal(np.maximum(power, 0), res_power, decimal=4)\n\n # with critval_continuity correction\n power = smprop.power_ztost_prop(0.4, 0.6, np.arange(20, 210, 20),\n p_alt=0.5, alpha=0.05, discrete=False,\n dist='binom', variance_prop=None,\n continuity=2, critval_continuity=1)[0]\n\n res_power = np.array([0., 0., 0., 0.0889, 0.2356, 0.3517, 0.4457,\n 0.6154, 0.6674, 0.7708])\n # TODO: I currently do not impose power>=0, i.e np.maximum(power, 0)\n assert_almost_equal(np.maximum(power, 0), res_power, decimal=4)\n\n power = smprop.power_ztost_prop(0.4, 0.6, np.arange(20, 210, 20),\n p_alt=0.5, alpha=0.05, discrete=False,\n dist='binom', variance_prop=0.5,\n critval_continuity=1)[0]\n\n res_power = np.array([0., 0., 0., 0.0889, 0.2356, 0.3517, 0.4457,\n 0.6154, 0.6674, 0.7112])\n # TODO: I currently do not impose power>=0, i.e np.maximum(power, 0)\n assert_almost_equal(np.maximum(power, 0), res_power, decimal=4)\n\n\ndef test_ztost():\n xfair = np.repeat([1, 0], [228, 762-228])\n\n # comparing to SAS last output at\n # http://support.sas.com/documentation/cdl/en/procstat/63104/HTML/default/viewer.htm#procstat_freq_sect028.htm\n # confidence interval for tost\n # generic ztost is moved to weightstats\n from statsmodels.stats.weightstats import zconfint, ztost\n ci01 = zconfint(xfair, alpha=0.1, ddof=0)\n assert_almost_equal(ci01, [0.2719, 0.3265], 4)\n res = ztost(xfair, 0.18, 0.38, ddof=0)\n\n assert_almost_equal(res[1][0], 7.1865, 4)\n assert_almost_equal(res[2][0], -4.8701, 4)\n assert_array_less(res[0], 0.0001)\n\n\ndef test_power_ztost_prop_norm():\n # regression test for normal distribution\n # from a rough comparison, the results and variations look reasonable\n with pytest.warns(HypothesisTestWarning):\n power = smprop.power_ztost_prop(0.4, 0.6, np.arange(20, 210, 20),\n p_alt=0.5, alpha=0.05, discrete=False,\n dist='norm', variance_prop=0.5,\n continuity=0, critval_continuity=0)[0]\n\n res_power = np.array([0., 0., 0., 0.11450013, 0.27752006, 0.41495922,\n 0.52944621, 0.62382638, 0.70092914, 0.76341806])\n # TODO: I currently do not impose power>=0, i.e np.maximum(power, 0)\n assert_almost_equal(np.maximum(power, 0), res_power, decimal=4)\n\n # regression test for normal distribution\n with pytest.warns(HypothesisTestWarning):\n power = smprop.power_ztost_prop(0.4, 0.6, np.arange(20, 210, 20),\n p_alt=0.5, alpha=0.05, discrete=False,\n dist='norm', variance_prop=0.5,\n continuity=1, critval_continuity=0)[0]\n\n res_power = np.array([0., 0., 0.02667562, 0.20189793, 0.35099606,\n 0.47608598, 0.57981118, 0.66496683, 0.73427591,\n 0.79026127])\n # TODO: I currently do not impose power>=0, i.e np.maximum(power, 0)\n assert_almost_equal(np.maximum(power, 0), res_power, decimal=4)\n\n # regression 
test for normal distribution\n with pytest.warns(HypothesisTestWarning):\n power = smprop.power_ztost_prop(0.4, 0.6, np.arange(20, 210, 20),\n p_alt=0.5, alpha=0.05, discrete=True,\n dist='norm', variance_prop=0.5,\n continuity=1, critval_continuity=0)[0]\n\n res_power = np.array([0., 0., 0., 0.08902071, 0.23582284, 0.35192313,\n 0.55312718, 0.61549537, 0.66743625, 0.77066806])\n # TODO: I currently do not impose power>=0, i.e np.maximum(power, 0)\n assert_almost_equal(np.maximum(power, 0), res_power, decimal=4)\n\n # regression test for normal distribution\n with pytest.warns(HypothesisTestWarning):\n power = smprop.power_ztost_prop(0.4, 0.6, np.arange(20, 210, 20),\n p_alt=0.5, alpha=0.05, discrete=True,\n dist='norm', variance_prop=0.5,\n continuity=1, critval_continuity=1)[0]\n\n res_power = np.array([0., 0., 0., 0.08902071, 0.23582284, 0.35192313,\n 0.44588687, 0.61549537, 0.66743625, 0.71115563])\n # TODO: I currently do not impose power>=0, i.e np.maximum(power, 0)\n assert_almost_equal(np.maximum(power, 0), res_power, decimal=4)\n\n # regression test for normal distribution\n with pytest.warns(HypothesisTestWarning):\n power = smprop.power_ztost_prop(0.4, 0.6, np.arange(20, 210, 20),\n p_alt=0.5, alpha=0.05, discrete=True,\n dist='norm', variance_prop=None,\n continuity=0, critval_continuity=0)[0]\n\n res_power = np.array([0., 0., 0., 0., 0.15851942, 0.41611758,\n 0.5010377, 0.5708047, 0.70328247, 0.74210096])\n # TODO: I currently do not impose power>=0, i.e np.maximum(power, 0)\n assert_almost_equal(np.maximum(power, 0), res_power, decimal=4)\n\n\ndef test_proportion_ztests():\n # currently only consistency test with proportions chisquare\n # Note: alternative handling is generic\n\n res1 = smprop.proportions_ztest(15, 20., value=0.5, prop_var=0.5)\n res2 = smprop.proportions_chisquare(15, 20., value=0.5)\n assert_almost_equal(res1[1], res2[1], decimal=13)\n\n res1 = smprop.proportions_ztest(np.asarray([15, 10]),\n np.asarray([20., 20]),\n value=0, prop_var=None)\n res2 = smprop.proportions_chisquare(np.asarray([15, 10]),\n np.asarray([20., 20]))\n # test only p-value\n assert_almost_equal(res1[1], res2[1], decimal=13)\n\n\ndef test_confint_2indep():\n # alpha = 0.05\n count1, nobs1 = 7, 34\n count2, nobs2 = 1, 34\n\n # result tables from Fagerland et al 2015\n '''\n diff:\n Wald 0.029 0.32 0.29\n Agresti–Caffo 0.012 0.32 0.31\n Newcombe hybrid score 0.019 0.34 0.32\n Miettinen–Nurminen asymptotic score 0.028 0.34 0.31\n Santner–Snell exact unconditional -0.069 0.41 0.48\n Chan–Zhang exact unconditional 0.019 0.36 0.34\n Agresti–Min exact unconditional 0.024 0.35 0.33\n\n ratio:\n Katz log 0.91 54 4.08\n Adjusted log 0.92 27 3.38\n Inverse sinh 1.17 42 3.58\n Koopman asymptotic score 1.21 43 3.57\n Chan–Zhang 1.22 181 5.00\n Agresti–Min 1.15 89 4.35\n\n odds-ratio\n Woolf logit 0.99 74 4.31\n Gart adjusted logit 0.98 38 3.65\n Independence-smoothed logit 0.99 60 4.11\n Cornfield exact conditional 0.97 397 6.01\n Cornfield mid-p 1.19 200 5.12\n Baptista–Pike exact conditional 1.00 195 5.28\n Baptista–Pike mid-p 1.33 99 4.31\n Agresti–Min exact unconditional 1.19 72 4.10\n ''' # pylint: disable=W0105\n ci = confint_proportions_2indep(count1, nobs1, count2, nobs2,\n method='newcomb',\n compare='diff', alpha=0.05)\n # one decimal to upp added from regression result\n assert_allclose(ci, [0.019, 0.340], atol=0.005)\n ci = confint_proportions_2indep(count1, nobs1, count2, nobs2,\n method='wald',\n compare='diff', alpha=0.05)\n assert_allclose(ci, [0.029, 0.324], atol=0.005)\n ci = 
confint_proportions_2indep(count1, nobs1, count2, nobs2,\n method='agresti-caffo',\n compare='diff', alpha=0.05)\n assert_allclose(ci, [0.012, 0.322], atol=0.005)\n ci = confint_proportions_2indep(count1, nobs1, count2, nobs2,\n compare='diff',\n method='score', correction=True)\n assert_allclose(ci, [0.028, 0.343], rtol=0.03)\n\n # ratio\n ci = confint_proportions_2indep(count1, nobs1, count2, nobs2,\n compare='ratio',\n method='log')\n assert_allclose(ci, [0.91, 54], rtol=0.01)\n ci = confint_proportions_2indep(count1, nobs1, count2, nobs2,\n compare='ratio',\n method='log-adjusted')\n assert_allclose(ci, [0.92, 27], rtol=0.01)\n ci = confint_proportions_2indep(count1, nobs1, count2, nobs2,\n compare='ratio',\n method='score', correction=False)\n assert_allclose(ci, [1.21, 43], rtol=0.01)\n\n # odds-ratio\n ci = confint_proportions_2indep(count1, nobs1, count2, nobs2,\n compare='or',\n method='logit')\n assert_allclose(ci, [0.99, 74], rtol=0.01)\n ci = confint_proportions_2indep(count1, nobs1, count2, nobs2,\n compare='or',\n method='logit-adjusted')\n assert_allclose(ci, [0.98, 38], rtol=0.01)\n ci = confint_proportions_2indep(count1, nobs1, count2, nobs2,\n compare='or',\n method='logit-smoothed')\n assert_allclose(ci, [0.99, 60], rtol=0.01)\n ci = confint_proportions_2indep(count1, nobs1, count2, nobs2,\n compare='odds-ratio',\n method='score', correction=True)\n # regression test\n assert_allclose(ci, [1.246622, 56.461576], rtol=0.01)\n\n\ndef test_confint_2indep_propcis():\n # unit tests compared to R package PropCis\n # alpha = 0.05\n count1, nobs1 = 7, 34\n count2, nobs2 = 1, 34\n\n # > library(PropCIs)\n # > diffscoreci(7, 34, 1, 34, 0.95)\n ci = 0.0270416, 0.3452912\n ci1 = confint_proportions_2indep(count1, nobs1, count2, nobs2,\n compare=\"diff\",\n method=\"score\", correction=False)\n assert_allclose(ci1, ci, atol=0.002) # lower agreement (iterative)\n # > wald2ci(7, 34, 1, 34, 0.95, adjust=\"AC\")\n ci = 0.01161167, 0.32172166\n ci1 = confint_proportions_2indep(count1, nobs1, count2, nobs2,\n compare=\"diff\",\n method=\"agresti-caffo\")\n assert_allclose(ci1, ci, atol=6e-7)\n # > wald2ci(7, 34, 1, 34, 0.95, adjust=\"Wald\")\n ci = 0.02916942, 0.32377176\n ci1 = confint_proportions_2indep(count1, nobs1, count2, nobs2,\n compare=\"diff\",\n method=\"wald\", correction=False)\n assert_allclose(ci1, ci, atol=6e-7)\n\n # > orscoreci(7, 34, 1, 34, 0.95)\n ci = 1.246309, 56.486130\n ci1 = confint_proportions_2indep(count1, nobs1, count2, nobs2,\n compare=\"odds-ratio\",\n method=\"score\", correction=True)\n assert_allclose(ci1, ci, rtol=5e-4) # lower agreement (iterative)\n\n # > riskscoreci(7, 34, 1, 34, 0.95)\n ci = 1.220853, 42.575718\n ci1 = confint_proportions_2indep(count1, nobs1, count2, nobs2,\n compare=\"ratio\",\n method=\"score\", correction=False)\n assert_allclose(ci1, ci, atol=6e-7)\n\n\ndef test_score_test_2indep():\n # this does not verify the statistic and pvalue yet\n count1, nobs1 = 7, 34\n count2, nobs2 = 1, 34\n\n for co in ['diff', 'ratio', 'or']:\n res = score_test_proportions_2indep(count1, nobs1, count2, nobs2,\n compare=co)\n assert_allclose(res.prop1_null, res.prop2_null, rtol=1e-10)\n\n # check that equality case is handled\n val = 0 if co == 'diff' else 1.\n s0, pv0 = score_test_proportions_2indep(count1, nobs1, count2, nobs2,\n compare=co, value=val,\n return_results=False)[:2]\n s1, pv1 = score_test_proportions_2indep(count1, nobs1, count2, nobs2,\n compare=co, value=val + 1e-10,\n return_results=False)[:2]\n assert_allclose(s0, s1, rtol=1e-8)\n 
assert_allclose(pv0, pv1, rtol=1e-8)\n s1, pv1 = score_test_proportions_2indep(count1, nobs1, count2, nobs2,\n compare=co, value=val - 1e-10,\n return_results=False)[:2]\n assert_allclose(s0, s1, rtol=1e-8)\n assert_allclose(pv0, pv1, rtol=1e-8)\n\n\ndef test_test_2indep():\n # this checks the pvalue of the hypothesis test at value equal to the\n # confidence limit\n alpha = 0.05\n count1, nobs1 = 7, 34\n count2, nobs2 = 1, 34\n\n methods_both = [\n ('diff', 'agresti-caffo'),\n # ('diff', 'newcomb'), # only confint\n ('diff', 'score'),\n ('diff', 'wald'),\n ('ratio', 'log'),\n ('ratio', 'log-adjusted'),\n ('ratio', 'score'),\n ('odds-ratio', 'logit'),\n ('odds-ratio', 'logit-adjusted'),\n ('odds-ratio', 'logit-smoothed'),\n ('odds-ratio', 'score'),\n ]\n\n for co, method in methods_both:\n low, upp = confint_proportions_2indep(count1, nobs1, count2, nobs2,\n compare=co, method=method,\n alpha=alpha, correction=False)\n\n res = smprop.test_proportions_2indep(\n count1, nobs1, count2, nobs2, value=low, compare=co,\n method=method, correction=False)\n assert_allclose(res.pvalue, alpha, atol=1e-10)\n\n res = smprop.test_proportions_2indep(\n count1, nobs1, count2, nobs2, value=upp, compare=co,\n method=method, correction=False)\n assert_allclose(res.pvalue, alpha, atol=1e-10)\n\n _, pv = smprop.test_proportions_2indep(\n count1, nobs1, count2, nobs2, value=upp, compare=co,\n method=method, alternative='smaller',\n correction=False, return_results=False)\n assert_allclose(pv, alpha / 2, atol=1e-10)\n\n _, pv = smprop.test_proportions_2indep(\n count1, nobs1, count2, nobs2, value=low, compare=co,\n method=method, alternative='larger',\n correction=False, return_results=False)\n assert_allclose(pv, alpha / 2, atol=1e-10)\n\n # test Miettinen/Nurminen small sample correction\n co, method = 'ratio', 'score'\n low, upp = confint_proportions_2indep(count1, nobs1, count2, nobs2,\n compare=co, method=method,\n alpha=alpha, correction=True)\n\n res = smprop.test_proportions_2indep(\n count1, nobs1, count2, nobs2, value=low, compare=co,\n method=method, correction=True)\n assert_allclose(res.pvalue, alpha, atol=1e-10)\n\n\ndef test_equivalence_2indep():\n # this checks the pvalue of the equivalence test at value equal to the\n # confidence limit\n alpha = 0.05\n count1, nobs1 = 7, 34\n count2, nobs2 = 1, 34\n\n methods_both = [\n ('diff', 'agresti-caffo'),\n # ('diff', 'newcomb'), # only confint\n ('diff', 'score'),\n ('diff', 'wald'),\n ('ratio', 'log'),\n ('ratio', 'log-adjusted'),\n ('ratio', 'score'),\n ('odds-ratio', 'logit'),\n ('odds-ratio', 'logit-adjusted'),\n ('odds-ratio', 'logit-smoothed'),\n ('odds-ratio', 'score'),\n ]\n\n for co, method in methods_both:\n low, upp = confint_proportions_2indep(count1, nobs1, count2, nobs2,\n compare=co, method=method,\n alpha=2 * alpha,\n correction=False)\n\n res = smprop.tost_proportions_2indep(\n count1, nobs1, count2, nobs2, low, upp, compare=co,\n method=method, correction=False)\n assert_allclose(res.pvalue, alpha, atol=1e-10)\n\n\ndef test_score_confint_koopman_nam():\n\n # example Koopman, based on Nam 1995\n\n x0, n0 = 16, 80\n x1, n1 = 36, 40\n # x = x0 + x1\n # n = n0 + n1\n # p0 = x0 / n0\n # p1 = x1 / n1\n\n results_nam = Holder()\n results_nam.p0_roots = [0.1278, 0.2939, 0.4876]\n results_nam.conf_int = [2.940, 7.152]\n\n res = smprop._confint_riskratio_koopman(x1, n1, x0, n0, alpha=0.05)\n\n assert_allclose(res._p_roots, results_nam.p0_roots, atol=4)\n assert_allclose(res.confint, results_nam.conf_int, atol=3)\n\n table = [67, 9, 7, 16] # 
[67, 7, 9, 16]\n resp = smprop._confint_riskratio_paired_nam(table, alpha=0.05)\n # TODO: currently regression test, need verified results\n ci_old = [0.917832, 1.154177]\n assert_allclose(resp.confint, ci_old, atol=3)\n\n\ndef test_power_2indep():\n # test against R\n pow_ = power_proportions_2indep(-0.25, 0.75, 76.70692)\n assert_allclose(pow_.power, 0.9, atol=1e-8)\n\n n = samplesize_proportions_2indep_onetail(-0.25, 0.75, 0.9, ratio=1,\n alpha=0.05, value=0,\n alternative='two-sided')\n assert_allclose(n, 76.70692, atol=1e-5)\n\n power_proportions_2indep(-0.25, 0.75, 62.33551, alternative=\"smaller\")\n assert_allclose(pow_.power, 0.9, atol=1e-8)\n\n pow_ = power_proportions_2indep(0.25, 0.5, 62.33551, alternative=\"smaller\")\n assert_array_less(pow_.power, 0.05)\n\n pow_ = power_proportions_2indep(0.25, 0.5, 62.33551, alternative=\"larger\",\n return_results=False)\n assert_allclose(pow_, 0.9, atol=1e-8)\n\n pow_ = power_proportions_2indep(-0.15, 0.65, 83.4373, return_results=False)\n assert_allclose(pow_, 0.5, atol=1e-8)\n\n n = samplesize_proportions_2indep_onetail(-0.15, 0.65, 0.5, ratio=1,\n alpha=0.05, value=0,\n alternative='two-sided')\n\n assert_allclose(n, 83.4373, atol=0.05)\n\n # Stata example\n from statsmodels.stats.power import normal_sample_size_one_tail\n res = power_proportions_2indep(-0.014, 0.015, 550, ratio=1.)\n assert_allclose(res.power, 0.7415600, atol=1e-7)\n n = normal_sample_size_one_tail(-0.014, 0.7415600, 0.05 / 2,\n std_null=res.std_null,\n std_alternative=res.std_alt)\n assert_allclose(n, 550, atol=0.05)\n n2 = samplesize_proportions_2indep_onetail(-0.014, 0.015, 0.7415600,\n ratio=1, alpha=0.05, value=0,\n alternative='two-sided')\n assert_allclose(n2, n, rtol=1e-13)\n", "\"\"\"\nThe legend module defines the Legend class, which is responsible for\ndrawing legends associated with axes and/or figures.\n\n.. important::\n\n It is unlikely that you would ever create a Legend instance manually.\n Most users would normally create a legend via the `~.Axes.legend`\n function. For more details on legends there is also a :doc:`legend guide\n </tutorials/intermediate/legend_guide>`.\n\nThe `Legend` class is a container of legend handles and legend texts.\n\nThe legend handler map specifies how to create legend handles from artists\n(lines, patches, etc.) in the axes or figures. Default legend handlers are\ndefined in the :mod:`~matplotlib.legend_handler` module. 
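A handler map is a plain dict mapping artist types (or instances) to handler
objects; as a rough sketch (``MyArtist`` and ``MyHandler`` are hypothetical
names standing in for a user-defined artist and handler), a custom entry could
be passed as::

    ax.legend(handler_map={MyArtist: MyHandler()})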
While not all artist\ntypes are covered by the default legend handlers, custom legend handlers can be\ndefined to support arbitrary objects.\n\nSee the :doc:`legend guide </tutorials/intermediate/legend_guide>` for more\ninformation.\n\"\"\"\n\nimport itertools\nimport logging\nimport time\n\nimport numpy as np\n\nimport matplotlib as mpl\nfrom matplotlib import _api, docstring, colors\nfrom matplotlib.artist import Artist, allow_rasterization\nfrom matplotlib.cbook import silent_list\nfrom matplotlib.font_manager import FontProperties\nfrom matplotlib.lines import Line2D\nfrom matplotlib.patches import (Patch, Rectangle, Shadow, FancyBboxPatch,\n StepPatch)\nfrom matplotlib.collections import (LineCollection, RegularPolyCollection,\n CircleCollection, PathCollection,\n PolyCollection)\nfrom matplotlib.transforms import Bbox, BboxBase, TransformedBbox\nfrom matplotlib.transforms import BboxTransformTo, BboxTransformFrom\n\nfrom matplotlib.offsetbox import HPacker, VPacker, TextArea, DrawingArea\nfrom matplotlib.offsetbox import DraggableOffsetBox\n\nfrom matplotlib.container import ErrorbarContainer, BarContainer, StemContainer\nfrom . import legend_handler\n\n\nclass DraggableLegend(DraggableOffsetBox):\n def __init__(self, legend, use_blit=False, update=\"loc\"):\n \"\"\"\n Wrapper around a `.Legend` to support mouse dragging.\n\n Parameters\n ----------\n legend : `.Legend`\n The `.Legend` instance to wrap.\n use_blit : bool, optional\n Use blitting for faster image composition. For details see\n :ref:`func-animation`.\n update : {'loc', 'bbox'}, optional\n If \"loc\", update the *loc* parameter of the legend upon finalizing.\n If \"bbox\", update the *bbox_to_anchor* parameter.\n \"\"\"\n self.legend = legend\n\n _api.check_in_list([\"loc\", \"bbox\"], update=update)\n self._update = update\n\n super().__init__(legend, legend._legend_box, use_blit=use_blit)\n\n def finalize_offset(self):\n if self._update == \"loc\":\n self._update_loc(self.get_loc_in_canvas())\n elif self._update == \"bbox\":\n self._bbox_to_anchor(self.get_loc_in_canvas())\n\n def _update_loc(self, loc_in_canvas):\n bbox = self.legend.get_bbox_to_anchor()\n # if bbox has zero width or height, the transformation is\n # ill-defined. Fall back to the default bbox_to_anchor.\n if bbox.width == 0 or bbox.height == 0:\n self.legend.set_bbox_to_anchor(None)\n bbox = self.legend.get_bbox_to_anchor()\n _bbox_transform = BboxTransformFrom(bbox)\n self.legend._loc = tuple(_bbox_transform.transform(loc_in_canvas))\n\n def _update_bbox_to_anchor(self, loc_in_canvas):\n loc_in_bbox = self.legend.axes.transAxes.transform(loc_in_canvas)\n self.legend.set_bbox_to_anchor(loc_in_bbox)\n\n\ndocstring.interpd.update(_legend_kw_doc=\"\"\"\nloc : str or pair of floats, default: :rc:`legend.loc` ('best' for axes, \\\n'upper right' for figures)\n The location of the legend.\n\n The strings\n ``'upper left', 'upper right', 'lower left', 'lower right'``\n place the legend at the corresponding corner of the axes/figure.\n\n The strings\n ``'upper center', 'lower center', 'center left', 'center right'``\n place the legend at the center of the corresponding edge of the\n axes/figure.\n\n The string ``'center'`` places the legend at the center of the axes/figure.\n\n The string ``'best'`` places the legend at the location, among the nine\n locations defined so far, with the minimum overlap with other drawn\n artists. 
This option can be quite slow for plots with large amounts of\n data; your plotting speed may benefit from providing a specific location.\n\n The location can also be a 2-tuple giving the coordinates of the lower-left\n corner of the legend in axes coordinates (in which case *bbox_to_anchor*\n will be ignored).\n\n For back-compatibility, ``'center right'`` (but no other location) can also\n be spelled ``'right'``, and each \"string\" locations can also be given as a\n numeric value:\n\n =============== =============\n Location String Location Code\n =============== =============\n 'best' 0\n 'upper right' 1\n 'upper left' 2\n 'lower left' 3\n 'lower right' 4\n 'right' 5\n 'center left' 6\n 'center right' 7\n 'lower center' 8\n 'upper center' 9\n 'center' 10\n =============== =============\n\nbbox_to_anchor : `.BboxBase`, 2-tuple, or 4-tuple of floats\n Box that is used to position the legend in conjunction with *loc*.\n Defaults to `axes.bbox` (if called as a method to `.Axes.legend`) or\n `figure.bbox` (if `.Figure.legend`). This argument allows arbitrary\n placement of the legend.\n\n Bbox coordinates are interpreted in the coordinate system given by\n *bbox_transform*, with the default transform\n Axes or Figure coordinates, depending on which ``legend`` is called.\n\n If a 4-tuple or `.BboxBase` is given, then it specifies the bbox\n ``(x, y, width, height)`` that the legend is placed in.\n To put the legend in the best location in the bottom right\n quadrant of the axes (or figure)::\n\n loc='best', bbox_to_anchor=(0.5, 0., 0.5, 0.5)\n\n A 2-tuple ``(x, y)`` places the corner of the legend specified by *loc* at\n x, y. For example, to put the legend's upper right-hand corner in the\n center of the axes (or figure) the following keywords can be used::\n\n loc='upper right', bbox_to_anchor=(0.5, 0.5)\n\nncol : int, default: 1\n The number of columns that the legend has.\n\nprop : None or `matplotlib.font_manager.FontProperties` or dict\n The font properties of the legend. If None (default), the current\n :data:`matplotlib.rcParams` will be used.\n\nfontsize : int or {'xx-small', 'x-small', 'small', 'medium', 'large', \\\n'x-large', 'xx-large'}\n The font size of the legend. If the value is numeric the size will be the\n absolute font size in points. String values are relative to the current\n default font size. This argument is only used if *prop* is not specified.\n\nlabelcolor : str or list\n The color of the text in the legend. Either a valid color string\n (for example, 'red'), or a list of color strings. The labelcolor can\n also be made to match the color of the line or marker using 'linecolor',\n 'markerfacecolor' (or 'mfc'), or 'markeredgecolor' (or 'mec').\n\nnumpoints : int, default: :rc:`legend.numpoints`\n The number of marker points in the legend when creating a legend\n entry for a `.Line2D` (line).\n\nscatterpoints : int, default: :rc:`legend.scatterpoints`\n The number of marker points in the legend when creating\n a legend entry for a `.PathCollection` (scatter plot).\n\nscatteryoffsets : iterable of floats, default: ``[0.375, 0.5, 0.3125]``\n The vertical offset (relative to the font size) for the markers\n created for a scatter plot legend entry. 0.0 is at the base the\n legend text, and 1.0 is at the top. 
To draw all markers at the\n same height, set to ``[0.5]``.\n\nmarkerscale : float, default: :rc:`legend.markerscale`\n The relative size of legend markers compared with the originally\n drawn ones.\n\nmarkerfirst : bool, default: True\n If *True*, legend marker is placed to the left of the legend label.\n If *False*, legend marker is placed to the right of the legend label.\n\nframeon : bool, default: :rc:`legend.frameon`\n Whether the legend should be drawn on a patch (frame).\n\nfancybox : bool, default: :rc:`legend.fancybox`\n Whether round edges should be enabled around the `~.FancyBboxPatch` which\n makes up the legend's background.\n\nshadow : bool, default: :rc:`legend.shadow`\n Whether to draw a shadow behind the legend.\n\nframealpha : float, default: :rc:`legend.framealpha`\n The alpha transparency of the legend's background.\n If *shadow* is activated and *framealpha* is ``None``, the default value is\n ignored.\n\nfacecolor : \"inherit\" or color, default: :rc:`legend.facecolor`\n The legend's background color.\n If ``\"inherit\"``, use :rc:`axes.facecolor`.\n\nedgecolor : \"inherit\" or color, default: :rc:`legend.edgecolor`\n The legend's background patch edge color.\n If ``\"inherit\"``, use take :rc:`axes.edgecolor`.\n\nmode : {\"expand\", None}\n If *mode* is set to ``\"expand\"`` the legend will be horizontally\n expanded to fill the axes area (or *bbox_to_anchor* if defines\n the legend's size).\n\nbbox_transform : None or `matplotlib.transforms.Transform`\n The transform for the bounding box (*bbox_to_anchor*). For a value\n of ``None`` (default) the Axes'\n :data:`~matplotlib.axes.Axes.transAxes` transform will be used.\n\ntitle : str or None\n The legend's title. Default is no title (``None``).\n\ntitle_fontsize : int or {'xx-small', 'x-small', 'small', 'medium', 'large', \\\n'x-large', 'xx-large'}, default: :rc:`legend.title_fontsize`\n The font size of the legend's title.\n\nborderpad : float, default: :rc:`legend.borderpad`\n The fractional whitespace inside the legend border, in font-size units.\n\nlabelspacing : float, default: :rc:`legend.labelspacing`\n The vertical space between the legend entries, in font-size units.\n\nhandlelength : float, default: :rc:`legend.handlelength`\n The length of the legend handles, in font-size units.\n\nhandletextpad : float, default: :rc:`legend.handletextpad`\n The pad between the legend handle and text, in font-size units.\n\nborderaxespad : float, default: :rc:`legend.borderaxespad`\n The pad between the axes and legend border, in font-size units.\n\ncolumnspacing : float, default: :rc:`legend.columnspacing`\n The spacing between columns, in font-size units.\n\nhandler_map : dict or None\n The custom dictionary mapping instances or types to a legend\n handler. This *handler_map* updates the default handler map\n found at `matplotlib.legend.Legend.get_legend_handler_map`.\n\"\"\")\n\n\nclass Legend(Artist):\n \"\"\"\n Place a legend on the axes at location loc.\n\n \"\"\"\n codes = {'best': 0, # only implemented for axes legends\n 'upper right': 1,\n 'upper left': 2,\n 'lower left': 3,\n 'lower right': 4,\n 'right': 5,\n 'center left': 6,\n 'center right': 7,\n 'lower center': 8,\n 'upper center': 9,\n 'center': 10,\n }\n\n zorder = 5\n\n def __str__(self):\n return \"Legend\"\n\n @docstring.dedent_interpd\n def __init__(self, parent, handles, labels,\n loc=None,\n numpoints=None, # the number of points in the legend line\n markerscale=None, # the relative size of legend markers\n # vs. 
original\n markerfirst=True, # controls ordering (left-to-right) of\n # legend marker and label\n scatterpoints=None, # number of scatter points\n scatteryoffsets=None,\n prop=None, # properties for the legend texts\n fontsize=None, # keyword to set font size directly\n labelcolor=None, # keyword to set the text color\n\n # spacing & pad defined as a fraction of the font-size\n borderpad=None, # the whitespace inside the legend border\n labelspacing=None, # the vertical space between the legend\n # entries\n handlelength=None, # the length of the legend handles\n handleheight=None, # the height of the legend handles\n handletextpad=None, # the pad between the legend handle\n # and text\n borderaxespad=None, # the pad between the axes and legend\n # border\n columnspacing=None, # spacing between columns\n\n ncol=1, # number of columns\n mode=None, # mode for horizontal distribution of columns.\n # None, \"expand\"\n\n fancybox=None, # True use a fancy box, false use a rounded\n # box, none use rc\n shadow=None,\n title=None, # set a title for the legend\n title_fontsize=None, # the font size for the title\n framealpha=None, # set frame alpha\n edgecolor=None, # frame patch edgecolor\n facecolor=None, # frame patch facecolor\n\n bbox_to_anchor=None, # bbox that the legend will be anchored.\n bbox_transform=None, # transform for the bbox\n frameon=None, # draw frame\n handler_map=None,\n ):\n \"\"\"\n Parameters\n ----------\n parent : `~matplotlib.axes.Axes` or `.Figure`\n The artist that contains the legend.\n\n handles : list of `.Artist`\n A list of Artists (lines, patches) to be added to the legend.\n\n labels : list of str\n A list of labels to show next to the artists. The length of handles\n and labels should be the same. If they are not, they are truncated\n to the smaller of both lengths.\n\n Other Parameters\n ----------------\n %(_legend_kw_doc)s\n\n Notes\n -----\n Users can specify any arbitrary location for the legend using the\n *bbox_to_anchor* keyword argument. 
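        A minimal sketch (assuming ``ax`` is an Axes that already holds
        labeled artists) would be::

            ax.legend(loc='upper left', bbox_to_anchor=(1.02, 1.0))

        which anchors the legend's upper-left corner just outside the axes
        on the right.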
*bbox_to_anchor* can be a\n `.BboxBase` (or derived therefrom) or a tuple of 2 or 4 floats.\n See `set_bbox_to_anchor` for more detail.\n\n The legend location can be specified by setting *loc* with a tuple of\n 2 floats, which is interpreted as the lower-left corner of the legend\n in the normalized axes coordinate.\n \"\"\"\n # local import only to avoid circularity\n from matplotlib.axes import Axes\n from matplotlib.figure import Figure\n\n super().__init__()\n\n if prop is None:\n if fontsize is not None:\n self.prop = FontProperties(size=fontsize)\n else:\n self.prop = FontProperties(\n size=mpl.rcParams[\"legend.fontsize\"])\n else:\n self.prop = FontProperties._from_any(prop)\n if isinstance(prop, dict) and \"size\" not in prop:\n self.prop.set_size(mpl.rcParams[\"legend.fontsize\"])\n\n self._fontsize = self.prop.get_size_in_points()\n\n self.texts = []\n self.legendHandles = []\n self._legend_title_box = None\n\n #: A dictionary with the extra handler mappings for this Legend\n #: instance.\n self._custom_handler_map = handler_map\n\n locals_view = locals()\n for name in [\"numpoints\", \"markerscale\", \"shadow\", \"columnspacing\",\n \"scatterpoints\", \"handleheight\", 'borderpad',\n 'labelspacing', 'handlelength', 'handletextpad',\n 'borderaxespad']:\n if locals_view[name] is None:\n value = mpl.rcParams[\"legend.\" + name]\n else:\n value = locals_view[name]\n setattr(self, name, value)\n del locals_view\n # trim handles and labels if illegal label...\n _lab, _hand = [], []\n for label, handle in zip(labels, handles):\n if isinstance(label, str) and label.startswith('_'):\n _api.warn_external('The handle {!r} has a label of {!r} '\n 'which cannot be automatically added to'\n ' the legend.'.format(handle, label))\n else:\n _lab.append(label)\n _hand.append(handle)\n labels, handles = _lab, _hand\n\n handles = list(handles)\n if len(handles) < 2:\n ncol = 1\n self._ncol = ncol\n\n if self.numpoints <= 0:\n raise ValueError(\"numpoints must be > 0; it was %d\" % numpoints)\n\n # introduce y-offset for handles of the scatter plot\n if scatteryoffsets is None:\n self._scatteryoffsets = np.array([3. / 8., 4. / 8., 2.5 / 8.])\n else:\n self._scatteryoffsets = np.asarray(scatteryoffsets)\n reps = self.scatterpoints // len(self._scatteryoffsets) + 1\n self._scatteryoffsets = np.tile(self._scatteryoffsets,\n reps)[:self.scatterpoints]\n\n # _legend_box is a VPacker instance that contains all\n # legend items and will be initialized from _init_legend_box()\n # method.\n self._legend_box = None\n\n if isinstance(parent, Axes):\n self.isaxes = True\n self.axes = parent\n self.set_figure(parent.figure)\n elif isinstance(parent, Figure):\n self.isaxes = False\n self.set_figure(parent)\n else:\n raise TypeError(\"Legend needs either Axes or Figure as parent\")\n self.parent = parent\n\n self._loc_used_default = loc is None\n if loc is None:\n loc = mpl.rcParams[\"legend.loc\"]\n if not self.isaxes and loc in [0, 'best']:\n loc = 'upper right'\n if isinstance(loc, str):\n if loc not in self.codes:\n raise ValueError(\n \"Unrecognized location {!r}. Valid locations are\\n\\t{}\\n\"\n .format(loc, '\\n\\t'.join(self.codes)))\n else:\n loc = self.codes[loc]\n if not self.isaxes and loc == 0:\n raise ValueError(\n \"Automatic legend placement (loc='best') not implemented for \"\n \"figure legend.\")\n\n self._mode = mode\n self.set_bbox_to_anchor(bbox_to_anchor, bbox_transform)\n\n # We use FancyBboxPatch to draw a legend frame. 
The location\n # and size of the box will be updated during the drawing time.\n\n if facecolor is None:\n facecolor = mpl.rcParams[\"legend.facecolor\"]\n if facecolor == 'inherit':\n facecolor = mpl.rcParams[\"axes.facecolor\"]\n\n if edgecolor is None:\n edgecolor = mpl.rcParams[\"legend.edgecolor\"]\n if edgecolor == 'inherit':\n edgecolor = mpl.rcParams[\"axes.edgecolor\"]\n\n if fancybox is None:\n fancybox = mpl.rcParams[\"legend.fancybox\"]\n\n self.legendPatch = FancyBboxPatch(\n xy=(0, 0), width=1, height=1,\n facecolor=facecolor, edgecolor=edgecolor,\n # If shadow is used, default to alpha=1 (#8943).\n alpha=(framealpha if framealpha is not None\n else 1 if shadow\n else mpl.rcParams[\"legend.framealpha\"]),\n # The width and height of the legendPatch will be set (in draw())\n # to the length that includes the padding. Thus we set pad=0 here.\n boxstyle=(\"round,pad=0,rounding_size=0.2\" if fancybox\n else \"square,pad=0\"),\n mutation_scale=self._fontsize,\n snap=True,\n visible=(frameon if frameon is not None\n else mpl.rcParams[\"legend.frameon\"])\n )\n self._set_artist_props(self.legendPatch)\n\n # init with null renderer\n self._init_legend_box(handles, labels, markerfirst)\n\n tmp = self._loc_used_default\n self._set_loc(loc)\n self._loc_used_default = tmp # ignore changes done by _set_loc\n\n # figure out title fontsize:\n if title_fontsize is None:\n title_fontsize = mpl.rcParams['legend.title_fontsize']\n tprop = FontProperties(size=title_fontsize)\n self.set_title(title, prop=tprop)\n self._draggable = None\n\n # set the text color\n\n color_getters = { # getter function depends on line or patch\n 'linecolor': ['get_color', 'get_facecolor'],\n 'markerfacecolor': ['get_markerfacecolor', 'get_facecolor'],\n 'mfc': ['get_markerfacecolor', 'get_facecolor'],\n 'markeredgecolor': ['get_markeredgecolor', 'get_edgecolor'],\n 'mec': ['get_markeredgecolor', 'get_edgecolor'],\n }\n if labelcolor is None:\n pass\n elif isinstance(labelcolor, str) and labelcolor in color_getters:\n getter_names = color_getters[labelcolor]\n for handle, text in zip(self.legendHandles, self.texts):\n for getter_name in getter_names:\n try:\n color = getattr(handle, getter_name)()\n text.set_color(color)\n break\n except AttributeError:\n pass\n elif np.iterable(labelcolor):\n for text, color in zip(self.texts,\n itertools.cycle(\n colors.to_rgba_array(labelcolor))):\n text.set_color(color)\n else:\n raise ValueError(\"Invalid argument for labelcolor : %s\" %\n str(labelcolor))\n\n def _set_artist_props(self, a):\n \"\"\"\n Set the boilerplate props for artists added to axes.\n \"\"\"\n a.set_figure(self.figure)\n if self.isaxes:\n # a.set_axes(self.axes)\n a.axes = self.axes\n\n a.set_transform(self.get_transform())\n\n def _set_loc(self, loc):\n # find_offset function will be provided to _legend_box and\n # _legend_box will draw itself at the location of the return\n # value of the find_offset.\n self._loc_used_default = False\n self._loc_real = loc\n self.stale = True\n self._legend_box.set_offset(self._findoffset)\n\n def _get_loc(self):\n return self._loc_real\n\n _loc = property(_get_loc, _set_loc)\n\n def _findoffset(self, width, height, xdescent, ydescent, renderer):\n \"\"\"Helper function to locate the legend.\"\"\"\n\n if self._loc == 0: # \"best\".\n x, y = self._find_best_position(width, height, renderer)\n elif self._loc in Legend.codes.values(): # Fixed location.\n bbox = Bbox.from_bounds(0, 0, width, height)\n x, y = self._get_anchored_bbox(self._loc, bbox,\n 
self.get_bbox_to_anchor(),\n renderer)\n else: # Axes or figure coordinates.\n fx, fy = self._loc\n bbox = self.get_bbox_to_anchor()\n x, y = bbox.x0 + bbox.width * fx, bbox.y0 + bbox.height * fy\n\n return x + xdescent, y + ydescent\n\n @allow_rasterization\n def draw(self, renderer):\n # docstring inherited\n if not self.get_visible():\n return\n\n renderer.open_group('legend', gid=self.get_gid())\n\n fontsize = renderer.points_to_pixels(self._fontsize)\n\n # if mode == fill, set the width of the legend_box to the\n # width of the parent (minus pads)\n if self._mode in [\"expand\"]:\n pad = 2 * (self.borderaxespad + self.borderpad) * fontsize\n self._legend_box.set_width(self.get_bbox_to_anchor().width - pad)\n\n # update the location and size of the legend. This needs to\n # be done in any case to clip the figure right.\n bbox = self._legend_box.get_window_extent(renderer)\n self.legendPatch.set_bounds(bbox.x0, bbox.y0, bbox.width, bbox.height)\n self.legendPatch.set_mutation_scale(fontsize)\n\n if self.shadow:\n Shadow(self.legendPatch, 2, -2).draw(renderer)\n\n self.legendPatch.draw(renderer)\n self._legend_box.draw(renderer)\n\n renderer.close_group('legend')\n self.stale = False\n\n # _default_handler_map defines the default mapping between plot\n # elements and the legend handlers.\n\n _default_handler_map = {\n StemContainer: legend_handler.HandlerStem(),\n ErrorbarContainer: legend_handler.HandlerErrorbar(),\n Line2D: legend_handler.HandlerLine2D(),\n Patch: legend_handler.HandlerPatch(),\n StepPatch: legend_handler.HandlerStepPatch(),\n LineCollection: legend_handler.HandlerLineCollection(),\n RegularPolyCollection: legend_handler.HandlerRegularPolyCollection(),\n CircleCollection: legend_handler.HandlerCircleCollection(),\n BarContainer: legend_handler.HandlerPatch(\n update_func=legend_handler.update_from_first_child),\n tuple: legend_handler.HandlerTuple(),\n PathCollection: legend_handler.HandlerPathCollection(),\n PolyCollection: legend_handler.HandlerPolyCollection()\n }\n\n # (get|set|update)_default_handler_maps are public interfaces to\n # modify the default handler map.\n\n @classmethod\n def get_default_handler_map(cls):\n \"\"\"\n A class method that returns the default handler map.\n \"\"\"\n return cls._default_handler_map\n\n @classmethod\n def set_default_handler_map(cls, handler_map):\n \"\"\"\n A class method to set the default handler map.\n \"\"\"\n cls._default_handler_map = handler_map\n\n @classmethod\n def update_default_handler_map(cls, handler_map):\n \"\"\"\n A class method to update the default handler map.\n \"\"\"\n cls._default_handler_map.update(handler_map)\n\n def get_legend_handler_map(self):\n \"\"\"\n Return the handler map.\n \"\"\"\n\n default_handler_map = self.get_default_handler_map()\n\n if self._custom_handler_map:\n hm = default_handler_map.copy()\n hm.update(self._custom_handler_map)\n return hm\n else:\n return default_handler_map\n\n @staticmethod\n def get_legend_handler(legend_handler_map, orig_handle):\n \"\"\"\n Return a legend handler from *legend_handler_map* that\n corresponds to *orig_handler*.\n\n *legend_handler_map* should be a dictionary object (that is\n returned by the get_legend_handler_map method).\n\n It first checks if the *orig_handle* itself is a key in the\n *legend_handler_map* and return the associated value.\n Otherwise, it checks for each of the classes in its\n method-resolution-order. 
If no matching key is found, it\n returns ``None``.\n \"\"\"\n try:\n return legend_handler_map[orig_handle]\n except (TypeError, KeyError): # TypeError if unhashable.\n pass\n for handle_type in type(orig_handle).mro():\n try:\n return legend_handler_map[handle_type]\n except KeyError:\n pass\n return None\n\n def _init_legend_box(self, handles, labels, markerfirst=True):\n \"\"\"\n Initialize the legend_box. The legend_box is an instance of\n the OffsetBox, which is packed with legend handles and\n texts. Once packed, their location is calculated during the\n drawing time.\n \"\"\"\n\n fontsize = self._fontsize\n\n # legend_box is a HPacker, horizontally packed with\n # columns. Each column is a VPacker, vertically packed with\n # legend items. Each legend item is HPacker packed with\n # legend handleBox and labelBox. handleBox is an instance of\n # offsetbox.DrawingArea which contains legend handle. labelBox\n # is an instance of offsetbox.TextArea which contains legend\n # text.\n\n text_list = [] # the list of text instances\n handle_list = [] # the list of text instances\n handles_and_labels = []\n\n label_prop = dict(verticalalignment='baseline',\n horizontalalignment='left',\n fontproperties=self.prop,\n )\n\n # The approximate height and descent of text. These values are\n # only used for plotting the legend handle.\n descent = 0.35 * fontsize * (self.handleheight - 0.7)\n # 0.35 and 0.7 are just heuristic numbers and may need to be improved.\n height = fontsize * self.handleheight - descent\n # each handle needs to be drawn inside a box of (x, y, w, h) =\n # (0, -descent, width, height). And their coordinates should\n # be given in the display coordinates.\n\n # The transformation of each handle will be automatically set\n # to self.get_transform(). If the artist does not use its\n # default transform (e.g., Collections), you need to\n # manually set their transform to the self.get_transform().\n legend_handler_map = self.get_legend_handler_map()\n\n for orig_handle, lab in zip(handles, labels):\n handler = self.get_legend_handler(legend_handler_map, orig_handle)\n if handler is None:\n _api.warn_external(\n \"Legend does not support {!r} instances.\\nA proxy artist \"\n \"may be used instead.\\nSee: \"\n \"https://matplotlib.org/users/legend_guide.html\"\n \"#creating-artists-specifically-for-adding-to-the-legend-\"\n \"aka-proxy-artists\".format(orig_handle))\n # We don't have a handle for this artist, so we just defer\n # to None.\n handle_list.append(None)\n else:\n textbox = TextArea(lab, textprops=label_prop,\n multilinebaseline=True)\n handlebox = DrawingArea(width=self.handlelength * fontsize,\n height=height,\n xdescent=0., ydescent=descent)\n\n text_list.append(textbox._text)\n # Create the artist for the legend which represents the\n # original artist/handle.\n handle_list.append(handler.legend_artist(self, orig_handle,\n fontsize, handlebox))\n handles_and_labels.append((handlebox, textbox))\n\n if handles_and_labels:\n # We calculate number of rows in each column. 
The first\n # (num_largecol) columns will have (nrows+1) rows, and remaining\n # (num_smallcol) columns will have (nrows) rows.\n ncol = min(self._ncol, len(handles_and_labels))\n nrows, num_largecol = divmod(len(handles_and_labels), ncol)\n num_smallcol = ncol - num_largecol\n # starting index of each column and number of rows in it.\n rows_per_col = [nrows + 1] * num_largecol + [nrows] * num_smallcol\n start_idxs = np.concatenate([[0], np.cumsum(rows_per_col)[:-1]])\n cols = zip(start_idxs, rows_per_col)\n else:\n cols = []\n\n columnbox = []\n for i0, di in cols:\n # pack handleBox and labelBox into itemBox\n itemBoxes = [HPacker(pad=0,\n sep=self.handletextpad * fontsize,\n children=[h, t] if markerfirst else [t, h],\n align=\"baseline\")\n for h, t in handles_and_labels[i0:i0 + di]]\n # pack columnBox\n alignment = \"baseline\" if markerfirst else \"right\"\n columnbox.append(VPacker(pad=0,\n sep=self.labelspacing * fontsize,\n align=alignment,\n children=itemBoxes))\n\n mode = \"expand\" if self._mode == \"expand\" else \"fixed\"\n sep = self.columnspacing * fontsize\n self._legend_handle_box = HPacker(pad=0,\n sep=sep, align=\"baseline\",\n mode=mode,\n children=columnbox)\n self._legend_title_box = TextArea(\"\")\n self._legend_box = VPacker(pad=self.borderpad * fontsize,\n sep=self.labelspacing * fontsize,\n align=\"center\",\n children=[self._legend_title_box,\n self._legend_handle_box])\n self._legend_box.set_figure(self.figure)\n self.texts = text_list\n self.legendHandles = handle_list\n\n def _auto_legend_data(self):\n \"\"\"\n Return display coordinates for hit testing for \"best\" positioning.\n\n Returns\n -------\n bboxes\n List of bounding boxes of all patches.\n lines\n List of `.Path` corresponding to each line.\n offsets\n List of (x, y) offsets of all collection.\n \"\"\"\n assert self.isaxes # always holds, as this is only called internally\n ax = self.parent\n lines = [line.get_transform().transform_path(line.get_path())\n for line in ax.lines]\n bboxes = [patch.get_bbox().transformed(patch.get_data_transform())\n if isinstance(patch, Rectangle) else\n patch.get_path().get_extents(patch.get_transform())\n for patch in ax.patches]\n offsets = []\n for handle in ax.collections:\n _, transOffset, hoffsets, _ = handle._prepare_points()\n for offset in transOffset.transform(hoffsets):\n offsets.append(offset)\n return bboxes, lines, offsets\n\n def get_children(self):\n # docstring inherited\n return [self._legend_box, self.get_frame()]\n\n def get_frame(self):\n \"\"\"Return the `~.patches.Rectangle` used to frame the legend.\"\"\"\n return self.legendPatch\n\n def get_lines(self):\n r\"\"\"Return the list of `~.lines.Line2D`\\s in the legend.\"\"\"\n return [h for h in self.legendHandles if isinstance(h, Line2D)]\n\n def get_patches(self):\n r\"\"\"Return the list of `~.patches.Patch`\\s in the legend.\"\"\"\n return silent_list('Patch',\n [h for h in self.legendHandles\n if isinstance(h, Patch)])\n\n def get_texts(self):\n r\"\"\"Return the list of `~.text.Text`\\s in the legend.\"\"\"\n return silent_list('Text', self.texts)\n\n def set_title(self, title, prop=None):\n \"\"\"\n Set the legend title. 
Fontproperties can be optionally set\n with *prop* parameter.\n \"\"\"\n self._legend_title_box._text.set_text(title)\n if title:\n self._legend_title_box._text.set_visible(True)\n self._legend_title_box.set_visible(True)\n else:\n self._legend_title_box._text.set_visible(False)\n self._legend_title_box.set_visible(False)\n\n if prop is not None:\n self._legend_title_box._text.set_fontproperties(prop)\n\n self.stale = True\n\n def get_title(self):\n \"\"\"Return the `.Text` instance for the legend title.\"\"\"\n return self._legend_title_box._text\n\n def get_window_extent(self, renderer=None):\n # docstring inherited\n if renderer is None:\n renderer = self.figure._cachedRenderer\n return self._legend_box.get_window_extent(renderer=renderer)\n\n def get_tightbbox(self, renderer):\n \"\"\"\n Like `.Legend.get_window_extent`, but uses the box for the legend.\n\n Parameters\n ----------\n renderer : `.RendererBase` subclass\n renderer that will be used to draw the figures (i.e.\n ``fig.canvas.get_renderer()``)\n\n Returns\n -------\n `.BboxBase`\n The bounding box in figure pixel coordinates.\n \"\"\"\n return self._legend_box.get_window_extent(renderer)\n\n def get_frame_on(self):\n \"\"\"Get whether the legend box patch is drawn.\"\"\"\n return self.legendPatch.get_visible()\n\n def set_frame_on(self, b):\n \"\"\"\n Set whether the legend box patch is drawn.\n\n Parameters\n ----------\n b : bool\n \"\"\"\n self.legendPatch.set_visible(b)\n self.stale = True\n\n draw_frame = set_frame_on # Backcompat alias.\n\n def get_bbox_to_anchor(self):\n \"\"\"Return the bbox that the legend will be anchored to.\"\"\"\n if self._bbox_to_anchor is None:\n return self.parent.bbox\n else:\n return self._bbox_to_anchor\n\n def set_bbox_to_anchor(self, bbox, transform=None):\n \"\"\"\n Set the bbox that the legend will be anchored to.\n\n Parameters\n ----------\n bbox : `~matplotlib.transforms.BboxBase` or tuple\n The bounding box can be specified in the following ways:\n\n - A `.BboxBase` instance\n - A tuple of ``(left, bottom, width, height)`` in the given\n transform (normalized axes coordinate if None)\n - A tuple of ``(left, bottom)`` where the width and height will be\n assumed to be zero.\n - *None*, to remove the bbox anchoring, and use the parent bbox.\n\n transform : `~matplotlib.transforms.Transform`, optional\n A transform to apply to the bounding box. If not specified, this\n will use a transform to the bounding box of the parent.\n \"\"\"\n if bbox is None:\n self._bbox_to_anchor = None\n return\n elif isinstance(bbox, BboxBase):\n self._bbox_to_anchor = bbox\n else:\n try:\n l = len(bbox)\n except TypeError as err:\n raise ValueError(\"Invalid argument for bbox : %s\" %\n str(bbox)) from err\n\n if l == 2:\n bbox = [bbox[0], bbox[1], 0, 0]\n\n self._bbox_to_anchor = Bbox.from_bounds(*bbox)\n\n if transform is None:\n transform = BboxTransformTo(self.parent.bbox)\n\n self._bbox_to_anchor = TransformedBbox(self._bbox_to_anchor,\n transform)\n self.stale = True\n\n def _get_anchored_bbox(self, loc, bbox, parentbbox, renderer):\n \"\"\"\n Place the *bbox* inside the *parentbbox* according to a given\n location code. Return the (x, y) coordinate of the bbox.\n\n Parameters\n ----------\n loc : int\n A location code in range(1, 11). 
This corresponds to the possible\n values for ``self._loc``, excluding \"best\".\n bbox : `~matplotlib.transforms.Bbox`\n bbox to be placed, in display coordinates.\n parentbbox : `~matplotlib.transforms.Bbox`\n A parent box which will contain the bbox, in display coordinates.\n\n \"\"\"\n assert loc in range(1, 11) # called only internally\n\n BEST, UR, UL, LL, LR, R, CL, CR, LC, UC, C = range(11)\n\n anchor_coefs = {UR: \"NE\",\n UL: \"NW\",\n LL: \"SW\",\n LR: \"SE\",\n R: \"E\",\n CL: \"W\",\n CR: \"E\",\n LC: \"S\",\n UC: \"N\",\n C: \"C\"}\n\n c = anchor_coefs[loc]\n\n fontsize = renderer.points_to_pixels(self._fontsize)\n container = parentbbox.padded(-self.borderaxespad * fontsize)\n anchored_box = bbox.anchored(c, container=container)\n return anchored_box.x0, anchored_box.y0\n\n def _find_best_position(self, width, height, renderer, consider=None):\n \"\"\"\n Determine the best location to place the legend.\n\n *consider* is a list of ``(x, y)`` pairs to consider as a potential\n lower-left corner of the legend. All are display coords.\n \"\"\"\n assert self.isaxes # always holds, as this is only called internally\n\n start_time = time.perf_counter()\n\n bboxes, lines, offsets = self._auto_legend_data()\n\n bbox = Bbox.from_bounds(0, 0, width, height)\n if consider is None:\n consider = [self._get_anchored_bbox(x, bbox,\n self.get_bbox_to_anchor(),\n renderer)\n for x in range(1, len(self.codes))]\n\n candidates = []\n for idx, (l, b) in enumerate(consider):\n legendBox = Bbox.from_bounds(l, b, width, height)\n badness = 0\n # XXX TODO: If markers are present, it would be good to take them\n # into account when checking vertex overlaps in the next line.\n badness = (sum(legendBox.count_contains(line.vertices)\n for line in lines)\n + legendBox.count_contains(offsets)\n + legendBox.count_overlaps(bboxes)\n + sum(line.intersects_bbox(legendBox, filled=False)\n for line in lines))\n if badness == 0:\n return l, b\n # Include the index to favor lower codes in case of a tie.\n candidates.append((badness, idx, (l, b)))\n\n _, _, (l, b) = min(candidates)\n\n if self._loc_used_default and time.perf_counter() - start_time > 1:\n _api.warn_external(\n 'Creating legend with loc=\"best\" can be slow with large '\n 'amounts of data.')\n\n return l, b\n\n def contains(self, event):\n inside, info = self._default_contains(event)\n if inside is not None:\n return inside, info\n return self.legendPatch.contains(event)\n\n def set_draggable(self, state, use_blit=False, update='loc'):\n \"\"\"\n Enable or disable mouse dragging support of the legend.\n\n Parameters\n ----------\n state : bool\n Whether mouse dragging is enabled.\n use_blit : bool, optional\n Use blitting for faster image composition. For details see\n :ref:`func-animation`.\n update : {'loc', 'bbox'}, optional\n The legend parameter to be changed when dragged:\n\n - 'loc': update the *loc* parameter of the legend\n - 'bbox': update the *bbox_to_anchor* parameter of the legend\n\n Returns\n -------\n `.DraggableLegend` or *None*\n If *state* is ``True`` this returns the `.DraggableLegend` helper\n instance. 
Otherwise this returns *None*.\n \"\"\"\n if state:\n if self._draggable is None:\n self._draggable = DraggableLegend(self,\n use_blit,\n update=update)\n else:\n if self._draggable is not None:\n self._draggable.disconnect()\n self._draggable = None\n return self._draggable\n\n def get_draggable(self):\n \"\"\"Return ``True`` if the legend is draggable, ``False`` otherwise.\"\"\"\n return self._draggable is not None\n\n\n# Helper functions to parse legend arguments for both `figure.legend` and\n# `axes.legend`:\ndef _get_legend_handles(axs, legend_handler_map=None):\n \"\"\"\n Return a generator of artists that can be used as handles in\n a legend.\n\n \"\"\"\n handles_original = []\n for ax in axs:\n handles_original += (ax.lines + ax.patches +\n ax.collections + ax.containers)\n # support parasite axes:\n if hasattr(ax, 'parasites'):\n for axx in ax.parasites:\n handles_original += (axx.lines + axx.patches +\n axx.collections + axx.containers)\n\n handler_map = Legend.get_default_handler_map()\n\n if legend_handler_map is not None:\n handler_map = handler_map.copy()\n handler_map.update(legend_handler_map)\n\n has_handler = Legend.get_legend_handler\n\n for handle in handles_original:\n label = handle.get_label()\n if label != '_nolegend_' and has_handler(handler_map, handle):\n yield handle\n\n\ndef _get_legend_handles_labels(axs, legend_handler_map=None):\n \"\"\"\n Return handles and labels for legend, internal method.\n\n \"\"\"\n handles = []\n labels = []\n\n for handle in _get_legend_handles(axs, legend_handler_map):\n label = handle.get_label()\n if label and not label.startswith('_'):\n handles.append(handle)\n labels.append(label)\n return handles, labels\n\n\ndef _parse_legend_args(axs, *args, handles=None, labels=None, **kwargs):\n \"\"\"\n Get the handles and labels from the calls to either ``figure.legend``\n or ``axes.legend``.\n\n The parser is a bit involved because we support::\n\n legend()\n legend(labels)\n legend(handles, labels)\n legend(labels=labels)\n legend(handles=handles)\n legend(handles=handles, labels=labels)\n\n The behavior for a mixture of positional and keyword handles and labels\n is undefined and issues a warning.\n\n Parameters\n ----------\n axs : list of `.Axes`\n If handles are not given explicitly, the artists in these Axes are\n used as handles.\n *args : tuple\n Positional parameters passed to ``legend()``.\n handles\n The value of the keyword argument ``legend(handles=...)``, or *None*\n if that keyword argument was not used.\n labels\n The value of the keyword argument ``legend(labels=...)``, or *None*\n if that keyword argument was not used.\n **kwargs\n All other keyword arguments passed to ``legend()``.\n\n Returns\n -------\n handles : list of `.Artist`\n The legend handles.\n labels : list of str\n The legend labels.\n extra_args : tuple\n *args* with positional handles and labels removed.\n kwargs : dict\n *kwargs* with keywords handles and labels removed.\n\n \"\"\"\n log = logging.getLogger(__name__)\n\n handlers = kwargs.get('handler_map', {}) or {}\n extra_args = ()\n\n if (handles is not None or labels is not None) and args:\n _api.warn_external(\"You have mixed positional and keyword arguments, \"\n \"some input may be discarded.\")\n\n # if got both handles and labels as kwargs, make same length\n if handles and labels:\n handles, labels = zip(*zip(handles, labels))\n\n elif handles is not None and labels is None:\n labels = [handle.get_label() for handle in handles]\n\n elif labels is not None and handles is None:\n # Get as 
many handles as there are labels.\n handles = [handle for handle, label\n in zip(_get_legend_handles(axs, handlers), labels)]\n\n # No arguments - automatically detect labels and handles.\n elif len(args) == 0:\n handles, labels = _get_legend_handles_labels(axs, handlers)\n if not handles:\n log.warning('No handles with labels found to put in legend.')\n\n # One argument. User defined labels - automatic handle detection.\n elif len(args) == 1:\n labels, = args\n if any(isinstance(l, Artist) for l in labels):\n raise TypeError(\"A single argument passed to legend() must be a \"\n \"list of labels, but found an Artist in there.\")\n\n # Get as many handles as there are labels.\n handles = [handle for handle, label\n in zip(_get_legend_handles(axs, handlers), labels)]\n\n # Two arguments:\n # * user defined handles and labels\n elif len(args) >= 2:\n handles, labels = args[:2]\n extra_args = args[2:]\n\n else:\n raise TypeError('Invalid arguments to legend.')\n\n return handles, labels, extra_args, kwargs\n", "import numpy as np\nimport statsmodels.api as sm\nimport os\nfrom statsmodels.stats.mediation import Mediation\nimport pandas as pd\nfrom numpy.testing import assert_allclose\nimport patsy\nimport pytest\n\n# Compare to mediation R package vignette\ndf = [['index', 'Estimate', 'Lower CI bound', 'Upper CI bound', 'P-value'],\n ['ACME (control)', 0.085106, 0.029938, 0.141525, 0.00],\n ['ACME (treated)', 0.085674, 0.031089, 0.147762, 0.00],\n ['ADE (control)', 0.016938, -0.129157, 0.121945, 0.66],\n ['ADE (treated)', 0.017506, -0.139649, 0.130030, 0.66],\n ['Total effect', 0.102612, -0.036749, 0.227213, 0.20],\n ['Prop. mediated (control)', 0.698070, -6.901715, 2.725978, 0.20],\n ['Prop. mediated (treated)', 0.718648, -6.145419, 2.510750, 0.20],\n ['ACME (average)', 0.085390, 0.030272, 0.144768, 0.00],\n ['ADE (average)', 0.017222, -0.134465, 0.125987, 0.66],\n ['Prop. mediated (average)', 0.710900, -6.523567, 2.618364, 0.20]]\nframing_boot_4231 = pd.DataFrame(df[1:], columns=df[0]).set_index('index')\n\n# Compare to mediation R package vignette\ndf = [['index', 'Estimate', 'Lower CI bound', 'Upper CI bound', 'P-value'],\n ['ACME (control)', 0.075529, 0.024995, 0.132408, 0.00],\n ['ACME (treated)', 0.076348, 0.027475, 0.130138, 0.00],\n ['ADE (control)', 0.021389, -0.094323, 0.139148, 0.68],\n ['ADE (treated)', 0.022207, -0.101239, 0.145740, 0.68],\n ['Total effect', 0.097736, -0.025384, 0.225386, 0.16],\n ['Prop. mediated (control)', 0.656820, -3.664956, 4.845269, 0.16],\n ['Prop. mediated (treated)', 0.687690, -3.449415, 4.469289, 0.16],\n ['ACME (average)', 0.075938, 0.026109, 0.129450, 0.00],\n ['ADE (average)', 0.021798, -0.097781, 0.142444, 0.68],\n ['Prop. mediated (average)', 0.669659, -3.557185, 4.657279, 0.16]]\nframing_para_4231 = pd.DataFrame(df[1:], columns=df[0]).set_index('index')\n\n\n\ndf = [['index', 'Estimate', 'Lower CI bound', 'Upper CI bound', 'P-value'],\n ['ACME (control)', 0.065989, 0.003366, 0.152261, 0.04],\n ['ACME (treated)', 0.081424, 0.008888, 0.199853, 0.04],\n ['ADE (control)', 0.240392, -0.026286, 0.470918, 0.08],\n ['ADE (treated)', 0.255827, -0.030681, 0.491535, 0.08],\n ['Total effect', 0.321816, 0.037238, 0.549530, 0.00],\n ['Prop. mediated (control)', 0.196935, 0.015232, 1.864804, 0.04],\n ['Prop. mediated (treated)', 0.248896, 0.032229, 1.738846, 0.04],\n ['ACME (average)', 0.073707, 0.006883, 0.169923, 0.04],\n ['ADE (average)', 0.248109, -0.028483, 0.478978, 0.08],\n ['Prop. 
mediated (average)', 0.226799, 0.028865, 1.801825, 0.04]]\nframing_moderated_4231 = pd.DataFrame(df[1:], columns=df[0]).set_index('index')\n\n\[email protected]\ndef test_framing_example():\n\n cur_dir = os.path.dirname(os.path.abspath(__file__))\n data = pd.read_csv(os.path.join(cur_dir, 'results', \"framing.csv\"))\n\n outcome = np.asarray(data[\"cong_mesg\"])\n outcome_exog = patsy.dmatrix(\"emo + treat + age + educ + gender + income\", data,\n return_type='dataframe')\n probit = sm.families.links.probit\n outcome_model = sm.GLM(outcome, outcome_exog, family=sm.families.Binomial(link=probit()))\n\n mediator = np.asarray(data[\"emo\"])\n mediator_exog = patsy.dmatrix(\"treat + age + educ + gender + income\", data,\n return_type='dataframe')\n mediator_model = sm.OLS(mediator, mediator_exog)\n\n tx_pos = [outcome_exog.columns.tolist().index(\"treat\"),\n mediator_exog.columns.tolist().index(\"treat\")]\n med_pos = outcome_exog.columns.tolist().index(\"emo\")\n\n med = Mediation(outcome_model, mediator_model, tx_pos, med_pos,\n outcome_fit_kwargs={'atol':1e-11})\n\n np.random.seed(4231)\n para_rslt = med.fit(method='parametric', n_rep=100)\n diff = np.asarray(para_rslt.summary() - framing_para_4231)\n assert_allclose(diff, 0, atol=1e-6)\n\n np.random.seed(4231)\n boot_rslt = med.fit(method='boot', n_rep=100)\n diff = np.asarray(boot_rslt.summary() - framing_boot_4231)\n assert_allclose(diff, 0, atol=1e-6)\n\n\n\ndef test_framing_example_moderator():\n # moderation without formulas, generally not useful but test anyway\n\n cur_dir = os.path.dirname(os.path.abspath(__file__))\n data = pd.read_csv(os.path.join(cur_dir, 'results', \"framing.csv\"))\n\n outcome = np.asarray(data[\"cong_mesg\"])\n outcome_exog = patsy.dmatrix(\"emo + treat + age + educ + gender + income\", data,\n return_type='dataframe')\n probit = sm.families.links.probit\n outcome_model = sm.GLM(outcome, outcome_exog, family=sm.families.Binomial(link=probit()))\n\n mediator = np.asarray(data[\"emo\"])\n mediator_exog = patsy.dmatrix(\"treat + age + educ + gender + income\", data,\n return_type='dataframe')\n mediator_model = sm.OLS(mediator, mediator_exog)\n\n tx_pos = [outcome_exog.columns.tolist().index(\"treat\"),\n mediator_exog.columns.tolist().index(\"treat\")]\n med_pos = outcome_exog.columns.tolist().index(\"emo\")\n\n ix = (outcome_exog.columns.tolist().index(\"age\"),\n mediator_exog.columns.tolist().index(\"age\"))\n moderators = {ix : 20}\n med = Mediation(outcome_model, mediator_model, tx_pos, med_pos,\n moderators=moderators)\n\n # Just a smoke test\n np.random.seed(4231)\n med_rslt = med.fit(method='parametric', n_rep=100)\n\n\[email protected]\ndef test_framing_example_formula():\n\n cur_dir = os.path.dirname(os.path.abspath(__file__))\n data = pd.read_csv(os.path.join(cur_dir, 'results', \"framing.csv\"))\n\n probit = sm.families.links.probit\n outcome_model = sm.GLM.from_formula(\"cong_mesg ~ emo + treat + age + educ + gender + income\",\n data, family=sm.families.Binomial(link=probit()))\n\n mediator_model = sm.OLS.from_formula(\"emo ~ treat + age + educ + gender + income\", data)\n\n med = Mediation(outcome_model, mediator_model, \"treat\", \"emo\",\n outcome_fit_kwargs={'atol': 1e-11})\n\n np.random.seed(4231)\n med_rslt = med.fit(method='boot', n_rep=100)\n diff = np.asarray(med_rslt.summary() - framing_boot_4231)\n assert_allclose(diff, 0, atol=1e-6)\n\n np.random.seed(4231)\n med_rslt = med.fit(method='parametric', n_rep=100)\n diff = np.asarray(med_rslt.summary() - framing_para_4231)\n 
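    # with seed 4231 the parametric summary should reproduce the R `mediation`
    # package vignette table (framing_para_4231) to roughly 1e-6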
assert_allclose(diff, 0, atol=1e-6)\n\n\[email protected]\ndef test_framing_example_moderator_formula():\n\n cur_dir = os.path.dirname(os.path.abspath(__file__))\n data = pd.read_csv(os.path.join(cur_dir, 'results', \"framing.csv\"))\n\n probit = sm.families.links.probit\n outcome_model = sm.GLM.from_formula(\"cong_mesg ~ emo + treat*age + emo*age + educ + gender + income\",\n data, family=sm.families.Binomial(link=probit()))\n\n mediator_model = sm.OLS.from_formula(\"emo ~ treat*age + educ + gender + income\", data)\n\n moderators = {\"age\" : 20}\n med = Mediation(outcome_model, mediator_model, \"treat\", \"emo\",\n moderators=moderators)\n\n np.random.seed(4231)\n med_rslt = med.fit(method='parametric', n_rep=100)\n diff = np.asarray(med_rslt.summary() - framing_moderated_4231)\n assert_allclose(diff, 0, atol=1e-6)\n\n\ndef test_mixedlm():\n\n np.random.seed(3424)\n\n n = 200\n\n # The exposure (not time varying)\n x = np.random.normal(size=n)\n xv = np.outer(x, np.ones(3))\n\n # The mediator (with random intercept)\n mx = np.asarray([4., 4, 1])\n mx /= np.sqrt(np.sum(mx**2))\n med = mx[0] * np.outer(x, np.ones(3))\n med += mx[1] * np.outer(np.random.normal(size=n), np.ones(3))\n med += mx[2] * np.random.normal(size=(n, 3))\n\n # The outcome (exposure and mediator effects)\n ey = np.outer(x, np.r_[0, 0.5, 1]) + med\n\n # Random structure of the outcome (random intercept and slope)\n ex = np.asarray([5., 2, 2])\n ex /= np.sqrt(np.sum(ex**2))\n e = ex[0] * np.outer(np.random.normal(size=n), np.ones(3))\n e += ex[1] * np.outer(np.random.normal(size=n), np.r_[-1, 0, 1])\n e += ex[2] * np.random.normal(size=(n, 3))\n y = ey + e\n\n # Group membership\n idx = np.outer(np.arange(n), np.ones(3))\n\n # Time\n tim = np.outer(np.ones(n), np.r_[-1, 0, 1])\n\n df = pd.DataFrame({\"y\": y.flatten(), \"x\": xv.flatten(),\n \"id\": idx.flatten(), \"time\": tim.flatten(),\n \"med\": med.flatten()})\n\n mediator_model = sm.MixedLM.from_formula(\"med ~ x\", groups=\"id\", data=df)\n outcome_model = sm.MixedLM.from_formula(\"y ~ med + x\", groups=\"id\", data=df)\n me = Mediation(outcome_model, mediator_model, \"x\", \"med\")\n mr = me.fit(n_rep=2)\n st = mr.summary()\n pm = st.loc[\"Prop. 
mediated (average)\", \"Estimate\"]\n assert_allclose(pm, 0.52, rtol=1e-2, atol=1e-2)\n\n\ndef test_surv():\n\n np.random.seed(2341)\n\n n = 1000\n\n # Generate exposures\n exp = np.random.normal(size=n)\n\n # Generate mediators\n mn = np.exp(exp)\n mtime0 = -mn * np.log(np.random.uniform(size=n))\n ctime = -2 * mn * np.log(np.random.uniform(size=n))\n mstatus = (ctime >= mtime0).astype(int)\n mtime = np.where(mtime0 <= ctime, mtime0, ctime)\n\n for mt in \"full\", \"partial\", \"no\":\n\n # Outcome\n if mt == \"full\":\n lp = 0.5*mtime0\n elif mt == \"partial\":\n lp = exp + mtime0\n else:\n lp = exp\n\n # Generate outcomes\n mn = np.exp(-lp)\n ytime0 = -mn * np.log(np.random.uniform(size=n))\n ctime = -2 * mn * np.log(np.random.uniform(size=n))\n ystatus = (ctime >= ytime0).astype(int)\n ytime = np.where(ytime0 <= ctime, ytime0, ctime)\n\n df = pd.DataFrame({\"ytime\": ytime, \"ystatus\": ystatus,\n \"mtime\": mtime, \"mstatus\": mstatus,\n \"exp\": exp})\n\n fml = \"ytime ~ exp + mtime\"\n outcome_model = sm.PHReg.from_formula(fml, status=\"ystatus\", data=df)\n fml = \"mtime ~ exp\"\n mediator_model = sm.PHReg.from_formula(fml, status=\"mstatus\", data=df)\n\n med = Mediation(outcome_model, mediator_model, \"exp\", \"mtime\",\n outcome_predict_kwargs={\"pred_only\": True},\n outcome_fit_kwargs={\"method\": \"lbfgs\"},\n mediator_fit_kwargs={\"method\": \"lbfgs\"})\n med_result = med.fit(n_rep=2)\n dr = med_result.summary()\n pm = dr.loc[\"Prop. mediated (average)\", \"Estimate\"]\n if mt == \"no\":\n assert_allclose(pm, 0, atol=0.1, rtol=0.1)\n elif mt == \"full\":\n assert_allclose(pm, 1, atol=0.1, rtol=0.1)\n else:\n assert_allclose(pm, 0.5, atol=0.1, rtol=0.1)\n", "# -*- coding: utf-8 -*-\n\"\"\"Example for gam.AdditiveModel and PolynomialSmoother\n\nThis example was written as a test case.\nThe data generating process is chosen so the parameters are well identified\nand estimated.\n\nCreated on Fri Nov 04 13:45:43 2011\n\nAuthor: Josef Perktold\n\n\"\"\"\nfrom statsmodels.compat.python import lrange\n\nimport numpy as np\n\nfrom statsmodels.sandbox.gam import AdditiveModel\nfrom statsmodels.regression.linear_model import OLS\n\nnp.random.seed(8765993)\n#seed is chosen for nice result, not randomly\n#other seeds are pretty off in the prediction\n\n#DGP: simple polynomial\norder = 3\nsigma_noise = 0.5\nnobs = 1000 #1000 #with 1000, OLS and Additivemodel agree in params at 2 decimals\nlb, ub = -3.5, 4#2.5\nx1 = np.linspace(lb, ub, nobs)\nx2 = np.sin(2*x1)\nx = np.column_stack((x1/x1.max()*2, x2))\nexog = (x[:,:,None]**np.arange(order+1)[None, None, :]).reshape(nobs, -1)\nidx = lrange((order+1)*2)\ndel idx[order+1]\nexog_reduced = exog[:,idx] #remove duplicate constant\ny_true = exog.sum(1) / 2.\nz = y_true #alias check\nd = x\ny = y_true + sigma_noise * np.random.randn(nobs)\n\nexample = 1\n\nif example == 1:\n m = AdditiveModel(d)\n m.fit(y)\n\n y_pred = m.results.predict(d)\n\n\nfor ss in m.smoothers:\n print(ss.params)\n\nres_ols = OLS(y, exog_reduced).fit()\nprint(res_ols.params)\n\n#from numpy.testing import assert_almost_equal\n#assert_almost_equal(y_pred, res_ols.fittedvalues, 3)\n\nif example > 0:\n import matplotlib.pyplot as plt\n\n plt.figure()\n plt.plot(exog)\n\n y_pred = m.results.mu# + m.results.alpha #m.results.predict(d)\n plt.figure()\n plt.subplot(2,2,1)\n plt.plot(y, '.', alpha=0.25)\n plt.plot(y_true, 'k-', label='true')\n\n plt.plot(res_ols.fittedvalues, 'g-', label='OLS', lw=2, alpha=-.7)\n plt.plot(y_pred, 'r-', label='AM')\n plt.legend(loc='upper left')\n 
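    # title the combined panel; the loop below adds one panel each comparing
    # the fit against z, x1 and x2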
plt.title('gam.AdditiveModel')\n\n counter = 2\n for ii, xx in zip(['z', 'x1', 'x2'], [z, x[:,0], x[:,1]]):\n sortidx = np.argsort(xx)\n #plt.figure()\n plt.subplot(2, 2, counter)\n plt.plot(xx[sortidx], y[sortidx], '.', alpha=0.25)\n plt.plot(xx[sortidx], y_true[sortidx], 'k.', label='true', lw=2)\n plt.plot(xx[sortidx], y_pred[sortidx], 'r.', label='AM')\n plt.legend(loc='upper left')\n plt.title('gam.AdditiveModel ' + ii)\n counter += 1\n\n plt.show()\n", "'''More Goodness of fit tests\n\ncontains\n\nGOF : 1 sample gof tests based on Stephens 1970, plus AD A^2\nbootstrap : vectorized bootstrap p-values for gof test with fitted parameters\n\n\nCreated : 2011-05-21\nAuthor : Josef Perktold\n\nparts based on ks_2samp and kstest from scipy.stats\n(license: Scipy BSD, but were completely rewritten by Josef Perktold)\n\n\nReferences\n----------\n\n'''\nfrom statsmodels.compat.python import lmap\nimport numpy as np\n\nfrom scipy.stats import distributions\n\nfrom statsmodels.tools.decorators import cache_readonly\n\nfrom scipy.special import kolmogorov as ksprob\n\n#from scipy.stats unchanged\ndef ks_2samp(data1, data2):\n \"\"\"\n Computes the Kolmogorov-Smirnof statistic on 2 samples.\n\n This is a two-sided test for the null hypothesis that 2 independent samples\n are drawn from the same continuous distribution.\n\n Parameters\n ----------\n a, b : sequence of 1-D ndarrays\n two arrays of sample observations assumed to be drawn from a continuous\n distribution, sample sizes can be different\n\n\n Returns\n -------\n D : float\n KS statistic\n p-value : float\n two-tailed p-value\n\n\n Notes\n -----\n\n This tests whether 2 samples are drawn from the same distribution. Note\n that, like in the case of the one-sample K-S test, the distribution is\n assumed to be continuous.\n\n This is the two-sided test, one-sided tests are not implemented.\n The test uses the two-sided asymptotic Kolmogorov-Smirnov distribution.\n\n If the K-S statistic is small or the p-value is high, then we cannot\n reject the hypothesis that the distributions of the two samples\n are the same.\n\n Examples\n --------\n\n >>> from scipy import stats\n >>> import numpy as np\n >>> from scipy.stats import ks_2samp\n\n >>> #fix random seed to get the same result\n >>> np.random.seed(12345678)\n\n >>> n1 = 200 # size of first sample\n >>> n2 = 300 # size of second sample\n\n different distribution\n we can reject the null hypothesis since the pvalue is below 1%\n\n >>> rvs1 = stats.norm.rvs(size=n1,loc=0.,scale=1)\n >>> rvs2 = stats.norm.rvs(size=n2,loc=0.5,scale=1.5)\n >>> ks_2samp(rvs1,rvs2)\n (0.20833333333333337, 4.6674975515806989e-005)\n\n slightly different distribution\n we cannot reject the null hypothesis at a 10% or lower alpha since\n the pvalue at 0.144 is higher than 10%\n\n >>> rvs3 = stats.norm.rvs(size=n2,loc=0.01,scale=1.0)\n >>> ks_2samp(rvs1,rvs3)\n (0.10333333333333333, 0.14498781825751686)\n\n identical distribution\n we cannot reject the null hypothesis since the pvalue is high, 41%\n\n >>> rvs4 = stats.norm.rvs(size=n2,loc=0.0,scale=1.0)\n >>> ks_2samp(rvs1,rvs4)\n (0.07999999999999996, 0.41126949729859719)\n \"\"\"\n data1, data2 = lmap(np.asarray, (data1, data2))\n n1 = data1.shape[0]\n n2 = data2.shape[0]\n n1 = len(data1)\n n2 = len(data2)\n data1 = np.sort(data1)\n data2 = np.sort(data2)\n data_all = np.concatenate([data1,data2])\n #reminder: searchsorted inserts 2nd into 1st array\n cdf1 = np.searchsorted(data1,data_all,side='right')/(1.0*n1)\n cdf2 = 
(np.searchsorted(data2,data_all,side='right'))/(1.0*n2)\n d = np.max(np.absolute(cdf1-cdf2))\n #Note: d absolute not signed distance\n en = np.sqrt(n1*n2/float(n1+n2))\n try:\n prob = ksprob((en+0.12+0.11/en)*d)\n except:\n prob = 1.0\n return d, prob\n\n\n\n#from scipy.stats unchanged\ndef kstest(rvs, cdf, args=(), N=20, alternative = 'two_sided', mode='approx',**kwds):\n \"\"\"\n Perform the Kolmogorov-Smirnov test for goodness of fit\n\n This performs a test of the distribution G(x) of an observed\n random variable against a given distribution F(x). Under the null\n hypothesis the two distributions are identical, G(x)=F(x). The\n alternative hypothesis can be either 'two_sided' (default), 'less'\n or 'greater'. The KS test is only valid for continuous distributions.\n\n Parameters\n ----------\n rvs : str or array or callable\n string: name of a distribution in scipy.stats\n\n array: 1-D observations of random variables\n\n callable: function to generate random variables, requires keyword\n argument `size`\n\n cdf : str or callable\n string: name of a distribution in scipy.stats, if rvs is a string then\n cdf can evaluate to `False` or be the same as rvs\n callable: function to evaluate cdf\n\n args : tuple, sequence\n distribution parameters, used if rvs or cdf are strings\n N : int\n sample size if rvs is string or callable\n alternative : 'two_sided' (default), 'less' or 'greater'\n defines the alternative hypothesis (see explanation)\n\n mode : 'approx' (default) or 'asymp'\n defines the distribution used for calculating p-value\n\n 'approx' : use approximation to exact distribution of test statistic\n\n 'asymp' : use asymptotic distribution of test statistic\n\n\n Returns\n -------\n D : float\n KS test statistic, either D, D+ or D-\n p-value : float\n one-tailed or two-tailed p-value\n\n Notes\n -----\n\n In the one-sided test, the alternative is that the empirical\n cumulative distribution function of the random variable is \"less\"\n or \"greater\" than the cumulative distribution function F(x) of the\n hypothesis, G(x)<=F(x), resp. 
G(x)>=F(x).\n\n Examples\n --------\n\n >>> from scipy import stats\n >>> import numpy as np\n >>> from scipy.stats import kstest\n\n >>> x = np.linspace(-15,15,9)\n >>> kstest(x,'norm')\n (0.44435602715924361, 0.038850142705171065)\n\n >>> np.random.seed(987654321) # set random seed to get the same result\n >>> kstest('norm','',N=100)\n (0.058352892479417884, 0.88531190944151261)\n\n is equivalent to this\n\n >>> np.random.seed(987654321)\n >>> kstest(stats.norm.rvs(size=100),'norm')\n (0.058352892479417884, 0.88531190944151261)\n\n Test against one-sided alternative hypothesis:\n\n >>> np.random.seed(987654321)\n\n Shift distribution to larger values, so that cdf_dgp(x)< norm.cdf(x):\n\n >>> x = stats.norm.rvs(loc=0.2, size=100)\n >>> kstest(x,'norm', alternative = 'less')\n (0.12464329735846891, 0.040989164077641749)\n\n Reject equal distribution against alternative hypothesis: less\n\n >>> kstest(x,'norm', alternative = 'greater')\n (0.0072115233216311081, 0.98531158590396395)\n\n Do not reject equal distribution against alternative hypothesis: greater\n\n >>> kstest(x,'norm', mode='asymp')\n (0.12464329735846891, 0.08944488871182088)\n\n\n Testing t distributed random variables against normal distribution:\n\n With 100 degrees of freedom the t distribution looks close to the normal\n distribution, and the kstest does not reject the hypothesis that the sample\n came from the normal distribution\n\n >>> np.random.seed(987654321)\n >>> stats.kstest(stats.t.rvs(100,size=100),'norm')\n (0.072018929165471257, 0.67630062862479168)\n\n With 3 degrees of freedom the t distribution looks sufficiently different\n from the normal distribution, that we can reject the hypothesis that the\n sample came from the normal distribution at a alpha=10% level\n\n >>> np.random.seed(987654321)\n >>> stats.kstest(stats.t.rvs(3,size=100),'norm')\n (0.131016895759829, 0.058826222555312224)\n \"\"\"\n if isinstance(rvs, str):\n #cdf = getattr(stats, rvs).cdf\n if (not cdf) or (cdf == rvs):\n cdf = getattr(distributions, rvs).cdf\n rvs = getattr(distributions, rvs).rvs\n else:\n raise AttributeError('if rvs is string, cdf has to be the same distribution')\n\n\n if isinstance(cdf, str):\n cdf = getattr(distributions, cdf).cdf\n if callable(rvs):\n kwds = {'size':N}\n vals = np.sort(rvs(*args,**kwds))\n else:\n vals = np.sort(rvs)\n N = len(vals)\n cdfvals = cdf(vals, *args)\n\n if alternative in ['two_sided', 'greater']:\n Dplus = (np.arange(1.0, N+1)/N - cdfvals).max()\n if alternative == 'greater':\n return Dplus, distributions.ksone.sf(Dplus,N)\n\n if alternative in ['two_sided', 'less']:\n Dmin = (cdfvals - np.arange(0.0, N)/N).max()\n if alternative == 'less':\n return Dmin, distributions.ksone.sf(Dmin,N)\n\n if alternative == 'two_sided':\n D = np.max([Dplus,Dmin])\n if mode == 'asymp':\n return D, distributions.kstwobign.sf(D*np.sqrt(N))\n if mode == 'approx':\n pval_two = distributions.kstwobign.sf(D*np.sqrt(N))\n if N > 2666 or pval_two > 0.80 - N*0.3/1000.0 :\n return D, distributions.kstwobign.sf(D*np.sqrt(N))\n else:\n return D, distributions.ksone.sf(D,N)*2\n\n#TODO: split into modification and pvalue functions separately ?\n# for separate testing and combining different pieces\n\ndef dplus_st70_upp(stat, nobs):\n mod_factor = np.sqrt(nobs) + 0.12 + 0.11 / np.sqrt(nobs)\n stat_modified = stat * mod_factor\n pval = np.exp(-2 * stat_modified**2)\n digits = np.sum(stat > np.array([0.82, 0.82, 1.00]))\n #repeat low to get {0,2,3}\n return stat_modified, pval, digits\n\ndminus_st70_upp = 
dplus_st70_upp\n\n\ndef d_st70_upp(stat, nobs):\n mod_factor = np.sqrt(nobs) + 0.12 + 0.11 / np.sqrt(nobs)\n stat_modified = stat * mod_factor\n pval = 2 * np.exp(-2 * stat_modified**2)\n digits = np.sum(stat > np.array([0.91, 0.91, 1.08]))\n #repeat low to get {0,2,3}\n return stat_modified, pval, digits\n\ndef v_st70_upp(stat, nobs):\n mod_factor = np.sqrt(nobs) + 0.155 + 0.24 / np.sqrt(nobs)\n #repeat low to get {0,2,3}\n stat_modified = stat * mod_factor\n zsqu = stat_modified**2\n pval = (8 * zsqu - 2) * np.exp(-2 * zsqu)\n digits = np.sum(stat > np.array([1.06, 1.06, 1.26]))\n return stat_modified, pval, digits\n\ndef wsqu_st70_upp(stat, nobs):\n nobsinv = 1. / nobs\n stat_modified = (stat - 0.4 * nobsinv + 0.6 * nobsinv**2) * (1 + nobsinv)\n pval = 0.05 * np.exp(2.79 - 6 * stat_modified)\n digits = np.nan # some explanation in txt\n #repeat low to get {0,2,3}\n return stat_modified, pval, digits\n\ndef usqu_st70_upp(stat, nobs):\n nobsinv = 1. / nobs\n stat_modified = (stat - 0.1 * nobsinv + 0.1 * nobsinv**2)\n stat_modified *= (1 + 0.8 * nobsinv)\n pval = 2 * np.exp(- 2 * stat_modified * np.pi**2)\n digits = np.sum(stat > np.array([0.29, 0.29, 0.34]))\n #repeat low to get {0,2,3}\n return stat_modified, pval, digits\n\ndef a_st70_upp(stat, nobs):\n nobsinv = 1. / nobs\n stat_modified = (stat - 0.7 * nobsinv + 0.9 * nobsinv**2)\n stat_modified *= (1 + 1.23 * nobsinv)\n pval = 1.273 * np.exp(- 2 * stat_modified / 2. * np.pi**2)\n digits = np.sum(stat > np.array([0.11, 0.11, 0.452]))\n #repeat low to get {0,2,3}\n return stat_modified, pval, digits\n\n\n\ngof_pvals = {}\n\ngof_pvals['stephens70upp'] = {\n 'd_plus' : dplus_st70_upp,\n 'd_minus' : dplus_st70_upp,\n 'd' : d_st70_upp,\n 'v' : v_st70_upp,\n 'wsqu' : wsqu_st70_upp,\n 'usqu' : usqu_st70_upp,\n 'a' : a_st70_upp }\n\ndef pval_kstest_approx(D, N):\n pval_two = distributions.kstwobign.sf(D*np.sqrt(N))\n if N > 2666 or pval_two > 0.80 - N*0.3/1000.0 :\n return D, distributions.kstwobign.sf(D*np.sqrt(N)), np.nan\n else:\n return D, distributions.ksone.sf(D,N)*2, np.nan\n\ngof_pvals['scipy'] = {\n 'd_plus' : lambda Dplus, N: (Dplus, distributions.ksone.sf(Dplus, N), np.nan),\n 'd_minus' : lambda Dmin, N: (Dmin, distributions.ksone.sf(Dmin,N), np.nan),\n 'd' : lambda D, N: (D, distributions.kstwobign.sf(D*np.sqrt(N)), np.nan)\n }\n\ngof_pvals['scipy_approx'] = {\n 'd' : pval_kstest_approx }\n\nclass GOF(object):\n '''One Sample Goodness of Fit tests\n\n includes Kolmogorov-Smirnov D, D+, D-, Kuiper V, Cramer-von Mises W^2, U^2 and\n Anderson-Darling A, A^2. The p-values for all tests except for A^2 are based on\n the approximatiom given in Stephens 1970. A^2 has currently no p-values. 
For\n the Kolmogorov-Smirnov test the tests as given in scipy.stats are also available\n as options.\n\n\n\n\n design: I might want to retest with different distributions, to calculate\n data summary statistics only once, or add separate class that holds\n summary statistics and data (sounds good).\n\n\n\n\n '''\n\n\n\n\n def __init__(self, rvs, cdf, args=(), N=20):\n if isinstance(rvs, str):\n #cdf = getattr(stats, rvs).cdf\n if (not cdf) or (cdf == rvs):\n cdf = getattr(distributions, rvs).cdf\n rvs = getattr(distributions, rvs).rvs\n else:\n raise AttributeError('if rvs is string, cdf has to be the same distribution')\n\n\n if isinstance(cdf, str):\n cdf = getattr(distributions, cdf).cdf\n if callable(rvs):\n kwds = {'size':N}\n vals = np.sort(rvs(*args,**kwds))\n else:\n vals = np.sort(rvs)\n N = len(vals)\n cdfvals = cdf(vals, *args)\n\n self.nobs = N\n self.vals_sorted = vals\n self.cdfvals = cdfvals\n\n\n\n @cache_readonly\n def d_plus(self):\n nobs = self.nobs\n cdfvals = self.cdfvals\n return (np.arange(1.0, nobs+1)/nobs - cdfvals).max()\n\n @cache_readonly\n def d_minus(self):\n nobs = self.nobs\n cdfvals = self.cdfvals\n return (cdfvals - np.arange(0.0, nobs)/nobs).max()\n\n @cache_readonly\n def d(self):\n return np.max([self.d_plus, self.d_minus])\n\n @cache_readonly\n def v(self):\n '''Kuiper'''\n return self.d_plus + self.d_minus\n\n @cache_readonly\n def wsqu(self):\n '''Cramer von Mises'''\n nobs = self.nobs\n cdfvals = self.cdfvals\n #use literal formula, TODO: simplify with arange(,,2)\n wsqu = ((cdfvals - (2. * np.arange(1., nobs+1) - 1)/nobs/2.)**2).sum() \\\n + 1./nobs/12.\n return wsqu\n\n @cache_readonly\n def usqu(self):\n nobs = self.nobs\n cdfvals = self.cdfvals\n #use literal formula, TODO: simplify with arange(,,2)\n usqu = self.wsqu - nobs * (cdfvals.mean() - 0.5)**2\n return usqu\n\n @cache_readonly\n def a(self):\n nobs = self.nobs\n cdfvals = self.cdfvals\n\n #one loop instead of large array\n msum = 0\n for j in range(1,nobs):\n mj = cdfvals[j] - cdfvals[:j]\n mask = (mj > 0.5)\n mj[mask] = 1 - mj[mask]\n msum += mj.sum()\n\n a = nobs / 4. - 2. / nobs * msum\n return a\n\n @cache_readonly\n def asqu(self):\n '''Stephens 1974, does not have p-value formula for A^2'''\n nobs = self.nobs\n cdfvals = self.cdfvals\n\n asqu = -((2. 
* np.arange(1., nobs+1) - 1) *\n (np.log(cdfvals) + np.log(1-cdfvals[::-1]) )).sum()/nobs - nobs\n\n return asqu\n\n\n def get_test(self, testid='d', pvals='stephens70upp'):\n '''\n\n '''\n #print gof_pvals[pvals][testid]\n stat = getattr(self, testid)\n if pvals == 'stephens70upp':\n return gof_pvals[pvals][testid](stat, self.nobs), stat\n else:\n return gof_pvals[pvals][testid](stat, self.nobs)\n\n\n\n\n\n\n\n\ndef gof_mc(randfn, distr, nobs=100):\n #print '\\nIs it correctly sized?'\n from collections import defaultdict\n\n results = defaultdict(list)\n for i in range(1000):\n rvs = randfn(nobs)\n goft = GOF(rvs, distr)\n for ti in all_gofs:\n results[ti].append(goft.get_test(ti, 'stephens70upp')[0][1])\n\n resarr = np.array([results[ti] for ti in all_gofs])\n print(' ', ' '.join(all_gofs))\n print('at 0.01:', (resarr < 0.01).mean(1))\n print('at 0.05:', (resarr < 0.05).mean(1))\n print('at 0.10:', (resarr < 0.1).mean(1))\n\ndef asquare(cdfvals, axis=0):\n '''vectorized Anderson Darling A^2, Stephens 1974'''\n ndim = len(cdfvals.shape)\n nobs = cdfvals.shape[axis]\n slice_reverse = [slice(None)] * ndim #might make copy if not specific axis???\n islice = [None] * ndim\n islice[axis] = slice(None)\n slice_reverse[axis] = slice(None, None, -1)\n asqu = -((2. * np.arange(1., nobs+1)[tuple(islice)] - 1) *\n (np.log(cdfvals) + np.log(1-cdfvals[tuple(slice_reverse)]))/nobs).sum(axis) \\\n - nobs\n\n return asqu\n\n\n#class OneSGOFFittedVec(object):\n# '''for vectorized fitting'''\n # currently I use the bootstrap as function instead of full class\n\n #note: kwds loc and scale are a pain\n # I would need to overwrite rvs, fit and cdf depending on fixed parameters\n\n #def bootstrap(self, distr, args=(), kwds={}, nobs=200, nrep=1000,\ndef bootstrap(distr, args=(), nobs=200, nrep=100, value=None, batch_size=None):\n '''Monte Carlo (or parametric bootstrap) p-values for gof\n\n currently hardcoded for A^2 only\n\n assumes vectorized fit_vec method,\n builds and analyses (nobs, nrep) sample in one step\n\n rename function to less generic\n\n this works also with nrep=1\n\n '''\n #signature similar to kstest ?\n #delegate to fn ?\n\n #rvs_kwds = {'size':(nobs, nrep)}\n #rvs_kwds.update(kwds)\n\n\n #it will be better to build a separate batch function that calls bootstrap\n #keep batch if value is true, but batch iterate from outside if stat is returned\n if batch_size is not None:\n if value is None:\n raise ValueError('using batching requires a value')\n n_batch = int(np.ceil(nrep/float(batch_size)))\n count = 0\n for irep in range(n_batch):\n rvs = distr.rvs(args, **{'size':(batch_size, nobs)})\n params = distr.fit_vec(rvs, axis=1)\n params = lmap(lambda x: np.expand_dims(x, 1), params)\n cdfvals = np.sort(distr.cdf(rvs, params), axis=1)\n stat = asquare(cdfvals, axis=1)\n count += (stat >= value).sum()\n return count / float(n_batch * batch_size)\n else:\n #rvs = distr.rvs(args, **kwds) #extension to distribution kwds ?\n rvs = distr.rvs(args, **{'size':(nrep, nobs)})\n params = distr.fit_vec(rvs, axis=1)\n params = lmap(lambda x: np.expand_dims(x, 1), params)\n cdfvals = np.sort(distr.cdf(rvs, params), axis=1)\n stat = asquare(cdfvals, axis=1)\n if value is None: #return all bootstrap results\n stat_sorted = np.sort(stat)\n return stat_sorted\n else: #calculate and return specific p-value\n return (stat >= value).mean()\n\n\n\ndef bootstrap2(value, distr, args=(), nobs=200, nrep=100):\n '''Monte Carlo (or parametric bootstrap) p-values for gof\n\n currently hardcoded for A^2 only\n\n non 
vectorized, loops over all parametric bootstrap replications and calculates\n and returns specific p-value,\n\n rename function to less generic\n\n '''\n #signature similar to kstest ?\n #delegate to fn ?\n\n #rvs_kwds = {'size':(nobs, nrep)}\n #rvs_kwds.update(kwds)\n\n\n count = 0\n for irep in range(nrep):\n #rvs = distr.rvs(args, **kwds) #extension to distribution kwds ?\n rvs = distr.rvs(args, **{'size':nobs})\n params = distr.fit_vec(rvs)\n cdfvals = np.sort(distr.cdf(rvs, params))\n stat = asquare(cdfvals, axis=0)\n count += (stat >= value)\n return count * 1. / nrep\n\n\nclass NewNorm(object):\n '''just a holder for modified distributions\n '''\n\n def fit_vec(self, x, axis=0):\n return x.mean(axis), x.std(axis)\n\n def cdf(self, x, args):\n return distributions.norm.cdf(x, loc=args[0], scale=args[1])\n\n def rvs(self, args, size):\n loc=args[0]\n scale=args[1]\n return loc + scale * distributions.norm.rvs(size=size)\n\n\n\n\n\nif __name__ == '__main__':\n from scipy import stats\n #rvs = np.random.randn(1000)\n rvs = stats.t.rvs(3, size=200)\n print('scipy kstest')\n print(kstest(rvs, 'norm'))\n goft = GOF(rvs, 'norm')\n print(goft.get_test())\n\n all_gofs = ['d', 'd_plus', 'd_minus', 'v', 'wsqu', 'usqu', 'a']\n for ti in all_gofs:\n print(ti, goft.get_test(ti, 'stephens70upp'))\n\n print('\\nIs it correctly sized?')\n from collections import defaultdict\n\n results = defaultdict(list)\n nobs = 200\n for i in range(100):\n rvs = np.random.randn(nobs)\n goft = GOF(rvs, 'norm')\n for ti in all_gofs:\n results[ti].append(goft.get_test(ti, 'stephens70upp')[0][1])\n\n resarr = np.array([results[ti] for ti in all_gofs])\n print(' ', ' '.join(all_gofs))\n print('at 0.01:', (resarr < 0.01).mean(1))\n print('at 0.05:', (resarr < 0.05).mean(1))\n print('at 0.10:', (resarr < 0.1).mean(1))\n\n gof_mc(lambda nobs: stats.t.rvs(3, size=nobs), 'norm', nobs=200)\n\n nobs = 200\n nrep = 100\n bt = bootstrap(NewNorm(), args=(0,1), nobs=nobs, nrep=nrep, value=None)\n quantindex = np.floor(nrep * np.array([0.99, 0.95, 0.9])).astype(int)\n print(bt[quantindex])\n\n #the bootstrap results match Stephens pretty well for nobs=100, but not so well for\n #large (1000) or small (20) nobs\n '''\n >>> np.array([15.0, 10.0, 5.0, 2.5, 1.0])/100. 
#Stephens\n array([ 0.15 , 0.1 , 0.05 , 0.025, 0.01 ])\n >>> nobs = 100\n >>> [bootstrap(NewNorm(), args=(0,1), nobs=nobs, nrep=10000, value=c/ (1 + 4./nobs - 25./nobs**2)) for c in [0.576, 0.656, 0.787, 0.918, 1.092]]\n [0.1545, 0.10009999999999999, 0.049000000000000002, 0.023, 0.0104]\n >>>\n '''\n", "import numpy as np\n\nc16 = np.complex128(1)\nf8 = np.float64(1)\ni8 = np.int64(1)\nu8 = np.uint64(1)\n\nc8 = np.complex64(1)\nf4 = np.float32(1)\ni4 = np.int32(1)\nu4 = np.uint32(1)\n\ndt = np.datetime64(1, \"D\")\ntd = np.timedelta64(1, \"D\")\n\nb_ = np.bool_(1)\n\nb = bool(1)\nc = complex(1)\nf = float(1)\ni = int(1)\n\nAR = np.ones(1, dtype=np.float64)\nAR.setflags(write=False)\n\n# unary ops\n\n-c16\n-c8\n-f8\n-f4\n-i8\n-i4\n-u8\n-u4\n-td\n-AR\n\n+c16\n+c8\n+f8\n+f4\n+i8\n+i4\n+u8\n+u4\n+td\n+AR\n\nabs(c16)\nabs(c8)\nabs(f8)\nabs(f4)\nabs(i8)\nabs(i4)\nabs(u8)\nabs(u4)\nabs(td)\nabs(b_)\nabs(AR)\n\n# Time structures\n\ndt + td\ndt + i\ndt + i4\ndt + i8\ndt - dt\ndt - i\ndt - i4\ndt - i8\n\ntd + td\ntd + i\ntd + i4\ntd + i8\ntd - td\ntd - i\ntd - i4\ntd - i8\ntd / f\ntd / f4\ntd / f8\ntd / td\ntd // td\ntd % td\n\n\n# boolean\n\nb_ / b\nb_ / b_\nb_ / i\nb_ / i8\nb_ / i4\nb_ / u8\nb_ / u4\nb_ / f\nb_ / f8\nb_ / f4\nb_ / c\nb_ / c16\nb_ / c8\n\nb / b_\nb_ / b_\ni / b_\ni8 / b_\ni4 / b_\nu8 / b_\nu4 / b_\nf / b_\nf8 / b_\nf4 / b_\nc / b_\nc16 / b_\nc8 / b_\n\n# Complex\n\nc16 + c16\nc16 + f8\nc16 + i8\nc16 + c8\nc16 + f4\nc16 + i4\nc16 + b_\nc16 + b\nc16 + c\nc16 + f\nc16 + i\nc16 + AR\n\nc16 + c16\nf8 + c16\ni8 + c16\nc8 + c16\nf4 + c16\ni4 + c16\nb_ + c16\nb + c16\nc + c16\nf + c16\ni + c16\nAR + c16\n\nc8 + c16\nc8 + f8\nc8 + i8\nc8 + c8\nc8 + f4\nc8 + i4\nc8 + b_\nc8 + b\nc8 + c\nc8 + f\nc8 + i\nc8 + AR\n\nc16 + c8\nf8 + c8\ni8 + c8\nc8 + c8\nf4 + c8\ni4 + c8\nb_ + c8\nb + c8\nc + c8\nf + c8\ni + c8\nAR + c8\n\n# Float\n\nf8 + f8\nf8 + i8\nf8 + f4\nf8 + i4\nf8 + b_\nf8 + b\nf8 + c\nf8 + f\nf8 + i\nf8 + AR\n\nf8 + f8\ni8 + f8\nf4 + f8\ni4 + f8\nb_ + f8\nb + f8\nc + f8\nf + f8\ni + f8\nAR + f8\n\nf4 + f8\nf4 + i8\nf4 + f4\nf4 + i4\nf4 + b_\nf4 + b\nf4 + c\nf4 + f\nf4 + i\nf4 + AR\n\nf8 + f4\ni8 + f4\nf4 + f4\ni4 + f4\nb_ + f4\nb + f4\nc + f4\nf + f4\ni + f4\nAR + f4\n\n# Int\n\ni8 + i8\ni8 + u8\ni8 + i4\ni8 + u4\ni8 + b_\ni8 + b\ni8 + c\ni8 + f\ni8 + i\ni8 + AR\n\nu8 + u8\nu8 + i4\nu8 + u4\nu8 + b_\nu8 + b\nu8 + c\nu8 + f\nu8 + i\nu8 + AR\n\ni8 + i8\nu8 + i8\ni4 + i8\nu4 + i8\nb_ + i8\nb + i8\nc + i8\nf + i8\ni + i8\nAR + i8\n\nu8 + u8\ni4 + u8\nu4 + u8\nb_ + u8\nb + u8\nc + u8\nf + u8\ni + u8\nAR + u8\n\ni4 + i8\ni4 + i4\ni4 + i\ni4 + b_\ni4 + b\ni4 + AR\n\nu4 + i8\nu4 + i4\nu4 + u8\nu4 + u4\nu4 + i\nu4 + b_\nu4 + b\nu4 + AR\n\ni8 + i4\ni4 + i4\ni + i4\nb_ + i4\nb + i4\nAR + i4\n\ni8 + u4\ni4 + u4\nu8 + u4\nu4 + u4\nb_ + u4\nb + u4\ni + u4\nAR + u4\n", "import numpy as np\nfrom numpy.testing import assert_equal, assert_array_equal\n\nfrom scipy.stats import rankdata, tiecorrect\nimport pytest\n\n\nclass TestTieCorrect(object):\n\n def test_empty(self):\n \"\"\"An empty array requires no correction, should return 1.0.\"\"\"\n ranks = np.array([], dtype=np.float64)\n c = tiecorrect(ranks)\n assert_equal(c, 1.0)\n\n def test_one(self):\n \"\"\"A single element requires no correction, should return 1.0.\"\"\"\n ranks = np.array([1.0], dtype=np.float64)\n c = tiecorrect(ranks)\n assert_equal(c, 1.0)\n\n def test_no_correction(self):\n \"\"\"Arrays with no ties require no correction.\"\"\"\n ranks = np.arange(2.0)\n c = tiecorrect(ranks)\n assert_equal(c, 1.0)\n ranks = np.arange(3.0)\n c = 
tiecorrect(ranks)\n assert_equal(c, 1.0)\n\n def test_basic(self):\n \"\"\"Check a few basic examples of the tie correction factor.\"\"\"\n # One tie of two elements\n ranks = np.array([1.0, 2.5, 2.5])\n c = tiecorrect(ranks)\n T = 2.0\n N = ranks.size\n expected = 1.0 - (T**3 - T) / (N**3 - N)\n assert_equal(c, expected)\n\n # One tie of two elements (same as above, but tie is not at the end)\n ranks = np.array([1.5, 1.5, 3.0])\n c = tiecorrect(ranks)\n T = 2.0\n N = ranks.size\n expected = 1.0 - (T**3 - T) / (N**3 - N)\n assert_equal(c, expected)\n\n # One tie of three elements\n ranks = np.array([1.0, 3.0, 3.0, 3.0])\n c = tiecorrect(ranks)\n T = 3.0\n N = ranks.size\n expected = 1.0 - (T**3 - T) / (N**3 - N)\n assert_equal(c, expected)\n\n # Two ties, lengths 2 and 3.\n ranks = np.array([1.5, 1.5, 4.0, 4.0, 4.0])\n c = tiecorrect(ranks)\n T1 = 2.0\n T2 = 3.0\n N = ranks.size\n expected = 1.0 - ((T1**3 - T1) + (T2**3 - T2)) / (N**3 - N)\n assert_equal(c, expected)\n\n def test_overflow(self):\n ntie, k = 2000, 5\n a = np.repeat(np.arange(k), ntie)\n n = a.size # ntie * k\n out = tiecorrect(rankdata(a))\n assert_equal(out, 1.0 - k * (ntie**3 - ntie) / float(n**3 - n))\n\n\nclass TestRankData(object):\n\n def test_empty(self):\n \"\"\"stats.rankdata([]) should return an empty array.\"\"\"\n a = np.array([], dtype=int)\n r = rankdata(a)\n assert_array_equal(r, np.array([], dtype=np.float64))\n r = rankdata([])\n assert_array_equal(r, np.array([], dtype=np.float64))\n\n def test_one(self):\n \"\"\"Check stats.rankdata with an array of length 1.\"\"\"\n data = [100]\n a = np.array(data, dtype=int)\n r = rankdata(a)\n assert_array_equal(r, np.array([1.0], dtype=np.float64))\n r = rankdata(data)\n assert_array_equal(r, np.array([1.0], dtype=np.float64))\n\n def test_basic(self):\n \"\"\"Basic tests of stats.rankdata.\"\"\"\n data = [100, 10, 50]\n expected = np.array([3.0, 1.0, 2.0], dtype=np.float64)\n a = np.array(data, dtype=int)\n r = rankdata(a)\n assert_array_equal(r, expected)\n r = rankdata(data)\n assert_array_equal(r, expected)\n\n data = [40, 10, 30, 10, 50]\n expected = np.array([4.0, 1.5, 3.0, 1.5, 5.0], dtype=np.float64)\n a = np.array(data, dtype=int)\n r = rankdata(a)\n assert_array_equal(r, expected)\n r = rankdata(data)\n assert_array_equal(r, expected)\n\n data = [20, 20, 20, 10, 10, 10]\n expected = np.array([5.0, 5.0, 5.0, 2.0, 2.0, 2.0], dtype=np.float64)\n a = np.array(data, dtype=int)\n r = rankdata(a)\n assert_array_equal(r, expected)\n r = rankdata(data)\n assert_array_equal(r, expected)\n # The docstring states explicitly that the argument is flattened.\n a2d = a.reshape(2, 3)\n r = rankdata(a2d)\n assert_array_equal(r, expected)\n\n def test_rankdata_object_string(self):\n min_rank = lambda a: [1 + sum(i < j for i in a) for j in a]\n max_rank = lambda a: [sum(i <= j for i in a) for j in a]\n ordinal_rank = lambda a: min_rank([(x, i) for i, x in enumerate(a)])\n\n def average_rank(a):\n return [(i + j) / 2.0 for i, j in zip(min_rank(a), max_rank(a))]\n\n def dense_rank(a):\n b = np.unique(a)\n return [1 + sum(i < j for i in b) for j in a]\n\n rankf = dict(min=min_rank, max=max_rank, ordinal=ordinal_rank,\n average=average_rank, dense=dense_rank)\n\n def check_ranks(a):\n for method in 'min', 'max', 'dense', 'ordinal', 'average':\n out = rankdata(a, method=method)\n assert_array_equal(out, rankf[method](a))\n\n val = ['foo', 'bar', 'qux', 'xyz', 'abc', 'efg', 'ace', 'qwe', 'qaz']\n check_ranks(np.random.choice(val, 200))\n check_ranks(np.random.choice(val, 
200).astype('object'))\n\n val = np.array([0, 1, 2, 2.718, 3, 3.141], dtype='object')\n check_ranks(np.random.choice(val, 200).astype('object'))\n\n def test_large_int(self):\n data = np.array([2**60, 2**60+1], dtype=np.uint64)\n r = rankdata(data)\n assert_array_equal(r, [1.0, 2.0])\n\n data = np.array([2**60, 2**60+1], dtype=np.int64)\n r = rankdata(data)\n assert_array_equal(r, [1.0, 2.0])\n\n data = np.array([2**60, -2**60+1], dtype=np.int64)\n r = rankdata(data)\n assert_array_equal(r, [2.0, 1.0])\n\n def test_big_tie(self):\n for n in [10000, 100000, 1000000]:\n data = np.ones(n, dtype=int)\n r = rankdata(data)\n expected_rank = 0.5 * (n + 1)\n assert_array_equal(r, expected_rank * data,\n \"test failed with n=%d\" % n)\n\n def test_axis(self):\n data = [[0, 2, 1],\n [4, 2, 2]]\n expected0 = [[1., 1.5, 1.],\n [2., 1.5, 2.]]\n r0 = rankdata(data, axis=0)\n assert_array_equal(r0, expected0)\n expected1 = [[1., 3., 2.],\n [3., 1.5, 1.5]]\n r1 = rankdata(data, axis=1)\n assert_array_equal(r1, expected1)\n\n methods = [\"average\", \"min\", \"max\", \"dense\", \"ordinal\"]\n dtypes = [np.float64] + [np.int_]*4\n\n @pytest.mark.parametrize(\"axis\", [0, 1])\n @pytest.mark.parametrize(\"method, dtype\", zip(methods, dtypes))\n def test_size_0_axis(self, axis, method, dtype):\n shape = (3, 0)\n data = np.zeros(shape)\n r = rankdata(data, method=method, axis=axis)\n assert_equal(r.shape, shape)\n assert_equal(r.dtype, dtype)\n\n\n_cases = (\n # values, method, expected\n ([], 'average', []),\n ([], 'min', []),\n ([], 'max', []),\n ([], 'dense', []),\n ([], 'ordinal', []),\n #\n ([100], 'average', [1.0]),\n ([100], 'min', [1.0]),\n ([100], 'max', [1.0]),\n ([100], 'dense', [1.0]),\n ([100], 'ordinal', [1.0]),\n #\n ([100, 100, 100], 'average', [2.0, 2.0, 2.0]),\n ([100, 100, 100], 'min', [1.0, 1.0, 1.0]),\n ([100, 100, 100], 'max', [3.0, 3.0, 3.0]),\n ([100, 100, 100], 'dense', [1.0, 1.0, 1.0]),\n ([100, 100, 100], 'ordinal', [1.0, 2.0, 3.0]),\n #\n ([100, 300, 200], 'average', [1.0, 3.0, 2.0]),\n ([100, 300, 200], 'min', [1.0, 3.0, 2.0]),\n ([100, 300, 200], 'max', [1.0, 3.0, 2.0]),\n ([100, 300, 200], 'dense', [1.0, 3.0, 2.0]),\n ([100, 300, 200], 'ordinal', [1.0, 3.0, 2.0]),\n #\n ([100, 200, 300, 200], 'average', [1.0, 2.5, 4.0, 2.5]),\n ([100, 200, 300, 200], 'min', [1.0, 2.0, 4.0, 2.0]),\n ([100, 200, 300, 200], 'max', [1.0, 3.0, 4.0, 3.0]),\n ([100, 200, 300, 200], 'dense', [1.0, 2.0, 3.0, 2.0]),\n ([100, 200, 300, 200], 'ordinal', [1.0, 2.0, 4.0, 3.0]),\n #\n ([100, 200, 300, 200, 100], 'average', [1.5, 3.5, 5.0, 3.5, 1.5]),\n ([100, 200, 300, 200, 100], 'min', [1.0, 3.0, 5.0, 3.0, 1.0]),\n ([100, 200, 300, 200, 100], 'max', [2.0, 4.0, 5.0, 4.0, 2.0]),\n ([100, 200, 300, 200, 100], 'dense', [1.0, 2.0, 3.0, 2.0, 1.0]),\n ([100, 200, 300, 200, 100], 'ordinal', [1.0, 3.0, 5.0, 4.0, 2.0]),\n #\n ([10] * 30, 'ordinal', np.arange(1.0, 31.0)),\n)\n\n\ndef test_cases():\n for values, method, expected in _cases:\n r = rankdata(values, method=method)\n assert_array_equal(r, expected)\n", "\"\"\"\ntest .agg behavior / note that .apply is tested generally in test_groupby.py\n\"\"\"\nimport datetime\nimport functools\nfrom functools import partial\n\nimport numpy as np\nimport pytest\n\nfrom pandas.errors import PerformanceWarning\n\nfrom pandas.core.dtypes.common import is_integer_dtype\n\nimport pandas as pd\nfrom pandas import DataFrame, Index, MultiIndex, Series, concat\nimport pandas._testing as tm\nfrom pandas.core.base import SpecificationError\nfrom pandas.core.groupby.grouper import 
Grouping\n\n\ndef test_groupby_agg_no_extra_calls():\n # GH#31760\n df = DataFrame({\"key\": [\"a\", \"b\", \"c\", \"c\"], \"value\": [1, 2, 3, 4]})\n gb = df.groupby(\"key\")[\"value\"]\n\n def dummy_func(x):\n assert len(x) != 0\n return x.sum()\n\n gb.agg(dummy_func)\n\n\ndef test_agg_regression1(tsframe):\n grouped = tsframe.groupby([lambda x: x.year, lambda x: x.month])\n result = grouped.agg(np.mean)\n expected = grouped.mean()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_agg_must_agg(df):\n grouped = df.groupby(\"A\")[\"C\"]\n\n msg = \"Must produce aggregated value\"\n with pytest.raises(Exception, match=msg):\n grouped.agg(lambda x: x.describe())\n with pytest.raises(Exception, match=msg):\n grouped.agg(lambda x: x.index[:2])\n\n\ndef test_agg_ser_multi_key(df):\n # TODO(wesm): unused\n ser = df.C # noqa\n\n f = lambda x: x.sum()\n results = df.C.groupby([df.A, df.B]).aggregate(f)\n expected = df.groupby([\"A\", \"B\"]).sum()[\"C\"]\n tm.assert_series_equal(results, expected)\n\n\ndef test_groupby_aggregation_mixed_dtype():\n\n # GH 6212\n expected = DataFrame(\n {\n \"v1\": [5, 5, 7, np.nan, 3, 3, 4, 1],\n \"v2\": [55, 55, 77, np.nan, 33, 33, 44, 11],\n },\n index=MultiIndex.from_tuples(\n [\n (1, 95),\n (1, 99),\n (2, 95),\n (2, 99),\n (\"big\", \"damp\"),\n (\"blue\", \"dry\"),\n (\"red\", \"red\"),\n (\"red\", \"wet\"),\n ],\n names=[\"by1\", \"by2\"],\n ),\n )\n\n df = DataFrame(\n {\n \"v1\": [1, 3, 5, 7, 8, 3, 5, np.nan, 4, 5, 7, 9],\n \"v2\": [11, 33, 55, 77, 88, 33, 55, np.nan, 44, 55, 77, 99],\n \"by1\": [\"red\", \"blue\", 1, 2, np.nan, \"big\", 1, 2, \"red\", 1, np.nan, 12],\n \"by2\": [\n \"wet\",\n \"dry\",\n 99,\n 95,\n np.nan,\n \"damp\",\n 95,\n 99,\n \"red\",\n 99,\n np.nan,\n np.nan,\n ],\n }\n )\n\n g = df.groupby([\"by1\", \"by2\"])\n result = g[[\"v1\", \"v2\"]].mean()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_groupby_aggregation_multi_level_column():\n # GH 29772\n lst = [\n [True, True, True, False],\n [True, False, np.nan, False],\n [True, True, np.nan, False],\n [True, True, np.nan, False],\n ]\n df = DataFrame(\n data=lst,\n columns=pd.MultiIndex.from_tuples([(\"A\", 0), (\"A\", 1), (\"B\", 0), (\"B\", 1)]),\n )\n\n result = df.groupby(level=1, axis=1).sum()\n expected = DataFrame({0: [2.0, 1, 1, 1], 1: [1, 0, 1, 1]})\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_agg_apply_corner(ts, tsframe):\n # nothing to group, all NA\n grouped = ts.groupby(ts * np.nan)\n assert ts.dtype == np.float64\n\n # groupby float64 values results in Float64Index\n exp = Series([], dtype=np.float64, index=Index([], dtype=np.float64))\n tm.assert_series_equal(grouped.sum(), exp)\n tm.assert_series_equal(grouped.agg(np.sum), exp)\n tm.assert_series_equal(grouped.apply(np.sum), exp, check_index_type=False)\n\n # DataFrame\n grouped = tsframe.groupby(tsframe[\"A\"] * np.nan)\n exp_df = DataFrame(\n columns=tsframe.columns, dtype=float, index=Index([], dtype=np.float64)\n )\n tm.assert_frame_equal(grouped.sum(), exp_df, check_names=False)\n tm.assert_frame_equal(grouped.agg(np.sum), exp_df, check_names=False)\n tm.assert_frame_equal(grouped.apply(np.sum), exp_df.iloc[:, :0], check_names=False)\n\n\ndef test_agg_grouping_is_list_tuple(ts):\n df = tm.makeTimeDataFrame()\n\n grouped = df.groupby(lambda x: x.year)\n grouper = grouped.grouper.groupings[0].grouper\n grouped.grouper.groupings[0] = Grouping(ts.index, list(grouper))\n\n result = grouped.agg(np.mean)\n expected = grouped.mean()\n tm.assert_frame_equal(result, expected)\n\n 
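    # Descriptive comment (editorial addition): repeat the aggregation with a tuple-based grouper; it should match the list-based grouping above.\n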
grouped.grouper.groupings[0] = Grouping(ts.index, tuple(grouper))\n\n result = grouped.agg(np.mean)\n expected = grouped.mean()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_agg_python_multiindex(mframe):\n grouped = mframe.groupby([\"A\", \"B\"])\n\n result = grouped.agg(np.mean)\n expected = grouped.mean()\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\n \"groupbyfunc\", [lambda x: x.weekday(), [lambda x: x.month, lambda x: x.weekday()]]\n)\ndef test_aggregate_str_func(tsframe, groupbyfunc):\n grouped = tsframe.groupby(groupbyfunc)\n\n # single series\n result = grouped[\"A\"].agg(\"std\")\n expected = grouped[\"A\"].std()\n tm.assert_series_equal(result, expected)\n\n # group frame by function name\n result = grouped.aggregate(\"var\")\n expected = grouped.var()\n tm.assert_frame_equal(result, expected)\n\n # group frame by function dict\n result = grouped.agg({\"A\": \"var\", \"B\": \"std\", \"C\": \"mean\", \"D\": \"sem\"})\n expected = DataFrame(\n {\n \"A\": grouped[\"A\"].var(),\n \"B\": grouped[\"B\"].std(),\n \"C\": grouped[\"C\"].mean(),\n \"D\": grouped[\"D\"].sem(),\n }\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_aggregate_item_by_item(df):\n grouped = df.groupby(\"A\")\n\n aggfun = lambda ser: ser.size\n result = grouped.agg(aggfun)\n foo = (df.A == \"foo\").sum()\n bar = (df.A == \"bar\").sum()\n K = len(result.columns)\n\n # GH5782\n # odd comparisons can result here, so cast to make easy\n exp = Series(np.array([foo] * K), index=list(\"BCD\"), dtype=np.float64, name=\"foo\")\n tm.assert_series_equal(result.xs(\"foo\"), exp)\n\n exp = Series(np.array([bar] * K), index=list(\"BCD\"), dtype=np.float64, name=\"bar\")\n tm.assert_almost_equal(result.xs(\"bar\"), exp)\n\n def aggfun(ser):\n return ser.size\n\n result = DataFrame().groupby(df.A).agg(aggfun)\n assert isinstance(result, DataFrame)\n assert len(result) == 0\n\n\ndef test_wrap_agg_out(three_group):\n grouped = three_group.groupby([\"A\", \"B\"])\n\n def func(ser):\n if ser.dtype == object:\n raise TypeError\n else:\n return ser.sum()\n\n result = grouped.aggregate(func)\n exp_grouped = three_group.loc[:, three_group.columns != \"C\"]\n expected = exp_grouped.groupby([\"A\", \"B\"]).aggregate(func)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_agg_multiple_functions_maintain_order(df):\n # GH #610\n funcs = [(\"mean\", np.mean), (\"max\", np.max), (\"min\", np.min)]\n result = df.groupby(\"A\")[\"C\"].agg(funcs)\n exp_cols = Index([\"mean\", \"max\", \"min\"])\n\n tm.assert_index_equal(result.columns, exp_cols)\n\n\ndef test_agg_multiple_functions_same_name():\n # GH 30880\n df = DataFrame(\n np.random.randn(1000, 3),\n index=pd.date_range(\"1/1/2012\", freq=\"S\", periods=1000),\n columns=[\"A\", \"B\", \"C\"],\n )\n result = df.resample(\"3T\").agg(\n {\"A\": [partial(np.quantile, q=0.9999), partial(np.quantile, q=0.1111)]}\n )\n expected_index = pd.date_range(\"1/1/2012\", freq=\"3T\", periods=6)\n expected_columns = MultiIndex.from_tuples([(\"A\", \"quantile\"), (\"A\", \"quantile\")])\n expected_values = np.array(\n [df.resample(\"3T\").A.quantile(q=q).values for q in [0.9999, 0.1111]]\n ).T\n expected = DataFrame(\n expected_values, columns=expected_columns, index=expected_index\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_agg_multiple_functions_same_name_with_ohlc_present():\n # GH 30880\n # ohlc expands dimensions, so different test to the above is required.\n df = DataFrame(\n np.random.randn(1000, 3),\n index=pd.date_range(\"1/1/2012\", 
freq=\"S\", periods=1000),\n columns=[\"A\", \"B\", \"C\"],\n )\n result = df.resample(\"3T\").agg(\n {\"A\": [\"ohlc\", partial(np.quantile, q=0.9999), partial(np.quantile, q=0.1111)]}\n )\n expected_index = pd.date_range(\"1/1/2012\", freq=\"3T\", periods=6)\n expected_columns = pd.MultiIndex.from_tuples(\n [\n (\"A\", \"ohlc\", \"open\"),\n (\"A\", \"ohlc\", \"high\"),\n (\"A\", \"ohlc\", \"low\"),\n (\"A\", \"ohlc\", \"close\"),\n (\"A\", \"quantile\", \"A\"),\n (\"A\", \"quantile\", \"A\"),\n ]\n )\n non_ohlc_expected_values = np.array(\n [df.resample(\"3T\").A.quantile(q=q).values for q in [0.9999, 0.1111]]\n ).T\n expected_values = np.hstack([df.resample(\"3T\").A.ohlc(), non_ohlc_expected_values])\n expected = DataFrame(\n expected_values, columns=expected_columns, index=expected_index\n )\n # PerformanceWarning is thrown by `assert col in right` in assert_frame_equal\n with tm.assert_produces_warning(PerformanceWarning):\n tm.assert_frame_equal(result, expected)\n\n\ndef test_multiple_functions_tuples_and_non_tuples(df):\n # #1359\n funcs = [(\"foo\", \"mean\"), \"std\"]\n ex_funcs = [(\"foo\", \"mean\"), (\"std\", \"std\")]\n\n result = df.groupby(\"A\")[\"C\"].agg(funcs)\n expected = df.groupby(\"A\")[\"C\"].agg(ex_funcs)\n tm.assert_frame_equal(result, expected)\n\n result = df.groupby(\"A\").agg(funcs)\n expected = df.groupby(\"A\").agg(ex_funcs)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_more_flexible_frame_multi_function(df):\n grouped = df.groupby(\"A\")\n\n exmean = grouped.agg({\"C\": np.mean, \"D\": np.mean})\n exstd = grouped.agg({\"C\": np.std, \"D\": np.std})\n\n expected = concat([exmean, exstd], keys=[\"mean\", \"std\"], axis=1)\n expected = expected.swaplevel(0, 1, axis=1).sort_index(level=0, axis=1)\n\n d = {\"C\": [np.mean, np.std], \"D\": [np.mean, np.std]}\n result = grouped.aggregate(d)\n\n tm.assert_frame_equal(result, expected)\n\n # be careful\n result = grouped.aggregate({\"C\": np.mean, \"D\": [np.mean, np.std]})\n expected = grouped.aggregate({\"C\": np.mean, \"D\": [np.mean, np.std]})\n tm.assert_frame_equal(result, expected)\n\n def foo(x):\n return np.mean(x)\n\n def bar(x):\n return np.std(x, ddof=1)\n\n # this uses column selection & renaming\n msg = r\"nested renamer is not supported\"\n with pytest.raises(SpecificationError, match=msg):\n d = {\"C\": np.mean, \"D\": {\"foo\": np.mean, \"bar\": np.std}}\n grouped.aggregate(d)\n\n # But without renaming, these functions are OK\n d = {\"C\": [np.mean], \"D\": [foo, bar]}\n grouped.aggregate(d)\n\n\ndef test_multi_function_flexible_mix(df):\n # GH #1268\n grouped = df.groupby(\"A\")\n\n # Expected\n d = {\"C\": {\"foo\": \"mean\", \"bar\": \"std\"}, \"D\": {\"sum\": \"sum\"}}\n # this uses column selection & renaming\n msg = r\"nested renamer is not supported\"\n with pytest.raises(SpecificationError, match=msg):\n grouped.aggregate(d)\n\n # Test 1\n d = {\"C\": {\"foo\": \"mean\", \"bar\": \"std\"}, \"D\": \"sum\"}\n # this uses column selection & renaming\n with pytest.raises(SpecificationError, match=msg):\n grouped.aggregate(d)\n\n # Test 2\n d = {\"C\": {\"foo\": \"mean\", \"bar\": \"std\"}, \"D\": \"sum\"}\n # this uses column selection & renaming\n with pytest.raises(SpecificationError, match=msg):\n grouped.aggregate(d)\n\n\ndef test_groupby_agg_coercing_bools():\n # issue 14873\n dat = DataFrame({\"a\": [1, 1, 2, 2], \"b\": [0, 1, 2, 3], \"c\": [None, None, 1, 1]})\n gp = dat.groupby(\"a\")\n\n index = Index([1, 2], name=\"a\")\n\n result = gp[\"b\"].aggregate(lambda x: (x != 
0).all())\n expected = Series([False, True], index=index, name=\"b\")\n tm.assert_series_equal(result, expected)\n\n result = gp[\"c\"].aggregate(lambda x: x.isnull().all())\n expected = Series([True, False], index=index, name=\"c\")\n tm.assert_series_equal(result, expected)\n\n\[email protected](\n \"op\",\n [\n lambda x: x.sum(),\n lambda x: x.cumsum(),\n lambda x: x.transform(\"sum\"),\n lambda x: x.transform(\"cumsum\"),\n lambda x: x.agg(\"sum\"),\n lambda x: x.agg(\"cumsum\"),\n ],\n)\ndef test_bool_agg_dtype(op):\n # GH 7001\n # Bool sum aggregations result in int\n df = DataFrame({\"a\": [1, 1], \"b\": [False, True]})\n s = df.set_index(\"a\")[\"b\"]\n\n result = op(df.groupby(\"a\"))[\"b\"].dtype\n assert is_integer_dtype(result)\n\n result = op(s.groupby(\"a\")).dtype\n assert is_integer_dtype(result)\n\n\ndef test_order_aggregate_multiple_funcs():\n # GH 25692\n df = DataFrame({\"A\": [1, 1, 2, 2], \"B\": [1, 2, 3, 4]})\n\n res = df.groupby(\"A\").agg([\"sum\", \"max\", \"mean\", \"ohlc\", \"min\"])\n result = res.columns.levels[1]\n\n expected = Index([\"sum\", \"max\", \"mean\", \"ohlc\", \"min\"])\n\n tm.assert_index_equal(result, expected)\n\n\[email protected](\"dtype\", [np.int64, np.uint64])\[email protected](\"how\", [\"first\", \"last\", \"min\", \"max\", \"mean\", \"median\"])\ndef test_uint64_type_handling(dtype, how):\n # GH 26310\n df = DataFrame({\"x\": 6903052872240755750, \"y\": [1, 2]})\n expected = df.groupby(\"y\").agg({\"x\": how})\n df.x = df.x.astype(dtype)\n result = df.groupby(\"y\").agg({\"x\": how})\n result.x = result.x.astype(np.int64)\n tm.assert_frame_equal(result, expected, check_exact=True)\n\n\ndef test_func_duplicates_raises():\n # GH28426\n msg = \"Function names\"\n df = DataFrame({\"A\": [0, 0, 1, 1], \"B\": [1, 2, 3, 4]})\n with pytest.raises(SpecificationError, match=msg):\n df.groupby(\"A\").agg([\"min\", \"min\"])\n\n\[email protected](\n \"index\",\n [\n pd.CategoricalIndex(list(\"abc\")),\n pd.interval_range(0, 3),\n pd.period_range(\"2020\", periods=3, freq=\"D\"),\n pd.MultiIndex.from_tuples([(\"a\", 0), (\"a\", 1), (\"b\", 0)]),\n ],\n)\ndef test_agg_index_has_complex_internals(index):\n # GH 31223\n df = DataFrame({\"group\": [1, 1, 2], \"value\": [0, 1, 0]}, index=index)\n result = df.groupby(\"group\").agg({\"value\": Series.nunique})\n expected = DataFrame({\"group\": [1, 2], \"value\": [2, 1]}).set_index(\"group\")\n tm.assert_frame_equal(result, expected)\n\n\ndef test_agg_split_block():\n # https://github.com/pandas-dev/pandas/issues/31522\n df = DataFrame(\n {\n \"key1\": [\"a\", \"a\", \"b\", \"b\", \"a\"],\n \"key2\": [\"one\", \"two\", \"one\", \"two\", \"one\"],\n \"key3\": [\"three\", \"three\", \"three\", \"six\", \"six\"],\n }\n )\n result = df.groupby(\"key1\").min()\n expected = DataFrame(\n {\"key2\": [\"one\", \"one\"], \"key3\": [\"six\", \"six\"]},\n index=Index([\"a\", \"b\"], name=\"key1\"),\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_agg_split_object_part_datetime():\n # https://github.com/pandas-dev/pandas/pull/31616\n df = DataFrame(\n {\n \"A\": pd.date_range(\"2000\", periods=4),\n \"B\": [\"a\", \"b\", \"c\", \"d\"],\n \"C\": [1, 2, 3, 4],\n \"D\": [\"b\", \"c\", \"d\", \"e\"],\n \"E\": pd.date_range(\"2000\", periods=4),\n \"F\": [1, 2, 3, 4],\n }\n ).astype(object)\n result = df.groupby([0, 0, 0, 0]).min()\n expected = DataFrame(\n {\n \"A\": [pd.Timestamp(\"2000\")],\n \"B\": [\"a\"],\n \"C\": [1],\n \"D\": [\"b\"],\n \"E\": [pd.Timestamp(\"2000\")],\n \"F\": [1],\n }\n )\n 
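    # Descriptive comment (editorial addition): min over the object-cast frame forms a single group and should return each column's minimum without raising.\n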
tm.assert_frame_equal(result, expected)\n\n\nclass TestNamedAggregationSeries:\n def test_series_named_agg(self):\n df = Series([1, 2, 3, 4])\n gr = df.groupby([0, 0, 1, 1])\n result = gr.agg(a=\"sum\", b=\"min\")\n expected = DataFrame(\n {\"a\": [3, 7], \"b\": [1, 3]}, columns=[\"a\", \"b\"], index=[0, 1]\n )\n tm.assert_frame_equal(result, expected)\n\n result = gr.agg(b=\"min\", a=\"sum\")\n expected = expected[[\"b\", \"a\"]]\n tm.assert_frame_equal(result, expected)\n\n def test_no_args_raises(self):\n gr = Series([1, 2]).groupby([0, 1])\n with pytest.raises(TypeError, match=\"Must provide\"):\n gr.agg()\n\n # but we do allow this\n result = gr.agg([])\n expected = DataFrame()\n tm.assert_frame_equal(result, expected)\n\n def test_series_named_agg_duplicates_no_raises(self):\n # GH28426\n gr = Series([1, 2, 3]).groupby([0, 0, 1])\n grouped = gr.agg(a=\"sum\", b=\"sum\")\n expected = DataFrame({\"a\": [3, 3], \"b\": [3, 3]})\n tm.assert_frame_equal(expected, grouped)\n\n def test_mangled(self):\n gr = Series([1, 2, 3]).groupby([0, 0, 1])\n result = gr.agg(a=lambda x: 0, b=lambda x: 1)\n expected = DataFrame({\"a\": [0, 0], \"b\": [1, 1]})\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"inp\",\n [\n pd.NamedAgg(column=\"anything\", aggfunc=\"min\"),\n (\"anything\", \"min\"),\n [\"anything\", \"min\"],\n ],\n )\n def test_named_agg_nametuple(self, inp):\n # GH34422\n s = Series([1, 1, 2, 2, 3, 3, 4, 5])\n msg = f\"func is expected but received {type(inp).__name__}\"\n with pytest.raises(TypeError, match=msg):\n s.groupby(s.values).agg(a=inp)\n\n\nclass TestNamedAggregationDataFrame:\n def test_agg_relabel(self):\n df = DataFrame(\n {\"group\": [\"a\", \"a\", \"b\", \"b\"], \"A\": [0, 1, 2, 3], \"B\": [5, 6, 7, 8]}\n )\n result = df.groupby(\"group\").agg(a_max=(\"A\", \"max\"), b_max=(\"B\", \"max\"))\n expected = DataFrame(\n {\"a_max\": [1, 3], \"b_max\": [6, 8]},\n index=Index([\"a\", \"b\"], name=\"group\"),\n columns=[\"a_max\", \"b_max\"],\n )\n tm.assert_frame_equal(result, expected)\n\n # order invariance\n p98 = functools.partial(np.percentile, q=98)\n result = df.groupby(\"group\").agg(\n b_min=(\"B\", \"min\"),\n a_min=(\"A\", min),\n a_mean=(\"A\", np.mean),\n a_max=(\"A\", \"max\"),\n b_max=(\"B\", \"max\"),\n a_98=(\"A\", p98),\n )\n expected = DataFrame(\n {\n \"b_min\": [5, 7],\n \"a_min\": [0, 2],\n \"a_mean\": [0.5, 2.5],\n \"a_max\": [1, 3],\n \"b_max\": [6, 8],\n \"a_98\": [0.98, 2.98],\n },\n index=Index([\"a\", \"b\"], name=\"group\"),\n columns=[\"b_min\", \"a_min\", \"a_mean\", \"a_max\", \"b_max\", \"a_98\"],\n )\n tm.assert_frame_equal(result, expected)\n\n def test_agg_relabel_non_identifier(self):\n df = DataFrame(\n {\"group\": [\"a\", \"a\", \"b\", \"b\"], \"A\": [0, 1, 2, 3], \"B\": [5, 6, 7, 8]}\n )\n\n result = df.groupby(\"group\").agg(**{\"my col\": (\"A\", \"max\")})\n expected = DataFrame({\"my col\": [1, 3]}, index=Index([\"a\", \"b\"], name=\"group\"))\n tm.assert_frame_equal(result, expected)\n\n def test_duplicate_no_raises(self):\n # GH 28426, if use same input function on same column,\n # no error should raise\n df = DataFrame({\"A\": [0, 0, 1, 1], \"B\": [1, 2, 3, 4]})\n\n grouped = df.groupby(\"A\").agg(a=(\"B\", \"min\"), b=(\"B\", \"min\"))\n expected = DataFrame({\"a\": [1, 3], \"b\": [1, 3]}, index=Index([0, 1], name=\"A\"))\n tm.assert_frame_equal(grouped, expected)\n\n quant50 = functools.partial(np.percentile, q=50)\n quant70 = functools.partial(np.percentile, q=70)\n quant50.__name__ = \"quant50\"\n 
quant70.__name__ = \"quant70\"\n\n test = DataFrame({\"col1\": [\"a\", \"a\", \"b\", \"b\", \"b\"], \"col2\": [1, 2, 3, 4, 5]})\n\n grouped = test.groupby(\"col1\").agg(\n quantile_50=(\"col2\", quant50), quantile_70=(\"col2\", quant70)\n )\n expected = DataFrame(\n {\"quantile_50\": [1.5, 4.0], \"quantile_70\": [1.7, 4.4]},\n index=Index([\"a\", \"b\"], name=\"col1\"),\n )\n tm.assert_frame_equal(grouped, expected)\n\n def test_agg_relabel_with_level(self):\n df = DataFrame(\n {\"A\": [0, 0, 1, 1], \"B\": [1, 2, 3, 4]},\n index=pd.MultiIndex.from_product([[\"A\", \"B\"], [\"a\", \"b\"]]),\n )\n result = df.groupby(level=0).agg(\n aa=(\"A\", \"max\"), bb=(\"A\", \"min\"), cc=(\"B\", \"mean\")\n )\n expected = DataFrame(\n {\"aa\": [0, 1], \"bb\": [0, 1], \"cc\": [1.5, 3.5]}, index=[\"A\", \"B\"]\n )\n tm.assert_frame_equal(result, expected)\n\n def test_agg_relabel_other_raises(self):\n df = DataFrame({\"A\": [0, 0, 1], \"B\": [1, 2, 3]})\n grouped = df.groupby(\"A\")\n match = \"Must provide\"\n with pytest.raises(TypeError, match=match):\n grouped.agg(foo=1)\n\n with pytest.raises(TypeError, match=match):\n grouped.agg()\n\n with pytest.raises(TypeError, match=match):\n grouped.agg(a=(\"B\", \"max\"), b=(1, 2, 3))\n\n def test_missing_raises(self):\n df = DataFrame({\"A\": [0, 1], \"B\": [1, 2]})\n with pytest.raises(KeyError, match=\"Column 'C' does not exist\"):\n df.groupby(\"A\").agg(c=(\"C\", \"sum\"))\n\n def test_agg_namedtuple(self):\n df = DataFrame({\"A\": [0, 1], \"B\": [1, 2]})\n result = df.groupby(\"A\").agg(\n b=pd.NamedAgg(\"B\", \"sum\"), c=pd.NamedAgg(column=\"B\", aggfunc=\"count\")\n )\n expected = df.groupby(\"A\").agg(b=(\"B\", \"sum\"), c=(\"B\", \"count\"))\n tm.assert_frame_equal(result, expected)\n\n def test_mangled(self):\n df = DataFrame({\"A\": [0, 1], \"B\": [1, 2], \"C\": [3, 4]})\n result = df.groupby(\"A\").agg(b=(\"B\", lambda x: 0), c=(\"C\", lambda x: 1))\n expected = DataFrame({\"b\": [0, 0], \"c\": [1, 1]}, index=Index([0, 1], name=\"A\"))\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\n \"agg_col1, agg_col2, agg_col3, agg_result1, agg_result2, agg_result3\",\n [\n (\n ((\"y\", \"A\"), \"max\"),\n ((\"y\", \"A\"), np.min),\n ((\"y\", \"B\"), \"mean\"),\n [1, 3],\n [0, 2],\n [5.5, 7.5],\n ),\n (\n ((\"y\", \"A\"), lambda x: max(x)),\n ((\"y\", \"A\"), lambda x: 1),\n ((\"y\", \"B\"), \"mean\"),\n [1, 3],\n [1, 1],\n [5.5, 7.5],\n ),\n (\n pd.NamedAgg((\"y\", \"A\"), \"max\"),\n pd.NamedAgg((\"y\", \"B\"), np.mean),\n pd.NamedAgg((\"y\", \"A\"), lambda x: 1),\n [1, 3],\n [5.5, 7.5],\n [1, 1],\n ),\n ],\n)\ndef test_agg_relabel_multiindex_column(\n agg_col1, agg_col2, agg_col3, agg_result1, agg_result2, agg_result3\n):\n # GH 29422, add tests for multiindex column cases\n df = DataFrame(\n {\"group\": [\"a\", \"a\", \"b\", \"b\"], \"A\": [0, 1, 2, 3], \"B\": [5, 6, 7, 8]}\n )\n df.columns = pd.MultiIndex.from_tuples([(\"x\", \"group\"), (\"y\", \"A\"), (\"y\", \"B\")])\n idx = Index([\"a\", \"b\"], name=(\"x\", \"group\"))\n\n result = df.groupby((\"x\", \"group\")).agg(a_max=((\"y\", \"A\"), \"max\"))\n expected = DataFrame({\"a_max\": [1, 3]}, index=idx)\n tm.assert_frame_equal(result, expected)\n\n result = df.groupby((\"x\", \"group\")).agg(\n col_1=agg_col1, col_2=agg_col2, col_3=agg_col3\n )\n expected = DataFrame(\n {\"col_1\": agg_result1, \"col_2\": agg_result2, \"col_3\": agg_result3}, index=idx\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_agg_relabel_multiindex_raises_not_exist():\n # GH 29422, add test for 
raises senario when aggregate column does not exist\n df = DataFrame(\n {\"group\": [\"a\", \"a\", \"b\", \"b\"], \"A\": [0, 1, 2, 3], \"B\": [5, 6, 7, 8]}\n )\n df.columns = pd.MultiIndex.from_tuples([(\"x\", \"group\"), (\"y\", \"A\"), (\"y\", \"B\")])\n\n with pytest.raises(KeyError, match=\"does not exist\"):\n df.groupby((\"x\", \"group\")).agg(a=((\"Y\", \"a\"), \"max\"))\n\n\ndef test_agg_relabel_multiindex_duplicates():\n # GH29422, add test for raises senario when getting duplicates\n # GH28426, after this change, duplicates should also work if the relabelling is\n # different\n df = DataFrame(\n {\"group\": [\"a\", \"a\", \"b\", \"b\"], \"A\": [0, 1, 2, 3], \"B\": [5, 6, 7, 8]}\n )\n df.columns = pd.MultiIndex.from_tuples([(\"x\", \"group\"), (\"y\", \"A\"), (\"y\", \"B\")])\n\n result = df.groupby((\"x\", \"group\")).agg(\n a=((\"y\", \"A\"), \"min\"), b=((\"y\", \"A\"), \"min\")\n )\n idx = Index([\"a\", \"b\"], name=(\"x\", \"group\"))\n expected = DataFrame({\"a\": [0, 2], \"b\": [0, 2]}, index=idx)\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"kwargs\", [{\"c\": [\"min\"]}, {\"b\": [], \"c\": [\"min\"]}])\ndef test_groupby_aggregate_empty_key(kwargs):\n # GH: 32580\n df = DataFrame({\"a\": [1, 1, 2], \"b\": [1, 2, 3], \"c\": [1, 2, 4]})\n result = df.groupby(\"a\").agg(kwargs)\n expected = DataFrame(\n [1, 4],\n index=Index([1, 2], dtype=\"int64\", name=\"a\"),\n columns=pd.MultiIndex.from_tuples([[\"c\", \"min\"]]),\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_groupby_aggregate_empty_key_empty_return():\n # GH: 32580 Check if everything works, when return is empty\n df = DataFrame({\"a\": [1, 1, 2], \"b\": [1, 2, 3], \"c\": [1, 2, 4]})\n result = df.groupby(\"a\").agg({\"b\": []})\n expected = DataFrame(columns=pd.MultiIndex(levels=[[\"b\"], []], codes=[[], []]))\n tm.assert_frame_equal(result, expected)\n\n\ndef test_grouby_agg_loses_results_with_as_index_false_relabel():\n # GH 32240: When the aggregate function relabels column names and\n # as_index=False is specified, the results are dropped.\n\n df = DataFrame(\n {\"key\": [\"x\", \"y\", \"z\", \"x\", \"y\", \"z\"], \"val\": [1.0, 0.8, 2.0, 3.0, 3.6, 0.75]}\n )\n\n grouped = df.groupby(\"key\", as_index=False)\n result = grouped.agg(min_val=pd.NamedAgg(column=\"val\", aggfunc=\"min\"))\n expected = DataFrame({\"key\": [\"x\", \"y\", \"z\"], \"min_val\": [1.0, 0.8, 0.75]})\n tm.assert_frame_equal(result, expected)\n\n\ndef test_grouby_agg_loses_results_with_as_index_false_relabel_multiindex():\n # GH 32240: When the aggregate function relabels column names and\n # as_index=False is specified, the results are dropped. 
Check if\n # multiindex is returned in the right order\n\n df = DataFrame(\n {\n \"key\": [\"x\", \"y\", \"x\", \"y\", \"x\", \"x\"],\n \"key1\": [\"a\", \"b\", \"c\", \"b\", \"a\", \"c\"],\n \"val\": [1.0, 0.8, 2.0, 3.0, 3.6, 0.75],\n }\n )\n\n grouped = df.groupby([\"key\", \"key1\"], as_index=False)\n result = grouped.agg(min_val=pd.NamedAgg(column=\"val\", aggfunc=\"min\"))\n expected = DataFrame(\n {\"key\": [\"x\", \"x\", \"y\"], \"key1\": [\"a\", \"c\", \"b\"], \"min_val\": [1.0, 0.75, 0.8]}\n )\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\n \"func\", [lambda s: s.mean(), lambda s: np.mean(s), lambda s: np.nanmean(s)]\n)\ndef test_multiindex_custom_func(func):\n # GH 31777\n data = [[1, 4, 2], [5, 7, 1]]\n df = DataFrame(data, columns=pd.MultiIndex.from_arrays([[1, 1, 2], [3, 4, 3]]))\n result = df.groupby(np.array([0, 1])).agg(func)\n expected_dict = {(1, 3): {0: 1, 1: 5}, (1, 4): {0: 4, 1: 7}, (2, 3): {0: 2, 1: 1}}\n expected = DataFrame(expected_dict)\n tm.assert_frame_equal(result, expected)\n\n\ndef myfunc(s):\n return np.percentile(s, q=0.90)\n\n\[email protected](\"func\", [lambda s: np.percentile(s, q=0.90), myfunc])\ndef test_lambda_named_agg(func):\n # see gh-28467\n animals = DataFrame(\n {\n \"kind\": [\"cat\", \"dog\", \"cat\", \"dog\"],\n \"height\": [9.1, 6.0, 9.5, 34.0],\n \"weight\": [7.9, 7.5, 9.9, 198.0],\n }\n )\n\n result = animals.groupby(\"kind\").agg(\n mean_height=(\"height\", \"mean\"), perc90=(\"height\", func)\n )\n expected = DataFrame(\n [[9.3, 9.1036], [20.0, 6.252]],\n columns=[\"mean_height\", \"perc90\"],\n index=Index([\"cat\", \"dog\"], name=\"kind\"),\n )\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_aggregate_mixed_types():\n # GH 16916\n df = DataFrame(\n data=np.array([0] * 9).reshape(3, 3), columns=list(\"XYZ\"), index=list(\"abc\")\n )\n df[\"grouping\"] = [\"group 1\", \"group 1\", 2]\n result = df.groupby(\"grouping\").aggregate(lambda x: x.tolist())\n expected_data = [[[0], [0], [0]], [[0, 0], [0, 0], [0, 0]]]\n expected = DataFrame(\n expected_data,\n index=Index([2, \"group 1\"], dtype=\"object\", name=\"grouping\"),\n columns=Index([\"X\", \"Y\", \"Z\"], dtype=\"object\"),\n )\n tm.assert_frame_equal(result, expected)\n\n\[email protected](reason=\"Not implemented;see GH 31256\")\ndef test_aggregate_udf_na_extension_type():\n # https://github.com/pandas-dev/pandas/pull/31359\n # This is currently failing to cast back to Int64Dtype.\n # The presence of the NA causes two problems\n # 1. NA is not an instance of Int64Dtype.type (numpy.int64)\n # 2. The presence of an NA forces object type, so the non-NA values is\n # a Python int rather than a NumPy int64. 
Python ints aren't\n # instances of numpy.int64.\n def aggfunc(x):\n if all(x > 2):\n return 1\n else:\n return pd.NA\n\n df = DataFrame({\"A\": pd.array([1, 2, 3])})\n result = df.groupby([1, 1, 2]).agg(aggfunc)\n expected = DataFrame({\"A\": pd.array([1, pd.NA], dtype=\"Int64\")}, index=[1, 2])\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"func\", [\"min\", \"max\"])\ndef test_groupby_aggregate_period_column(func):\n # GH 31471\n groups = [1, 2]\n periods = pd.period_range(\"2020\", periods=2, freq=\"Y\")\n df = DataFrame({\"a\": groups, \"b\": periods})\n\n result = getattr(df.groupby(\"a\")[\"b\"], func)()\n idx = pd.Int64Index([1, 2], name=\"a\")\n expected = Series(periods, index=idx, name=\"b\")\n\n tm.assert_series_equal(result, expected)\n\n\[email protected](\"func\", [\"min\", \"max\"])\ndef test_groupby_aggregate_period_frame(func):\n # GH 31471\n groups = [1, 2]\n periods = pd.period_range(\"2020\", periods=2, freq=\"Y\")\n df = DataFrame({\"a\": groups, \"b\": periods})\n\n result = getattr(df.groupby(\"a\"), func)()\n idx = pd.Int64Index([1, 2], name=\"a\")\n expected = DataFrame({\"b\": periods}, index=idx)\n\n tm.assert_frame_equal(result, expected)\n\n\nclass TestLambdaMangling:\n def test_basic(self):\n df = DataFrame({\"A\": [0, 0, 1, 1], \"B\": [1, 2, 3, 4]})\n result = df.groupby(\"A\").agg({\"B\": [lambda x: 0, lambda x: 1]})\n\n expected = DataFrame(\n {(\"B\", \"<lambda_0>\"): [0, 0], (\"B\", \"<lambda_1>\"): [1, 1]},\n index=Index([0, 1], name=\"A\"),\n )\n tm.assert_frame_equal(result, expected)\n\n def test_mangle_series_groupby(self):\n gr = Series([1, 2, 3, 4]).groupby([0, 0, 1, 1])\n result = gr.agg([lambda x: 0, lambda x: 1])\n expected = DataFrame({\"<lambda_0>\": [0, 0], \"<lambda_1>\": [1, 1]})\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.xfail(reason=\"GH-26611. 
kwargs for multi-agg.\")\n def test_with_kwargs(self):\n f1 = lambda x, y, b=1: x.sum() + y + b\n f2 = lambda x, y, b=2: x.sum() + y * b\n result = Series([1, 2]).groupby([0, 0]).agg([f1, f2], 0)\n expected = DataFrame({\"<lambda_0>\": [4], \"<lambda_1>\": [6]})\n tm.assert_frame_equal(result, expected)\n\n result = Series([1, 2]).groupby([0, 0]).agg([f1, f2], 0, b=10)\n expected = DataFrame({\"<lambda_0>\": [13], \"<lambda_1>\": [30]})\n tm.assert_frame_equal(result, expected)\n\n def test_agg_with_one_lambda(self):\n # GH 25719, write tests for DataFrameGroupby.agg with only one lambda\n df = DataFrame(\n {\n \"kind\": [\"cat\", \"dog\", \"cat\", \"dog\"],\n \"height\": [9.1, 6.0, 9.5, 34.0],\n \"weight\": [7.9, 7.5, 9.9, 198.0],\n }\n )\n\n columns = [\"height_sqr_min\", \"height_max\", \"weight_max\"]\n expected = DataFrame(\n {\n \"height_sqr_min\": [82.81, 36.00],\n \"height_max\": [9.5, 34.0],\n \"weight_max\": [9.9, 198.0],\n },\n index=Index([\"cat\", \"dog\"], name=\"kind\"),\n columns=columns,\n )\n\n # check pd.NameAgg case\n result1 = df.groupby(by=\"kind\").agg(\n height_sqr_min=pd.NamedAgg(\n column=\"height\", aggfunc=lambda x: np.min(x ** 2)\n ),\n height_max=pd.NamedAgg(column=\"height\", aggfunc=\"max\"),\n weight_max=pd.NamedAgg(column=\"weight\", aggfunc=\"max\"),\n )\n tm.assert_frame_equal(result1, expected)\n\n # check agg(key=(col, aggfunc)) case\n result2 = df.groupby(by=\"kind\").agg(\n height_sqr_min=(\"height\", lambda x: np.min(x ** 2)),\n height_max=(\"height\", \"max\"),\n weight_max=(\"weight\", \"max\"),\n )\n tm.assert_frame_equal(result2, expected)\n\n def test_agg_multiple_lambda(self):\n # GH25719, test for DataFrameGroupby.agg with multiple lambdas\n # with mixed aggfunc\n df = DataFrame(\n {\n \"kind\": [\"cat\", \"dog\", \"cat\", \"dog\"],\n \"height\": [9.1, 6.0, 9.5, 34.0],\n \"weight\": [7.9, 7.5, 9.9, 198.0],\n }\n )\n columns = [\n \"height_sqr_min\",\n \"height_max\",\n \"weight_max\",\n \"height_max_2\",\n \"weight_min\",\n ]\n expected = DataFrame(\n {\n \"height_sqr_min\": [82.81, 36.00],\n \"height_max\": [9.5, 34.0],\n \"weight_max\": [9.9, 198.0],\n \"height_max_2\": [9.5, 34.0],\n \"weight_min\": [7.9, 7.5],\n },\n index=Index([\"cat\", \"dog\"], name=\"kind\"),\n columns=columns,\n )\n\n # check agg(key=(col, aggfunc)) case\n result1 = df.groupby(by=\"kind\").agg(\n height_sqr_min=(\"height\", lambda x: np.min(x ** 2)),\n height_max=(\"height\", \"max\"),\n weight_max=(\"weight\", \"max\"),\n height_max_2=(\"height\", lambda x: np.max(x)),\n weight_min=(\"weight\", lambda x: np.min(x)),\n )\n tm.assert_frame_equal(result1, expected)\n\n # check pd.NamedAgg case\n result2 = df.groupby(by=\"kind\").agg(\n height_sqr_min=pd.NamedAgg(\n column=\"height\", aggfunc=lambda x: np.min(x ** 2)\n ),\n height_max=pd.NamedAgg(column=\"height\", aggfunc=\"max\"),\n weight_max=pd.NamedAgg(column=\"weight\", aggfunc=\"max\"),\n height_max_2=pd.NamedAgg(column=\"height\", aggfunc=lambda x: np.max(x)),\n weight_min=pd.NamedAgg(column=\"weight\", aggfunc=lambda x: np.min(x)),\n )\n tm.assert_frame_equal(result2, expected)\n\n\ndef test_groupby_get_by_index():\n # GH 33439\n df = DataFrame({\"A\": [\"S\", \"W\", \"W\"], \"B\": [1.0, 1.0, 2.0]})\n res = df.groupby(\"A\").agg({\"B\": lambda x: x.get(x.index[-1])})\n expected = DataFrame({\"A\": [\"S\", \"W\"], \"B\": [1.0, 2.0]}).set_index(\"A\")\n pd.testing.assert_frame_equal(res, expected)\n\n\[email protected](\n \"grp_col_dict, exp_data\",\n [\n ({\"nr\": \"min\", \"cat_ord\": \"min\"}, {\"nr\": [1, 
5], \"cat_ord\": [\"a\", \"c\"]}),\n ({\"cat_ord\": \"min\"}, {\"cat_ord\": [\"a\", \"c\"]}),\n ({\"nr\": \"min\"}, {\"nr\": [1, 5]}),\n ],\n)\ndef test_groupby_single_agg_cat_cols(grp_col_dict, exp_data):\n # test single aggregations on ordered categorical cols GHGH27800\n\n # create the result dataframe\n input_df = DataFrame(\n {\n \"nr\": [1, 2, 3, 4, 5, 6, 7, 8],\n \"cat_ord\": list(\"aabbccdd\"),\n \"cat\": list(\"aaaabbbb\"),\n }\n )\n\n input_df = input_df.astype({\"cat\": \"category\", \"cat_ord\": \"category\"})\n input_df[\"cat_ord\"] = input_df[\"cat_ord\"].cat.as_ordered()\n result_df = input_df.groupby(\"cat\").agg(grp_col_dict)\n\n # create expected dataframe\n cat_index = pd.CategoricalIndex(\n [\"a\", \"b\"], categories=[\"a\", \"b\"], ordered=False, name=\"cat\", dtype=\"category\"\n )\n\n expected_df = DataFrame(data=exp_data, index=cat_index)\n\n tm.assert_frame_equal(result_df, expected_df)\n\n\[email protected](\n \"grp_col_dict, exp_data\",\n [\n ({\"nr\": [\"min\", \"max\"], \"cat_ord\": \"min\"}, [(1, 4, \"a\"), (5, 8, \"c\")]),\n ({\"nr\": \"min\", \"cat_ord\": [\"min\", \"max\"]}, [(1, \"a\", \"b\"), (5, \"c\", \"d\")]),\n ({\"cat_ord\": [\"min\", \"max\"]}, [(\"a\", \"b\"), (\"c\", \"d\")]),\n ],\n)\ndef test_groupby_combined_aggs_cat_cols(grp_col_dict, exp_data):\n # test combined aggregations on ordered categorical cols GH27800\n\n # create the result dataframe\n input_df = DataFrame(\n {\n \"nr\": [1, 2, 3, 4, 5, 6, 7, 8],\n \"cat_ord\": list(\"aabbccdd\"),\n \"cat\": list(\"aaaabbbb\"),\n }\n )\n\n input_df = input_df.astype({\"cat\": \"category\", \"cat_ord\": \"category\"})\n input_df[\"cat_ord\"] = input_df[\"cat_ord\"].cat.as_ordered()\n result_df = input_df.groupby(\"cat\").agg(grp_col_dict)\n\n # create expected dataframe\n cat_index = pd.CategoricalIndex(\n [\"a\", \"b\"], categories=[\"a\", \"b\"], ordered=False, name=\"cat\", dtype=\"category\"\n )\n\n # unpack the grp_col_dict to create the multi-index tuple\n # this tuple will be used to create the expected dataframe index\n multi_index_list = []\n for k, v in grp_col_dict.items():\n if isinstance(v, list):\n for value in v:\n multi_index_list.append([k, value])\n else:\n multi_index_list.append([k, v])\n multi_index = pd.MultiIndex.from_tuples(tuple(multi_index_list))\n\n expected_df = DataFrame(data=exp_data, columns=multi_index, index=cat_index)\n\n tm.assert_frame_equal(result_df, expected_df)\n\n\ndef test_nonagg_agg():\n # GH 35490 - Single/Multiple agg of non-agg function give same results\n # TODO: agg should raise for functions that don't aggregate\n df = DataFrame({\"a\": [1, 1, 2, 2], \"b\": [1, 2, 2, 1]})\n g = df.groupby(\"a\")\n\n result = g.agg([\"cumsum\"])\n result.columns = result.columns.droplevel(-1)\n expected = g.agg(\"cumsum\")\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_agg_no_suffix_index():\n # GH36189\n df = DataFrame([[4, 9]] * 3, columns=[\"A\", \"B\"])\n result = df.agg([\"sum\", lambda x: x.sum(), lambda x: x.sum()])\n expected = DataFrame(\n {\"A\": [12, 12, 12], \"B\": [27, 27, 27]}, index=[\"sum\", \"<lambda>\", \"<lambda>\"]\n )\n tm.assert_frame_equal(result, expected)\n\n # test Series case\n result = df[\"A\"].agg([\"sum\", lambda x: x.sum(), lambda x: x.sum()])\n expected = Series([12, 12, 12], index=[\"sum\", \"<lambda>\", \"<lambda>\"], name=\"A\")\n tm.assert_series_equal(result, expected)\n\n\ndef test_aggregate_datetime_objects():\n # https://github.com/pandas-dev/pandas/issues/36003\n # ensure we don't raise an error but keep object dtype 
for out-of-bounds\n # datetimes\n df = DataFrame(\n {\n \"A\": [\"X\", \"Y\"],\n \"B\": [\n datetime.datetime(2005, 1, 1, 10, 30, 23, 540000),\n datetime.datetime(3005, 1, 1, 10, 30, 23, 540000),\n ],\n }\n )\n result = df.groupby(\"A\").B.max()\n expected = df.set_index(\"A\")[\"B\"]\n tm.assert_series_equal(result, expected)\n\n\ndef test_aggregate_numeric_object_dtype():\n # https://github.com/pandas-dev/pandas/issues/39329\n # simplified case: multiple object columns where one is all-NaN\n # -> gets split as the all-NaN is inferred as float\n df = DataFrame(\n {\"key\": [\"A\", \"A\", \"B\", \"B\"], \"col1\": list(\"abcd\"), \"col2\": [np.nan] * 4},\n ).astype(object)\n result = df.groupby(\"key\").min()\n expected = DataFrame(\n {\"key\": [\"A\", \"B\"], \"col1\": [\"a\", \"c\"], \"col2\": [np.nan, np.nan]}\n ).set_index(\"key\")\n tm.assert_frame_equal(result, expected)\n\n # same but with numbers\n df = DataFrame(\n {\"key\": [\"A\", \"A\", \"B\", \"B\"], \"col1\": list(\"abcd\"), \"col2\": range(4)},\n ).astype(object)\n result = df.groupby(\"key\").min()\n expected = DataFrame(\n {\"key\": [\"A\", \"B\"], \"col1\": [\"a\", \"c\"], \"col2\": [0, 2]}\n ).set_index(\"key\")\n tm.assert_frame_equal(result, expected)\n", "from itertools import islice, cycle\n\nimport pandas as pd\nimport numpy as np\n\nfrom .geom import geom\nfrom .geom_rect import geom_rect\nfrom .annotate import annotate\nfrom ..coords import coord_flip\nfrom ..scales.scale import scale_discrete\n\n\nclass annotation_stripes(annotate):\n \"\"\"\n Alternating stripes, centered around each label.\n\n Useful as a background for geom_jitter.\n\n Parameters\n ----------\n fill : list-like\n List of colors for the strips. The default is\n `(\"#AAAAAA\", \"#CCCCCC\")`\n fill_range: 'cycle' | 'nocycle' | 'auto' | 'no'\n How to fill stripes beyond the range of scale::\n\n 'cycle' # keep cycling the colors of the\n # stripes after the range ends\n 'nocycle' # stop cycling the colors of the\n # stripes after the range ends\n 'auto' # 'cycle' for continuous scales and\n # 'nocycle' for discrete scales.\n 'no' # Do not add stripes passed the range\n # passed the range of the scales\n\n Default is 'auto'.\n direction : 'vertical' or 'horizontal'\n Orientation of the stripes\n extend : tuple\n Range of the stripes. 
The default is (0, 1), top to bottom.\n The values should be in the range [0, 1].\n **kwargs : dict\n Other aesthetic parameters for the rectangular stripes.\n They include; *alpha*, *color*, *linetype*, and *size*.\n \"\"\"\n\n def __init__(self, fill=('#AAAAAA', '#CCCCCC'), fill_range='auto',\n direction='vertical', extend=(0, 1), **kwargs):\n allowed = ('vertical', 'horizontal')\n if direction not in allowed:\n raise ValueError(\n \"direction must be one of {}\".format(allowed))\n self._annotation_geom = _geom_stripes(\n fill=fill, fill_range=fill_range, extend=extend,\n direction=direction, **kwargs)\n\n\nclass _geom_stripes(geom):\n\n DEFAULT_AES = {}\n REQUIRED_AES = set()\n DEFAULT_PARAMS = {'stat': 'identity', 'position': 'identity',\n 'na_rm': False, 'color': None,\n 'fill': ('#AAAAAA', '#CCCCCC'),\n 'linetype': 'solid', 'size': 1, 'alpha': 0.5,\n 'direction': 'vertical', 'extend': (0, 1),\n 'fill_range': 'auto'}\n legend_geom = \"polygon\"\n\n def draw_layer(self, data, layout, coord, **params):\n \"\"\"\n Draw stripes on every panel\n \"\"\"\n for pid in layout.layout['PANEL']:\n ploc = pid - 1\n panel_params = layout.panel_params[ploc]\n ax = layout.axs[ploc]\n self.draw_group(data, panel_params, coord, ax, **params)\n\n @staticmethod\n def draw_group(data, panel_params, coord, ax, **params):\n extend = params['extend']\n fill_range = params['fill_range']\n direction = params['direction']\n\n # Range\n if direction == 'vertical':\n axis, other_axis = 'x', 'y'\n else:\n axis, other_axis = 'y', 'x'\n\n if isinstance(coord, coord_flip):\n axis, other_axis = other_axis, axis\n\n _axis = getattr(panel_params, axis)\n breaks = _axis.breaks\n range = _axis.range\n other_range = getattr(panel_params, other_axis).range\n\n if fill_range == 'auto':\n if isinstance(_axis.scale, scale_discrete):\n fill_range = 'nocycle'\n else:\n fill_range = 'cycle'\n\n # Breaks along the width\n n_stripes = len(breaks)\n if n_stripes > 1:\n diff = np.diff(breaks)\n step = diff[0]\n equal_spaces = np.all(diff == step)\n if not equal_spaces:\n raise ValueError(\n \"The major breaks are not equally spaced. 
\"\n \"We cannot create stripes.\"\n )\n else:\n step = breaks[0]\n\n deltas = np.array([step/2] * n_stripes)\n many_stripes = len(breaks) > 1\n xmin = breaks - deltas\n xmax = breaks + deltas\n if fill_range in ('cycle', 'nocycle') and many_stripes:\n if range[0] < breaks[0]:\n n_stripes += 1\n xmax = np.insert(xmax, 0, xmin[0])\n xmin = np.insert(xmin, 0, range[0])\n if range[1] > breaks[1]:\n n_stripes += 1\n xmin = np.append(xmin, xmax[-1])\n xmax = np.append(xmax, range[1])\n\n # Height\n full_height = other_range[1] - other_range[0]\n ymin = other_range[0] + full_height * extend[0]\n ymax = other_range[0] + full_height * extend[1]\n fill = list(islice(cycle(params['fill']), n_stripes))\n if fill_range == 'nocycle' and many_stripes:\n # there are at least two stripes at this point\n fill[0] = fill[1]\n fill[-1] = fill[-2]\n\n if direction != 'vertical':\n xmin, xmax, ymin, ymax = ymin, ymax, xmin, xmax\n\n data = pd.DataFrame({\n 'xmin': xmin,\n 'xmax': xmax,\n 'ymin': ymin,\n 'ymax': ymax,\n 'fill': fill,\n 'alpha': params['alpha'],\n 'color': params['color'],\n 'linetype': params['linetype'],\n 'size': params['size']\n })\n\n return geom_rect.draw_group(data, panel_params, coord, ax, **params)\n", "import decimal\nimport math\nimport operator\n\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nimport pandas._testing as tm\nfrom pandas.tests.extension import base\n\nfrom .array import DecimalArray, DecimalDtype, make_data, to_decimal\n\n\[email protected]\ndef dtype():\n return DecimalDtype()\n\n\[email protected]\ndef data():\n return DecimalArray(make_data())\n\n\[email protected]\ndef data_for_twos():\n return DecimalArray([decimal.Decimal(2) for _ in range(100)])\n\n\[email protected]\ndef data_missing():\n return DecimalArray([decimal.Decimal(\"NaN\"), decimal.Decimal(1)])\n\n\[email protected]\ndef data_for_sorting():\n return DecimalArray(\n [decimal.Decimal(\"1\"), decimal.Decimal(\"2\"), decimal.Decimal(\"0\")]\n )\n\n\[email protected]\ndef data_missing_for_sorting():\n return DecimalArray(\n [decimal.Decimal(\"1\"), decimal.Decimal(\"NaN\"), decimal.Decimal(\"0\")]\n )\n\n\[email protected]\ndef na_cmp():\n return lambda x, y: x.is_nan() and y.is_nan()\n\n\[email protected]\ndef na_value():\n return decimal.Decimal(\"NaN\")\n\n\[email protected]\ndef data_for_grouping():\n b = decimal.Decimal(\"1.0\")\n a = decimal.Decimal(\"0.0\")\n c = decimal.Decimal(\"2.0\")\n na = decimal.Decimal(\"NaN\")\n return DecimalArray([b, b, na, na, a, a, b, c])\n\n\nclass BaseDecimal:\n @classmethod\n def assert_series_equal(cls, left, right, *args, **kwargs):\n def convert(x):\n # need to convert array([Decimal(NaN)], dtype='object') to np.NaN\n # because Series[object].isnan doesn't recognize decimal(NaN) as\n # NA.\n try:\n return math.isnan(x)\n except TypeError:\n return False\n\n if left.dtype == \"object\":\n left_na = left.apply(convert)\n else:\n left_na = left.isna()\n if right.dtype == \"object\":\n right_na = right.apply(convert)\n else:\n right_na = right.isna()\n\n tm.assert_series_equal(left_na, right_na)\n return tm.assert_series_equal(left[~left_na], right[~right_na], *args, **kwargs)\n\n @classmethod\n def assert_frame_equal(cls, left, right, *args, **kwargs):\n # TODO(EA): select_dtypes\n tm.assert_index_equal(\n left.columns,\n right.columns,\n exact=kwargs.get(\"check_column_type\", \"equiv\"),\n check_names=kwargs.get(\"check_names\", True),\n check_exact=kwargs.get(\"check_exact\", False),\n check_categorical=kwargs.get(\"check_categorical\", True),\n 
obj=f\"{kwargs.get('obj', 'DataFrame')}.columns\",\n )\n\n decimals = (left.dtypes == \"decimal\").index\n\n for col in decimals:\n cls.assert_series_equal(left[col], right[col], *args, **kwargs)\n\n left = left.drop(columns=decimals)\n right = right.drop(columns=decimals)\n tm.assert_frame_equal(left, right, *args, **kwargs)\n\n\nclass TestDtype(BaseDecimal, base.BaseDtypeTests):\n def test_hashable(self, dtype):\n pass\n\n\nclass TestInterface(BaseDecimal, base.BaseInterfaceTests):\n pass\n\n\nclass TestConstructors(BaseDecimal, base.BaseConstructorsTests):\n @pytest.mark.skip(reason=\"not implemented constructor from dtype\")\n def test_from_dtype(self, data):\n # construct from our dtype & string dtype\n pass\n\n\nclass TestReshaping(BaseDecimal, base.BaseReshapingTests):\n pass\n\n\nclass TestGetitem(BaseDecimal, base.BaseGetitemTests):\n def test_take_na_value_other_decimal(self):\n arr = DecimalArray([decimal.Decimal(\"1.0\"), decimal.Decimal(\"2.0\")])\n result = arr.take([0, -1], allow_fill=True, fill_value=decimal.Decimal(\"-1.0\"))\n expected = DecimalArray([decimal.Decimal(\"1.0\"), decimal.Decimal(\"-1.0\")])\n self.assert_extension_array_equal(result, expected)\n\n\nclass TestMissing(BaseDecimal, base.BaseMissingTests):\n pass\n\n\nclass Reduce:\n def check_reduce(self, s, op_name, skipna):\n\n if op_name in [\"median\", \"skew\", \"kurt\"]:\n msg = r\"decimal does not support the .* operation\"\n with pytest.raises(NotImplementedError, match=msg):\n getattr(s, op_name)(skipna=skipna)\n\n else:\n result = getattr(s, op_name)(skipna=skipna)\n expected = getattr(np.asarray(s), op_name)()\n tm.assert_almost_equal(result, expected)\n\n\nclass TestNumericReduce(Reduce, base.BaseNumericReduceTests):\n pass\n\n\nclass TestBooleanReduce(Reduce, base.BaseBooleanReduceTests):\n pass\n\n\nclass TestMethods(BaseDecimal, base.BaseMethodsTests):\n @pytest.mark.parametrize(\"dropna\", [True, False])\n @pytest.mark.xfail(reason=\"value_counts not implemented yet.\")\n def test_value_counts(self, all_data, dropna):\n all_data = all_data[:10]\n if dropna:\n other = np.array(all_data[~all_data.isna()])\n else:\n other = all_data\n\n result = pd.Series(all_data).value_counts(dropna=dropna).sort_index()\n expected = pd.Series(other).value_counts(dropna=dropna).sort_index()\n\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.xfail(reason=\"value_counts not implemented yet.\")\n def test_value_counts_with_normalize(self, data):\n return super().test_value_counts_with_normalize(data)\n\n\nclass TestCasting(BaseDecimal, base.BaseCastingTests):\n pass\n\n\nclass TestGroupby(BaseDecimal, base.BaseGroupbyTests):\n @pytest.mark.xfail(\n reason=\"needs to correctly define __eq__ to handle nans, xref #27081.\"\n )\n def test_groupby_apply_identity(self, data_for_grouping):\n super().test_groupby_apply_identity(data_for_grouping)\n\n @pytest.mark.xfail(reason=\"GH#39098: Converts agg result to object\")\n def test_groupby_agg_extension(self, data_for_grouping):\n super().test_groupby_agg_extension(data_for_grouping)\n\n\nclass TestSetitem(BaseDecimal, base.BaseSetitemTests):\n pass\n\n\nclass TestPrinting(BaseDecimal, base.BasePrintingTests):\n def test_series_repr(self, data):\n # Overriding this base test to explicitly test that\n # the custom _formatter is used\n ser = pd.Series(data)\n assert data.dtype.name in repr(ser)\n assert \"Decimal: \" in repr(ser)\n\n\n# TODO(extension)\[email protected](\n reason=(\n \"raising AssertionError as this is not implemented, though easy enough to do\"\n 
)\n)\ndef test_series_constructor_coerce_data_to_extension_dtype_raises():\n xpr = (\n \"Cannot cast data to extension dtype 'decimal'. Pass the \"\n \"extension array directly.\"\n )\n with pytest.raises(ValueError, match=xpr):\n pd.Series([0, 1, 2], dtype=DecimalDtype())\n\n\ndef test_series_constructor_with_dtype():\n arr = DecimalArray([decimal.Decimal(\"10.0\")])\n result = pd.Series(arr, dtype=DecimalDtype())\n expected = pd.Series(arr)\n tm.assert_series_equal(result, expected)\n\n result = pd.Series(arr, dtype=\"int64\")\n expected = pd.Series([10])\n tm.assert_series_equal(result, expected)\n\n\ndef test_dataframe_constructor_with_dtype():\n arr = DecimalArray([decimal.Decimal(\"10.0\")])\n\n result = pd.DataFrame({\"A\": arr}, dtype=DecimalDtype())\n expected = pd.DataFrame({\"A\": arr})\n tm.assert_frame_equal(result, expected)\n\n arr = DecimalArray([decimal.Decimal(\"10.0\")])\n result = pd.DataFrame({\"A\": arr}, dtype=\"int64\")\n expected = pd.DataFrame({\"A\": [10]})\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"frame\", [True, False])\ndef test_astype_dispatches(frame):\n # This is a dtype-specific test that ensures Series[decimal].astype\n # gets all the way through to ExtensionArray.astype\n # Designing a reliable smoke test that works for arbitrary data types\n # is difficult.\n data = pd.Series(DecimalArray([decimal.Decimal(2)]), name=\"a\")\n ctx = decimal.Context()\n ctx.prec = 5\n\n if frame:\n data = data.to_frame()\n\n result = data.astype(DecimalDtype(ctx))\n\n if frame:\n result = result[\"a\"]\n\n assert result.dtype.context.prec == ctx.prec\n\n\nclass TestArithmeticOps(BaseDecimal, base.BaseArithmeticOpsTests):\n def check_opname(self, s, op_name, other, exc=None):\n super().check_opname(s, op_name, other, exc=None)\n\n def test_arith_series_with_array(self, data, all_arithmetic_operators):\n op_name = all_arithmetic_operators\n s = pd.Series(data)\n\n context = decimal.getcontext()\n divbyzerotrap = context.traps[decimal.DivisionByZero]\n invalidoptrap = context.traps[decimal.InvalidOperation]\n context.traps[decimal.DivisionByZero] = 0\n context.traps[decimal.InvalidOperation] = 0\n\n # Decimal supports ops with int, but not float\n other = pd.Series([int(d * 100) for d in data])\n self.check_opname(s, op_name, other)\n\n if \"mod\" not in op_name:\n self.check_opname(s, op_name, s * 2)\n\n self.check_opname(s, op_name, 0)\n self.check_opname(s, op_name, 5)\n context.traps[decimal.DivisionByZero] = divbyzerotrap\n context.traps[decimal.InvalidOperation] = invalidoptrap\n\n def _check_divmod_op(self, s, op, other, exc=NotImplementedError):\n # We implement divmod\n super()._check_divmod_op(s, op, other, exc=None)\n\n def test_error(self):\n pass\n\n\nclass TestComparisonOps(BaseDecimal, base.BaseComparisonOpsTests):\n def check_opname(self, s, op_name, other, exc=None):\n super().check_opname(s, op_name, other, exc=None)\n\n def _compare_other(self, s, data, op_name, other):\n self.check_opname(s, op_name, other)\n\n def test_compare_scalar(self, data, all_compare_operators):\n op_name = all_compare_operators\n s = pd.Series(data)\n self._compare_other(s, data, op_name, 0.5)\n\n def test_compare_array(self, data, all_compare_operators):\n op_name = all_compare_operators\n s = pd.Series(data)\n\n alter = np.random.choice([-1, 0, 1], len(data))\n # Randomly double, halve or keep same value\n other = pd.Series(data) * [decimal.Decimal(pow(2.0, i)) for i in alter]\n self._compare_other(s, data, op_name, other)\n\n\nclass 
DecimalArrayWithoutFromSequence(DecimalArray):\n \"\"\"Helper class for testing error handling in _from_sequence.\"\"\"\n\n def _from_sequence(cls, scalars, dtype=None, copy=False):\n raise KeyError(\"For the test\")\n\n\nclass DecimalArrayWithoutCoercion(DecimalArrayWithoutFromSequence):\n @classmethod\n def _create_arithmetic_method(cls, op):\n return cls._create_method(op, coerce_to_dtype=False)\n\n\nDecimalArrayWithoutCoercion._add_arithmetic_ops()\n\n\ndef test_combine_from_sequence_raises():\n # https://github.com/pandas-dev/pandas/issues/22850\n ser = pd.Series(\n DecimalArrayWithoutFromSequence(\n [decimal.Decimal(\"1.0\"), decimal.Decimal(\"2.0\")]\n )\n )\n result = ser.combine(ser, operator.add)\n\n # note: object dtype\n expected = pd.Series(\n [decimal.Decimal(\"2.0\"), decimal.Decimal(\"4.0\")], dtype=\"object\"\n )\n tm.assert_series_equal(result, expected)\n\n\[email protected](\n \"class_\", [DecimalArrayWithoutFromSequence, DecimalArrayWithoutCoercion]\n)\ndef test_scalar_ops_from_sequence_raises(class_):\n # op(EA, EA) should return an EA, or an ndarray if it's not possible\n # to return an EA with the return values.\n arr = class_([decimal.Decimal(\"1.0\"), decimal.Decimal(\"2.0\")])\n result = arr + arr\n expected = np.array(\n [decimal.Decimal(\"2.0\"), decimal.Decimal(\"4.0\")], dtype=\"object\"\n )\n tm.assert_numpy_array_equal(result, expected)\n\n\[email protected](\n \"reverse, expected_div, expected_mod\",\n [(False, [0, 1, 1, 2], [1, 0, 1, 0]), (True, [2, 1, 0, 0], [0, 0, 2, 2])],\n)\ndef test_divmod_array(reverse, expected_div, expected_mod):\n # https://github.com/pandas-dev/pandas/issues/22930\n arr = to_decimal([1, 2, 3, 4])\n if reverse:\n div, mod = divmod(2, arr)\n else:\n div, mod = divmod(arr, 2)\n expected_div = to_decimal(expected_div)\n expected_mod = to_decimal(expected_mod)\n\n tm.assert_extension_array_equal(div, expected_div)\n tm.assert_extension_array_equal(mod, expected_mod)\n\n\ndef test_ufunc_fallback(data):\n a = data[:5]\n s = pd.Series(a, index=range(3, 8))\n result = np.abs(s)\n expected = pd.Series(np.abs(a), index=range(3, 8))\n tm.assert_series_equal(result, expected)\n\n\ndef test_array_ufunc():\n a = to_decimal([1, 2, 3])\n result = np.exp(a)\n expected = to_decimal(np.exp(a._data))\n tm.assert_extension_array_equal(result, expected)\n\n\ndef test_array_ufunc_series():\n a = to_decimal([1, 2, 3])\n s = pd.Series(a)\n result = np.exp(s)\n expected = pd.Series(to_decimal(np.exp(a._data)))\n tm.assert_series_equal(result, expected)\n\n\ndef test_array_ufunc_series_scalar_other():\n # check _HANDLED_TYPES\n a = to_decimal([1, 2, 3])\n s = pd.Series(a)\n result = np.add(s, decimal.Decimal(1))\n expected = pd.Series(np.add(a, decimal.Decimal(1)))\n tm.assert_series_equal(result, expected)\n\n\ndef test_array_ufunc_series_defer():\n a = to_decimal([1, 2, 3])\n s = pd.Series(a)\n\n expected = pd.Series(to_decimal([2, 4, 6]))\n r1 = np.add(s, a)\n r2 = np.add(a, s)\n\n tm.assert_series_equal(r1, expected)\n tm.assert_series_equal(r2, expected)\n\n\ndef test_groupby_agg():\n # Ensure that the result of agg is inferred to be decimal dtype\n # https://github.com/pandas-dev/pandas/issues/29141\n\n data = make_data()[:5]\n df = pd.DataFrame(\n {\"id1\": [0, 0, 0, 1, 1], \"id2\": [0, 1, 0, 1, 1], \"decimals\": DecimalArray(data)}\n )\n\n # single key, selected column\n expected = pd.Series(to_decimal([data[0], data[3]]))\n result = df.groupby(\"id1\")[\"decimals\"].agg(lambda x: x.iloc[0])\n tm.assert_series_equal(result, expected, 
check_names=False)\n result = df[\"decimals\"].groupby(df[\"id1\"]).agg(lambda x: x.iloc[0])\n tm.assert_series_equal(result, expected, check_names=False)\n\n # multiple keys, selected column\n expected = pd.Series(\n to_decimal([data[0], data[1], data[3]]),\n index=pd.MultiIndex.from_tuples([(0, 0), (0, 1), (1, 1)]),\n )\n result = df.groupby([\"id1\", \"id2\"])[\"decimals\"].agg(lambda x: x.iloc[0])\n tm.assert_series_equal(result, expected, check_names=False)\n result = df[\"decimals\"].groupby([df[\"id1\"], df[\"id2\"]]).agg(lambda x: x.iloc[0])\n tm.assert_series_equal(result, expected, check_names=False)\n\n # multiple columns\n expected = pd.DataFrame({\"id2\": [0, 1], \"decimals\": to_decimal([data[0], data[3]])})\n result = df.groupby(\"id1\").agg(lambda x: x.iloc[0])\n tm.assert_frame_equal(result, expected, check_names=False)\n\n\ndef test_groupby_agg_ea_method(monkeypatch):\n # Ensure that the result of agg is inferred to be decimal dtype\n # https://github.com/pandas-dev/pandas/issues/29141\n\n def DecimalArray__my_sum(self):\n return np.sum(np.array(self))\n\n monkeypatch.setattr(DecimalArray, \"my_sum\", DecimalArray__my_sum, raising=False)\n\n data = make_data()[:5]\n df = pd.DataFrame({\"id\": [0, 0, 0, 1, 1], \"decimals\": DecimalArray(data)})\n expected = pd.Series(to_decimal([data[0] + data[1] + data[2], data[3] + data[4]]))\n\n result = df.groupby(\"id\")[\"decimals\"].agg(lambda x: x.values.my_sum())\n tm.assert_series_equal(result, expected, check_names=False)\n s = pd.Series(DecimalArray(data))\n result = s.groupby(np.array([0, 0, 0, 1, 1])).agg(lambda x: x.values.my_sum())\n tm.assert_series_equal(result, expected, check_names=False)\n\n\ndef test_indexing_no_materialize(monkeypatch):\n # See https://github.com/pandas-dev/pandas/issues/29708\n # Ensure that indexing operations do not materialize (convert to a numpy\n # array) the ExtensionArray unnecessary\n\n def DecimalArray__array__(self, dtype=None):\n raise Exception(\"tried to convert a DecimalArray to a numpy array\")\n\n monkeypatch.setattr(DecimalArray, \"__array__\", DecimalArray__array__, raising=False)\n\n data = make_data()\n s = pd.Series(DecimalArray(data))\n df = pd.DataFrame({\"a\": s, \"b\": range(len(s))})\n\n # ensure the following operations do not raise an error\n s[s > 0.5]\n df[s > 0.5]\n s.at[0]\n df.at[0, \"a\"]\n\n\ndef test_to_numpy_keyword():\n # test the extra keyword\n values = [decimal.Decimal(\"1.1111\"), decimal.Decimal(\"2.2222\")]\n expected = np.array(\n [decimal.Decimal(\"1.11\"), decimal.Decimal(\"2.22\")], dtype=\"object\"\n )\n a = pd.array(values, dtype=\"decimal\")\n result = a.to_numpy(decimals=2)\n tm.assert_numpy_array_equal(result, expected)\n\n result = pd.Series(a).to_numpy(decimals=2)\n tm.assert_numpy_array_equal(result, expected)\n", "# -*- coding: utf-8 -*-\n\"\"\"Score, lagrange multiplier and conditional moment tests\nrobust to misspecification or without specification of higher moments\n\nCreated on Thu Oct 30 00:42:38 2014\n\nAuthor: Josef Perktold\nLicense: BSD-3\n\nNotes\n-----\n\nThis module is a mixture of very general and very specific functions for\nhypothesis testing in general models, targeted mainly to non-normal models.\n\nSome of the options or versions of these tests are mainly intented for\ncross-checking and to replicate different examples in references.\n\nWe need clean versions with good defaults for those functions that are\nintended for the user.\n\n\n\nReferences\n----------\n\nThe following references are collected after my intitial 
implementation and is\nmost likely not exactly what I used.\n\nThe main articles on which the functions are directly based upon, are Boos 1992,\nTauchen 1985 and Whitney 1985a. Wooldrige artificial regression is\nbased on several articles and his text book.\nBackground reading are the textbooks by Cameron and Trivedi, Wooldridge and\nDavidson and MacKinnon.\nNewey and MacFadden 1994 provide some of the theoretical background.\n\nPoisson dispersion tests are based on Dean 1992 and articles and text books by\nCameron and Trivedi.\n\nThe references currently do not include the literature on LM-test for\nspecification and diagnostic testing, like Pagan, Bera, Bera and Yoon and\nmany others, except those for the Poisson excess dispersion case and Pagan\nand Vella.\n\n\nBoos, Dennis D. 1992. “On Generalized Score Tests.” The American Statistician 46\n(4): 327–33. https://doi.org/10.2307/2685328.\n\nBreslow, Norman. 1989. “Score Tests in Overdispersed GLM’s.” In Statistical\nModelling, edited by Adriano Decarli, Brian J. Francis, Robert Gilchrist, and\nGilg U. H. Seeber, 64–74. Lecture Notes in Statistics 57. Springer New York.\nhttp://link.springer.com/chapter/10.1007/978-1-4612-3680-1_8.\n\nBreslow, Norman. 1990. “Tests of Hypotheses in Overdispersed Poisson Regression\nand Other Quasi- Likelihood Models.” Journal of the American Statistical\nAssociation 85 (410): 565–71. https://doi.org/10.2307/2289799.\n\nCameron, A. Colin, and Pravin K. Trivedi. 1986. “Econometric Models Based on\nCount Data. Comparisons and Applications of Some Estimators and Tests.” Journal\nof Applied Econometrics 1 (1): 29–53. https://doi.org/10.1002/jae.3950010104.\n\nCameron, A. Colin, and Pravin K. Trivedi. 1990a. “Conditional Moment Tests and\nOrthogonal Polynomials.” Indiana University, Department of Economics, Working\nPaper, 90–051.\n\nCameron, A. Colin, and Pravin K. Trivedi. 1990b. “Regression-Based Tests for\nOverdispersion in the Poisson Model.” Journal of Econometrics 46 (3): 347–64.\nhttps://doi.org/10.1016/0304-4076(90)90014-K.\n\nCameron, A. Colin, and Pravin K. Trivedi. Microeconometrics: methods and\napplications. Cambridge university press, 2005.\n\nCameron, A. Colin, and Pravin K. Trivedi. Regression analysis of count data.\nVol. 53. Cambridge university press, 2013.\n\nDavidson, Russell, and James G. MacKinnon. 1981. “Several Tests for Model\nSpecification in the Presence of Alternative Hypotheses.” Econometrica 49 (3):\n781–93. https://doi.org/10.2307/1911522.\n\nDavidson, Russell, and James G. MacKinnon. 1990. “Specification Tests Based on\nArtificial Regressions.” Journal of the American Statistical Association 85\n(409): 220–27. https://doi.org/10.2307/2289548.\n\nDavidson, Russell, and James G. MacKinnon. 1991. “Artificial Regressions and C\n(α) Tests.” Economics Letters 35 (2): 149–53.\nhttps://doi.org/10.1016/0165-1765(91)90162-E.\n\nDavidson, Russell, and James G. MacKinnon. Econometric theory and methods. Vol.\n5. New York: Oxford University Press, 2004.\n\nDean, C. B. 1992. “Testing for Overdispersion in Poisson and Binomial Regression\nModels.” Journal of the American Statistical Association 87 (418): 451–57.\nhttps://doi.org/10.2307/2290276.\n\nDean, C., and J. F. Lawless. 1989. “Tests for Detecting Overdispersion in\nPoisson Regression Models.” Journal of the American Statistical Association 84\n(406): 467–72. https://doi.org/10.2307/2289931.\n\nNewey, Whitney K. 1985a. 
“Generalized Method of Moments Specification Testing.”\nJournal of Econometrics 29 (3): 229–56.\nhttps://doi.org/10.1016/0304-4076(85)90154-X.\n\nNewey, Whitney K. 1985b. “Maximum Likelihood Specification Testing and\nConditional Moment Tests.” Econometrica 53 (5): 1047–70.\nhttps://doi.org/10.2307/1911011.\n\nNewey, Whitney K. and Kenneth D. West. 1987. “Hypothesis Testing with Efficient\nMethod of Moments Estimation.” International Economic Review 28 (3): 777–87.\nhttps://doi.org/10.2307/2526578.\n\nNewey, Whitney K. and Daniel McFadden. 1994 \"Large sample estimation and\nhypothesis testing.\" Handbook of econometrics 4: 2111-2245.\n\nPagan, Adrian, and Frank Vella. 1989. “Diagnostic Tests for Models Based on\nIndividual Data: A Survey.” Journal of Applied Econometrics 4 (S1): S29–59.\nhttps://doi.org/10.1002/jae.3950040504.\n\nTauchen, George. 1985. “Diagnostic Testing and Evaluation of Maximum Likelihood\nModels.” Journal of Econometrics 30 (1–2): 415–43.\nhttps://doi.org/10.1016/0304-4076(85)90149-6.\n\nWhite, Halbert. 1981. “Consequences and Detection of Misspecified Nonlinear\nRegression Models.” Journal of the American Statistical Association 76 (374):\n419–33. https://doi.org/10.2307/2287845.\n\nWhite, Halbert. 1983. “Maximum Likelihood Estimation of Misspecified Models.”\nEconometrica 51 (2): 513. https://doi.org/10.2307/1912004.\n\nWhite, Halbert. 1994. Estimation, Inference and Specification Analysis.\nCambridge: Cambridge University Press. https://doi.org/10.1017/CCOL0521252806.\n\nWooldridge, Jeffrey M. 1991. “Specification Testing and Quasi-Maximum-\nLikelihood Estimation.” Journal of Econometrics 48 (1–2): 29–55.\nhttps://doi.org/10.1016/0304-4076(91)90031-8.\n\nWooldridge, Jeffrey M. 1990. “A Unified Approach to Robust, Regression-Based\nSpecification Tests.” Econometric Theory 6 (1): 17–43.\n\nWooldridge, Jeffrey M. 1991a. “On the Application of Robust, Regression- Based\nDiagnostics to Models of Conditional Means and Conditional Variances.” Journal\nof Econometrics 47 (1): 5–46. https://doi.org/10.1016/0304-4076(91)90076-P.\n\nWooldridge, Jeffrey M. 1991b. “On the Application of Robust, Regression- Based\nDiagnostics to Models of Conditional Means and Conditional Variances.” Journal\nof Econometrics 47 (1): 5–46. https://doi.org/10.1016/0304-4076(91)90076-P.\n\nWooldridge, Jeffrey M. 1991c. “Specification Testing and Quasi-Maximum-\nLikelihood Estimation.” Journal of Econometrics 48 (1–2): 29–55.\nhttps://doi.org/10.1016/0304-4076(91)90031-8.\n\nWooldridge, Jeffrey M. 1994. “On the Limits of GLM for Specification Testing: A\nComment on Gurmu and Trivedi.” Econometric Theory 10 (2): 409–18.\nhttps://doi.org/10.2307/3532875.\n\nWooldridge, Jeffrey M. 1997. “Quasi-Likelihood Methods for Count Data.” Handbook\nof Applied Econometrics 2: 352–406.\n\nWooldridge, Jeffrey M. Econometric analysis of cross section and panel data. 
MIT\npress, 2010.\n\n\"\"\"\n\nimport numpy as np\nfrom scipy import stats\n\nfrom statsmodels.tools.decorators import cache_readonly\nfrom statsmodels.regression.linear_model import OLS\n\n\nclass ResultsGeneric(object):\n\n\n def __init__(self, **kwds):\n self.__dict__.update(kwds)\n\n\nclass TestResults(ResultsGeneric):\n\n def summary(self):\n txt = 'Specification Test (LM, score)\\n'\n stat = [self.c1, self.c2, self.c3]\n pval = [self.pval1, self.pval2, self.pval3]\n description = ['nonrobust', 'dispersed', 'HC']\n\n for row in zip(description, stat, pval):\n txt += '%-12s statistic = %6.4f pvalue = %6.5f\\n' % row\n\n txt += '\\nAssumptions:\\n'\n txt += 'nonrobust: variance is correctly specified\\n'\n txt += 'dispersed: variance correctly specified up to scale factor\\n'\n txt += 'HC : robust to any heteroscedasticity\\n'\n txt += 'test is not robust to correlation across observations'\n\n return txt\n\n\ndef lm_test_glm(result, exog_extra, mean_deriv=None):\n '''score/lagrange multiplier test for GLM\n\n Wooldridge procedure for test of mean function in GLM\n\n Parameters\n ----------\n results : GLMResults instance\n results instance with the constrained model\n exog_extra : ndarray or None\n additional exogenous variables for variable addition test\n This can be set to None if mean_deriv is provided.\n mean_deriv : None or ndarray\n Extra moment condition that correspond to the partial derivative of\n a mean function with respect to some parameters.\n\n Returns\n -------\n test_results : Results instance\n The results instance has the following attributes which are score\n statistic and p-value for 3 versions of the score test.\n\n c1, pval1 : nonrobust score_test results\n c2, pval2 : score test results robust to over or under dispersion\n c3, pval3 : score test results fully robust to any heteroscedasticity\n\n The test results instance also has a simple summary method.\n\n Notes\n -----\n TODO: add `df` to results and make df detection more robust\n\n This implements the auxiliary regression procedure of Wooldridge,\n implemented based on the presentation in chapter 8 in Handbook of\n Applied Econometrics 2.\n\n References\n ----------\n Wooldridge, Jeffrey M. 1997. “Quasi-Likelihood Methods for Count Data.”\n Handbook of Applied Econometrics 2: 352–406.\n\n and other articles and text book by Wooldridge\n\n '''\n\n if hasattr(result, '_result'):\n res = result._result\n else:\n res = result\n\n mod = result.model\n nobs = mod.endog.shape[0]\n\n #mean_func = mod.family.link.inverse\n dlinkinv = mod.family.link.inverse_deriv\n\n # derivative of mean function w.r.t. 
beta (linear params)\n dm = lambda x, linpred: dlinkinv(linpred)[:,None] * x\n\n var_func = mod.family.variance\n\n x = result.model.exog\n x2 = exog_extra\n\n # test omitted\n lin_pred = res.predict(linear=True)\n dm_incl = dm(x, lin_pred)\n if x2 is not None:\n dm_excl = dm(x2, lin_pred)\n if mean_deriv is not None:\n # allow both and stack\n dm_excl = np.column_stack((dm_excl, mean_deriv))\n elif mean_deriv is not None:\n dm_excl = mean_deriv\n else:\n raise ValueError('either exog_extra or mean_deriv have to be provided')\n\n # TODO check for rank or redundant, note OLS calculates the rank\n k_constraint = dm_excl.shape[1]\n fittedvalues = res.predict() # discrete has linpred instead of mean\n v = var_func(fittedvalues)\n std = np.sqrt(v)\n res_ols1 = OLS(res.resid_response / std, np.column_stack((dm_incl, dm_excl)) / std[:, None]).fit()\n\n # case: nonrobust assumes variance implied by distribution is correct\n c1 = res_ols1.ess\n pval1 = stats.chi2.sf(c1, k_constraint)\n #print c1, stats.chi2.sf(c1, 2)\n\n # case: robust to dispersion\n c2 = nobs * res_ols1.rsquared\n pval2 = stats.chi2.sf(c2, k_constraint)\n #print c2, stats.chi2.sf(c2, 2)\n\n # case: robust to heteroscedasticity\n from statsmodels.stats.multivariate_tools import partial_project\n pp = partial_project(dm_excl / std[:,None], dm_incl / std[:,None])\n resid_p = res.resid_response / std\n res_ols3 = OLS(np.ones(nobs), pp.resid * resid_p[:,None]).fit()\n #c3 = nobs * res_ols3.rsquared # this is Wooldridge\n c3b = res_ols3.ess # simpler if endog is ones\n pval3 = stats.chi2.sf(c3b, k_constraint)\n\n tres = TestResults(c1=c1, pval1=pval1,\n c2=c2, pval2=pval2,\n c3=c3b, pval3=pval3)\n\n return tres\n\n\ndef cm_test_robust(resid, resid_deriv, instruments, weights=1):\n '''score/lagrange multiplier of Wooldridge\n\n generic version of Wooldridge procedure for test of conditional moments\n\n Limitation: This version allows only for one unconditional moment\n restriction, i.e. resid is scalar for each observation.\n Another limitation is that it assumes independent observations, no\n correlation in residuals and weights cannot be replaced by cross-observation\n whitening.\n\n Parameters\n ----------\n resid : ndarray, (nobs, )\n conditional moment restriction, E(r | x, params) = 0\n resid_deriv : ndarray, (nobs, k_params)\n derivative of conditional moment restriction with respect to parameters\n instruments : ndarray, (nobs, k_instruments)\n indicator variables of Wooldridge, multiplies the conditional momen\n restriction\n weights : ndarray\n This is a weights function as used in WLS. The moment\n restrictions are multiplied by weights. This corresponds to the\n inverse of the variance in a heteroskedastic model.\n\n Returns\n -------\n test_results : Results instance\n ??? 
TODO\n\n Notes\n -----\n This implements the auxiliary regression procedure of Wooldridge,\n implemented based on procedure 2.1 in Wooldridge 1990.\n\n Wooldridge allows for multivariate conditional moments (`resid`)\n TODO: check dimensions for multivariate case for extension\n\n References\n ----------\n Wooldridge\n Wooldridge\n and more Wooldridge\n\n '''\n # notation: Wooldridge uses too mamny Greek letters\n # instruments is capital lambda\n # resid is small phi\n # resid_deriv is capital phi\n # weights is C\n\n\n nobs = resid.shape[0]\n\n from statsmodels.stats.multivariate_tools import partial_project\n\n w_sqrt = np.sqrt(weights)\n if np.size(weights) > 1:\n w_sqrt = w_sqrt[:,None]\n pp = partial_project(instruments * w_sqrt, resid_deriv * w_sqrt)\n mom_resid = pp.resid\n\n moms_test = mom_resid * resid[:, None] * w_sqrt\n\n # we get this here in case we extend resid to be more than 1-D\n k_constraint = moms_test.shape[1]\n\n # use OPG variance as in Wooldridge 1990. This might generalize\n cov = moms_test.T.dot(moms_test)\n diff = moms_test.sum(0)\n\n # see Wooldridge last page in appendix\n stat = diff.dot(np.linalg.solve(cov, diff))\n\n # for checking, this corresponds to nobs * rsquared of auxiliary regression\n stat2 = OLS(np.ones(nobs), moms_test).fit().ess\n pval = stats.chi2.sf(stat, k_constraint)\n\n return stat, pval, stat2\n\n\ndef lm_robust(score, constraint_matrix, score_deriv_inv, cov_score,\n cov_params=None):\n '''general formula for score/LM test\n\n generalized score or lagrange multiplier test for implicit constraints\n\n `r(params) = 0`, with gradient `R = d r / d params`\n\n linear constraints are given by `R params - q = 0`\n\n It is assumed that all arrays are evaluated at the constrained estimates.\n\n Parameters\n ----------\n score : ndarray, 1-D\n derivative of objective function at estimated parameters\n of constrained model\n constraint_matrix R : ndarray\n Linear restriction matrix or Jacobian of nonlinear constraints\n hessian_inv, Ainv : ndarray, symmetric, square\n inverse of second derivative of objective function\n TODO: could be OPG or any other estimator if information matrix\n equality holds\n cov_score B : ndarray, symmetric, square\n covariance matrix of the score. This is the inner part of a sandwich\n estimator.\n cov_params V : ndarray, symmetric, square\n covariance of full parameter vector evaluated at constrained parameter\n estimate. 
This can be specified instead of cov_score B.\n\n Returns\n -------\n lm_stat : float\n score/lagrange multiplier statistic\n\n Notes\n -----\n\n '''\n # shorthand alias\n R, Ainv, B, V = constraint_matrix, score_deriv_inv, cov_score, cov_params\n\n tmp = R.dot(Ainv)\n wscore = tmp.dot(score) # C Ainv score\n\n if B is None and V is None:\n # only Ainv is given, so we assume information matrix identity holds\n # computational short cut, should be same if Ainv == inv(B)\n lm_stat = score.dot(Ainv.dot(score))\n else:\n # information matrix identity does not hold\n if V is None:\n inner = tmp.dot(B).dot(tmp.T)\n else:\n inner = R.dot(V).dot(R.T)\n\n #lm_stat2 = wscore.dot(np.linalg.pinv(inner).dot(wscore))\n # Let's assume inner is invertible, TODO: check if usecase for pinv exists\n lm_stat = wscore.dot(np.linalg.solve(inner, wscore))\n\n return lm_stat#, lm_stat2\n\n\ndef lm_robust_subset(score, k_constraints, score_deriv_inv, cov_score):\n '''general formula for score/LM test\n\n generalized score or lagrange multiplier test for constraints on a subset\n of parameters\n\n `params_1 = value`, where params_1 is a subset of the unconstrained\n parameter vector.\n\n It is assumed that all arrays are evaluated at the constrained estimates.\n\n Parameters\n ----------\n score : ndarray, 1-D\n derivative of objective function at estimated parameters\n of constrained model\n k_constraint : int\n number of constraints\n score_deriv_inv : ndarray, symmetric, square\n inverse of second derivative of objective function\n TODO: could be OPG or any other estimator if information matrix\n equality holds\n cov_score B : ndarray, symmetric, square\n covariance matrix of the score. This is the inner part of a sandwich\n estimator.\n not cov_params V : ndarray, symmetric, square\n covariance of full parameter vector evaluated at constrained parameter\n estimate. This can be specified instead of cov_score B.\n\n Returns\n -------\n lm_stat : float\n score/lagrange multiplier statistic\n p-value : float\n p-value of the LM test based on chisquare distribution\n\n Notes\n -----\n The implementation is based on Boos 1992 section 4.1. 
The same derivation\n is also in other articles and in text books.\n\n '''\n\n # Notation in Boos\n # score `S = sum (s_i)\n # score_obs `s_i`\n # score_deriv `I` is derivative of score (hessian)\n # `D` is covariance matrix of score, OPG product given independent observations\n\n #k_params = len(score)\n\n # Note: I reverse order between constraint and unconstrained compared to Boos\n\n # submatrices of score_deriv/hessian\n # these are I22 and I12 in Boos\n #h_uu = score_deriv[-k_constraints:, -k_constraints:]\n h_uu = score_deriv_inv[:-k_constraints, :-k_constraints]\n h_cu = score_deriv_inv[-k_constraints:, :-k_constraints]\n\n # TODO: pinv or solve ?\n tmp_proj = h_cu.dot(np.linalg.inv(h_uu))\n tmp = np.column_stack((-tmp_proj, np.eye(k_constraints))) #, tmp_proj))\n\n cov_score_constraints = tmp.dot(cov_score.dot(tmp.T))\n\n #lm_stat2 = wscore.dot(np.linalg.pinv(inner).dot(wscore))\n # Let's assume inner is invertible, TODO: check if usecase for pinv exists\n lm_stat = score.dot(np.linalg.solve(cov_score_constraints, score))\n pval = stats.chi2.sf(lm_stat, k_constraints)\n\n# # check second calculation Boos referencing Kent 1982 and Engle 1984\n# # we can use this when robust_cov_params of full model is available\n# #h_inv = np.linalg.inv(score_deriv)\n# hinv = score_deriv_inv\n# v = h_inv.dot(cov_score.dot(h_inv)) # this is robust cov_params\n# v_cc = v[:k_constraints, :k_constraints]\n# h_cc = score_deriv[:k_constraints, :k_constraints]\n# # brute force calculation:\n# h_resid_cu = h_cc - h_cu.dot(np.linalg.solve(h_uu, h_cu))\n# cov_s_c = h_resid_cu.dot(v_cc.dot(h_resid_cu))\n# diff = np.max(np.abs(cov_s_c - cov_score_constraints))\n return lm_stat, pval #, lm_stat2\n\n\ndef lm_robust_subset_parts(score, k_constraints,\n score_deriv_uu, score_deriv_cu,\n cov_score_cc, cov_score_cu, cov_score_uu):\n \"\"\"robust generalized score tests on subset of parameters\n\n This is the same as lm_robust_subset with arguments in parts of\n partitioned matrices.\n This can be useful, when we have the parts based on different estimation\n procedures, i.e. when we do not have the full unconstrained model.\n\n Calculates mainly the covariance of the constraint part of the score.\n\n Parameters\n ----------\n score : ndarray, 1-D\n derivative of objective function at estimated parameters\n of constrained model. These is the score component for the restricted\n part under hypothesis. 
The unconstrained part of the score is assumed\n to be zero.\n k_constraint : int\n number of constraints\n score_deriv_uu : ndarray, symmetric, square\n first derivative of moment equation or second derivative of objective\n function for the unconstrained part\n TODO: could be OPG or any other estimator if information matrix\n equality holds\n score_deriv_cu : ndarray\n first cross derivative of moment equation or second cross\n derivative of objective function between.\n cov_score_cc : ndarray\n covariance matrix of the score for the unconstrained part.\n This is the inner part of a sandwich estimator.\n cov_score_cu : ndarray\n covariance matrix of the score for the off-diagonal block, i.e.\n covariance between constrained and unconstrained part.\n cov_score_uu : ndarray\n covariance matrix of the score for the unconstrained part.\n\n Returns\n -------\n lm_stat : float\n score/lagrange multiplier statistic\n p-value : float\n p-value of the LM test based on chisquare distribution\n\n Notes\n -----\n TODO: these function should just return the covariance of the score\n instead of calculating the score/lm test.\n\n Implementation similar to lm_robust_subset and is based on Boos 1992,\n section 4.1 in the form attributed to Breslow (1990). It does not use the\n computation attributed to Kent (1982) and Engle (1984).\n \"\"\"\n\n tmp_proj = np.linalg.solve(score_deriv_uu, score_deriv_cu.T).T\n tmp = tmp_proj.dot(cov_score_cu.T)\n\n # this needs to make a copy of cov_score_cc for further inplace modification\n cov = cov_score_cc - tmp\n cov -= tmp.T\n cov += tmp_proj.dot(cov_score_uu).dot(tmp_proj.T)\n\n lm_stat = score.dot(np.linalg.solve(cov, score))\n pval = stats.chi2.sf(lm_stat, k_constraints)\n return lm_stat, pval\n\n\ndef lm_robust_reparameterized(score, params_deriv, score_deriv, cov_score):\n \"\"\"robust generalized score test for transformed parameters\n\n The parameters are given by a nonlinear transformation of the estimated\n reduced parameters\n\n `params = g(params_reduced)` with jacobian `G = d g / d params_reduced`\n\n score and other arrays are for full parameter space `params`\n\n Parameters\n ----------\n score : ndarray, 1-D\n derivative of objective function at estimated parameters\n of constrained model\n params_deriv : ndarray\n Jacobian G of the parameter trasnformation\n score_deriv : ndarray, symmetric, square\n second derivative of objective function\n TODO: could be OPG or any other estimator if information matrix\n equality holds\n cov_score B : ndarray, symmetric, square\n covariance matrix of the score. 
This is the inner part of a sandwich\n estimator.\n\n Returns\n -------\n lm_stat : float\n score/lagrange multiplier statistic\n p-value : float\n p-value of the LM test based on chisquare distribution\n\n Notes\n -----\n Boos 1992, section 4.3, expression for T_{GS} just before example 6\n \"\"\"\n # Boos notation\n # params_deriv G\n\n k_params, k_reduced = params_deriv.shape\n k_constraints = k_params - k_reduced\n\n G = params_deriv # shortcut alias\n\n tmp_c0 = np.linalg.pinv(G.T.dot(score_deriv.dot(G)))\n tmp_c1 = score_deriv.dot(G.dot(tmp_c0.dot(G.T)))\n tmp_c = np.eye(k_params) - tmp_c1\n\n cov = tmp_c.dot(cov_score.dot(tmp_c.T)) # warning: reduced rank\n\n lm_stat = score.dot(np.linalg.pinv(cov).dot(score))\n pval = stats.chi2.sf(lm_stat, k_constraints)\n return lm_stat, pval\n\n\ndef dispersion_poisson(results):\n \"\"\"Score/LM type tests for Poisson variance assumptions\n\n Null Hypothesis is\n\n H0: var(y) = E(y) and assuming E(y) is correctly specified\n H1: var(y) ~= E(y)\n\n The tests are based on the constrained model, i.e. the Poisson model.\n The tests differ in their assumed alternatives, and in their maintained\n assumptions.\n\n Parameters\n ----------\n results : Poisson results instance\n This can be a results instance for either a discrete Poisson or a GLM\n with family Poisson.\n\n Returns\n -------\n res : ndarray, shape (7, 2)\n each row contains the test statistic and p-value for one of the 7 tests\n computed here.\n description : 2-D list of strings\n Each test has two strings a descriptive name and a string for the\n alternative hypothesis.\n \"\"\"\n\n if hasattr(results, '_results'):\n results = results._results\n\n endog = results.model.endog\n nobs = endog.shape[0] #TODO: use attribute, may need to be added\n fitted = results.predict()\n #fitted = results.fittedvalues # discrete has linear prediction\n #this assumes Poisson\n resid2 = results.resid_response**2\n var_resid_endog = (resid2 - endog)\n var_resid_fitted = (resid2 - fitted)\n std1 = np.sqrt(2 * (fitted**2).sum())\n\n var_resid_endog_sum = var_resid_endog.sum()\n dean_a = var_resid_fitted.sum() / std1\n dean_b = var_resid_endog_sum / std1\n dean_c = (var_resid_endog / fitted).sum() / np.sqrt(2 * nobs)\n\n pval_dean_a = stats.norm.sf(np.abs(dean_a))\n pval_dean_b = stats.norm.sf(np.abs(dean_b))\n pval_dean_c = stats.norm.sf(np.abs(dean_c))\n\n results_all = [[dean_a, pval_dean_a],\n [dean_b, pval_dean_b],\n [dean_c, pval_dean_c]]\n description = [['Dean A', 'mu (1 + a mu)'],\n ['Dean B', 'mu (1 + a mu)'],\n ['Dean C', 'mu (1 + a)']]\n\n # Cameron Trived auxiliary regression page 78 count book 1989\n endog_v = var_resid_endog / fitted\n res_ols_nb2 = OLS(endog_v, fitted).fit(use_t=False)\n stat_ols_nb2 = res_ols_nb2.tvalues[0]\n pval_ols_nb2 = res_ols_nb2.pvalues[0]\n results_all.append([stat_ols_nb2, pval_ols_nb2])\n description.append(['CT nb2', 'mu (1 + a mu)'])\n\n res_ols_nb1 = OLS(endog_v, fitted).fit(use_t=False)\n stat_ols_nb1 = res_ols_nb1.tvalues[0]\n pval_ols_nb1 = res_ols_nb1.pvalues[0]\n results_all.append([stat_ols_nb1, pval_ols_nb1])\n description.append(['CT nb1', 'mu (1 + a)'])\n\n endog_v = var_resid_endog / fitted\n res_ols_nb2 = OLS(endog_v, fitted).fit(cov_type='HC1', use_t=False)\n stat_ols_hc1_nb2 = res_ols_nb2.tvalues[0]\n pval_ols_hc1_nb2 = res_ols_nb2.pvalues[0]\n results_all.append([stat_ols_hc1_nb2, pval_ols_hc1_nb2])\n description.append(['CT nb2 HC1', 'mu (1 + a mu)'])\n\n res_ols_nb1 = OLS(endog_v, np.ones(len(endog_v))).fit(cov_type='HC1',\n use_t=False)\n 
stat_ols_hc1_nb1 = res_ols_nb1.tvalues[0]\n pval_ols_hc1_nb1 = res_ols_nb1.pvalues[0]\n results_all.append([stat_ols_hc1_nb1, pval_ols_hc1_nb1])\n description.append(['CT nb1 HC1', 'mu (1 + a)'])\n\n return np.array(results_all), description\n\n\ndef dispersion_poisson_generic(results, exog_new_test, exog_new_control=None,\n include_score=False, use_endog=True,\n cov_type='HC1', cov_kwds=None, use_t=False):\n \"\"\"A variable addition test for the variance function\n\n This uses an artificial regression to calculate a variant of an LM or\n generalized score test for the specification of the variance assumption\n in a Poisson model. The performed test is a Wald test on the coefficients\n of the `exog_new_test`.\n\n Warning: insufficiently tested, especially for options\n \"\"\"\n\n if hasattr(results, '_results'):\n results = results._results\n\n endog = results.model.endog\n nobs = endog.shape[0] #TODO: use attribute, may need to be added\n # fitted = results.fittedvalues # generic has linpred as fittedvalues\n fitted = results.predict()\n resid2 = results.resid_response**2\n #the following assumes Poisson\n if use_endog:\n var_resid = (resid2 - endog)\n else:\n var_resid = (resid2 - fitted)\n\n endog_v = var_resid / fitted\n\n k_constraints = exog_new_test.shape[1]\n ex_list = [exog_new_test]\n if include_score:\n score_obs = results.model.score_obs(results.params)\n ex_list.append(score_obs)\n\n if exog_new_control is not None:\n ex_list.append(score_obs)\n\n if len(ex_list) > 1:\n ex = np.column_stack(ex_list)\n use_wald = True\n else:\n ex = ex_list[0] # no control variables in exog\n use_wald = False\n\n res_ols = OLS(endog_v, ex).fit(cov_type=cov_type, cov_kwds=cov_kwds,\n use_t=use_t)\n\n if use_wald:\n # we have controls and need to test coefficients\n k_vars = ex.shape[1]\n constraints = np.eye(k_constraints, k_vars)\n ht = res_ols.wald_test(constraints)\n stat_ols = ht.statistic\n pval_ols = ht.pvalue\n else:\n # we do not have controls and can use overall fit\n nobs = endog_v.shape[0]\n rsquared_noncentered = 1 - res_ols.ssr/res_ols.uncentered_tss\n stat_ols = nobs * rsquared_noncentered\n pval_ols = stats.chi2.sf(stat_ols, k_constraints)\n\n return stat_ols, pval_ols\n\n\ndef conditional_moment_test_generic(mom_test, mom_test_deriv,\n mom_incl, mom_incl_deriv,\n var_mom_all=None,\n cov_type='OPG', cov_kwds=None):\n \"\"\"generic conditional moment test\n\n This is mainly intended as internal function in support of diagnostic\n and specification tests. It has no conversion and checking of correct\n arguments.\n\n Parameters\n ----------\n mom_test : ndarray, 2-D (nobs, k_constraints)\n moment conditions that will be tested to be zero\n mom_test_deriv : ndarray, 2-D, square (k_constraints, k_constraints)\n derivative of moment conditions under test with respect to the\n parameters of the model summed over observations.\n mom_incl : ndarray, 2-D (nobs, k_params)\n moment conditions that where use in estimation, assumed to be zero\n This is score_obs in the case of (Q)MLE\n mom_incl_deriv : ndarray, 2-D, square (k_params, k_params)\n derivative of moment conditions of estimator summed over observations\n This is the information matrix or Hessian in the case of (Q)MLE.\n var_mom_all : None, or ndarray, 2-D, (k, k) with k = k_constraints + k_params\n Expected product or variance of the joint (column_stacked) moment\n conditions. 
The stacking should have the variance of the moment\n conditions under test in the first k_constraint rows and columns.\n If it is not None, then it will be estimated based on cov_type.\n I think: This is the Hessian of the extended or alternative model\n under full MLE and score test assuming information matrix identity\n holds.\n\n Returns\n -------\n results\n\n Notes\n -----\n TODO: cov_type other than OPG is missing\n initial implementation based on Cameron Trived countbook 1998 p.48, p.56\n\n also included: mom_incl can be None if expected mom_test_deriv is zero.\n\n References\n ----------\n Cameron and Trivedi 1998 count book\n Wooldridge ???\n Pagan and Vella 1989\n \"\"\"\n if cov_type != 'OPG':\n raise NotImplementedError\n\n k_constraints = mom_test.shape[1]\n\n if mom_incl is None:\n # assume mom_test_deriv is zero, do not include effect of mom_incl\n if var_mom_all is None:\n var_cm = mom_test.T.dot(mom_test)\n else:\n var_cm = var_mom_all\n\n else:\n # take into account he effect of parameter estimates on mom_test\n if var_mom_all is None:\n mom_all = np.column_stack((mom_test, mom_incl))\n # TODO: replace with inner sandwich covariance estimator\n var_mom_all = mom_all.T.dot(mom_all)\n\n tmp = mom_test_deriv.dot(np.linalg.pinv(mom_incl_deriv))\n h = np.column_stack((np.eye(k_constraints), -tmp))\n\n var_cm = h.dot(var_mom_all.dot(h.T))\n\n # calculate test results with chisquare\n var_cm_inv = np.linalg.pinv(var_cm)\n mom_test_sum = mom_test.sum(0)\n statistic = mom_test_sum.dot(var_cm_inv.dot(mom_test_sum))\n pval = stats.chi2.sf(statistic, k_constraints)\n\n # normal test of individual components\n se = np.sqrt(np.diag(var_cm))\n tvalues = mom_test_sum / se\n pvalues = stats.norm.sf(np.abs(tvalues))\n\n res = ResultsGeneric(var_cm=var_cm,\n stat_cmt=statistic,\n pval_cmt=pval,\n tvalues=tvalues,\n pvalues=pvalues)\n\n return res\n\n\ndef conditional_moment_test_regression(mom_test, mom_test_deriv=None,\n mom_incl=None, mom_incl_deriv=None,\n var_mom_all=None, demean=False,\n cov_type='OPG', cov_kwds=None):\n \"\"\"generic conditional moment test based artificial regression\n\n this is very experimental, no options implemented yet\n\n so far\n OPG regression, or\n artificial regression with Robust Wald test\n\n The latter is (as far as I can see) the same as an overidentifying test\n in GMM where the test statistic is the value of the GMM objective function\n and it is assumed that parameters were estimated with optimial GMM, i.e.\n the weight matrix equal to the expectation of the score variance.\n \"\"\"\n # so far coded from memory\n nobs, k_constraints = mom_test.shape\n\n endog = np.ones(nobs)\n if mom_incl is not None:\n ex = np.column_stack((mom_test, mom_incl))\n else:\n ex = mom_test\n if demean:\n ex -= ex.mean(0)\n if cov_type == 'OPG':\n res = OLS(endog, ex).fit()\n\n statistic = nobs * res.rsquared\n pval = stats.chi2.sf(statistic, k_constraints)\n else:\n res = OLS(endog, ex).fit(cov_type=cov_type, cov_kwds=cov_kwds)\n tres = res.wald_test(np.eye(ex.shape[1]))\n statistic = tres.statistic\n pval = tres.pvalue\n\n return statistic, pval\n\n\nclass CMTNewey(object):\n \"\"\"generic moment test for GMM\n\n This is a class to calculate and hold the various results\n\n This is based on Newey 1985 on GMM.\n Lemma 1:\n Theorem 1\n\n The main method is `chisquare` which returns the result of the\n conditional moment test.\n\n Warning: name of class and methods will likely be changed\n\n Parameters\n ----------\n moments : ndarray, 1-D\n moments that are tested to be 
zero. They do not need to be derived\n from a likelihood function.\n moments_deriv : ndarray\n derivative of the moment function with respect to the parameters that\n are estimated\n cov_moments : ndarray\n An estimate for the joint (expected) covariance of all moments. This\n can be a heteroscedasticity or correlation robust covariance estimate,\n i.e. the inner part of a sandwich covariance.\n weights : ndarray\n Weights used in the GMM estimation.\n transf_mt : ndarray\n This defines the test moments where `transf_mt` is the matrix that\n defines a Linear combination of moments that have expected value equal\n to zero under the Null hypothesis.\n\n Notes\n -----\n The one letter names in Newey 1985 are\n\n moments, g :\n cov_moments, V :\n moments_deriv, H :\n weights, W :\n transf_mt, L :\n linear transformation to get the test condition from the moments\n\n not used, add as argument to methods or __init__?\n K cov for misspecification\n or mispecification_deriv\n\n This follows the GMM version in Newey 1985a, not the MLE version in\n Newey 1985b. Newey uses the generalized information matrix equality in the\n MLE version Newey (1985b).\n\n Newey 1985b Lemma 1 does not impose correctly specified likelihood, but\n assumes it in the following. Lemma 1 in both articles are essentially the\n same assuming D = H' W.\n\n References\n ----------\n - Newey 1985a, Generalized Method of Moment specification testing,\n Journal of Econometrics\n - Newey 1985b, Maximum Likelihood Specification Testing and Conditional\n Moment Tests, Econometrica\n \"\"\"\n\n def __init__(self, moments, cov_moments, moments_deriv,\n weights, transf_mt):\n\n self.moments = moments\n self.cov_moments = cov_moments\n self.moments_deriv = moments_deriv\n self.weights = weights\n self.transf_mt = transf_mt\n\n # derived quantities\n self.moments_constraint = transf_mt.dot(moments)\n self.htw = moments_deriv.T.dot(weights) # H'W\n\n # TODO check these\n self.k_moments = self.moments.shape[-1] # in this case only 1-D\n # assuming full rank of L'\n self.k_constraints = self.transf_mt.shape[0]\n\n @cache_readonly\n def asy_transf_params(self):\n\n moments_deriv = self.moments_deriv # H\n #weights = self.weights # W\n\n htw = self.htw # moments_deriv.T.dot(weights) # H'W\n res = np.linalg.solve(htw.dot(moments_deriv), htw)\n #res = np.linalg.pinv(htw.dot(moments_deriv)).dot(htw)\n return -res\n\n @cache_readonly\n def project_w(self):\n # P_w = I - H (H' W H)^{-1} H' W\n moments_deriv = self.moments_deriv # H\n\n res = moments_deriv.dot(self.asy_transf_params)\n res += np.eye(res.shape[0])\n return res\n\n @cache_readonly\n def asy_transform_mom_constraints(self):\n # L P_w\n res = self.transf_mt.dot(self.project_w)\n return res\n\n @cache_readonly\n def asy_cov_moments(self):\n \"\"\"\n\n `sqrt(T) * g_T(b_0) asy N(K delta, V)`\n\n mean is not implemented,\n V is the same as cov_moments in __init__ argument\n \"\"\"\n\n return self.cov_moments\n\n @cache_readonly\n def cov_mom_constraints(self):\n\n # linear transformation\n transf = self.asy_transform_mom_constraints\n\n return transf.dot(self.asy_cov_moments).dot(transf.T)\n\n @cache_readonly\n def rank_cov_mom_constraints(self):\n return np.linalg.matrix_rank(self.cov_mom_constraints)\n\n def ztest(self):\n \"\"\"statistic, p-value and degrees of freedom of separate moment test\n\n currently two sided test only\n\n TODO: This can use generic ztest/ttest features and return\n ContrastResults\n \"\"\"\n diff = self.moments_constraint\n bse = 
np.sqrt(np.diag(self.cov_mom_constraints))\n\n # Newey uses a generalized inverse\n stat = diff / bse\n pval = stats.norm.sf(np.abs(stat))*2\n return stat, pval\n\n @cache_readonly\n def chisquare(self):\n \"\"\"statistic, p-value and degrees of freedom of joint moment test\n \"\"\"\n diff = self.moments_constraint\n cov = self.cov_mom_constraints\n\n # Newey uses a generalized inverse\n stat = diff.T.dot(np.linalg.pinv(cov).dot(diff))\n df = self.rank_cov_mom_constraints\n pval = stats.chi2.sf(stat, df) # Theorem 1\n return stat, pval, df\n\n\nclass CMTTauchen(object):\n \"\"\"generic moment tests or conditional moment tests for Quasi-MLE\n\n This is a generic class based on Tauchen 1985\n\n The main method is `chisquare` which returns the result of the\n conditional moment test.\n\n Warning: name of class and of methods will likely be changed\n\n Parameters\n ----------\n score : ndarray, 1-D\n moment condition used in estimation, score of log-likelihood function\n score_deriv : ndarray\n derivative of score function with respect to the parameters that are\n estimated. This is the Hessian in quasi-maximum likelihood\n moments : ndarray, 1-D\n moments that are tested to be zero. They do not need to be derived\n from a likelihood function.\n moments_deriv : ndarray\n derivative of the moment function with respect to the parameters that\n are estimated\n cov_moments : ndarray\n An estimate for the joint (expected) covariance of score and test\n moments. This can be a heteroscedasticity or correlation robust\n covariance estimate, i.e. the inner part of a sandwich covariance.\n \"\"\"\n\n def __init__(self, score, score_deriv, moments, moments_deriv, cov_moments):\n self.score = score\n self.score_deriv = score_deriv\n self.moments = moments\n self.moments_deriv = moments_deriv\n self.cov_moments_all = cov_moments\n\n self.k_moments_test = moments.shape[-1]\n self.k_params = score.shape[-1]\n self.k_moments_all = self.k_params + self.k_moments_test\n\n @cache_readonly\n def cov_params_all(self):\n m_deriv = np.zeros((self.k_moments_all, self.k_moments_all))\n m_deriv[:self.k_params, :self.k_params] = self.score_deriv\n m_deriv[self.k_params:, :self.k_params] = self.moments_deriv\n m_deriv[self.k_params:, self.k_params:] = np.eye(self.k_moments_test)\n\n m_deriv_inv = np.linalg.inv(m_deriv)\n cov = m_deriv_inv.dot(self.cov_moments_all.dot(m_deriv_inv.T)) # K_inv J K_inv\n return cov\n\n @cache_readonly\n def cov_mom_constraints(self):\n return self.cov_params_all[self.k_params:, self.k_params:]\n\n @cache_readonly\n def rank_cov_mom_constraints(self):\n return np.linalg.matrix_rank(self.cov_mom_constraints)\n\n # TODO: not DRY, just copied from CMTNewey\n def ztest(self):\n \"\"\"statistic, p-value and degrees of freedom of separate moment test\n\n currently two sided test only\n\n TODO: This can use generic ztest/ttest features and return\n ContrastResults\n \"\"\"\n diff = self.moments_constraint\n bse = np.sqrt(np.diag(self.cov_mom_constraints))\n\n # Newey uses a generalized inverse\n stat = diff / bse\n pval = stats.norm.sf(np.abs(stat))*2\n return stat, pval\n\n @cache_readonly\n def chisquare(self):\n \"\"\"statistic, p-value and degrees of freedom of joint moment test\n \"\"\"\n diff = self.moments #_constraints\n cov = self.cov_mom_constraints\n\n # Newey uses a generalized inverse, we use it also here\n stat = diff.T.dot(np.linalg.pinv(cov).dot(diff))\n #df = self.k_moments_test\n # We allow for redundant mom_constraints:\n df = self.rank_cov_mom_constraints\n pval = 
stats.chi2.sf(stat, df)\n return stat, pval, df\n", "import warnings\n\nimport numpy as np\nimport pandas as pd\n\nfrom ..doctools import document\nfrom ..exceptions import PlotnineWarning\nfrom .smoothers import predictdf\nfrom .stat import stat\n\n\n@document\nclass stat_smooth(stat):\n \"\"\"\n Calculate a smoothed conditional mean\n\n {usage}\n\n Parameters\n ----------\n {common_parameters}\n\n method : str or callable, optional (default: 'auto')\n The available methods are::\n\n 'auto' # Use loess if (n<1000), glm otherwise\n 'lm', 'ols' # Linear Model\n 'wls' # Weighted Linear Model\n 'rlm' # Robust Linear Model\n 'glm' # Generalized linear Model\n 'gls' # Generalized Least Squares\n 'lowess' # Locally Weighted Regression (simple)\n 'loess' # Locally Weighted Regression\n 'mavg' # Moving Average\n 'gpr' # Gaussian Process Regressor\n\n If a `callable` is passed, it must have the signature::\n\n def my_smoother(data, xseq, **params):\n # * data - has the x and y values for the model\n # * xseq - x values to be predicted\n # * params - stat parameters\n #\n # It must return a new dataframe. Below is the\n # template used internally by Plotnine\n\n # Input data into the model\n x, y = data['x'], data['y']\n\n # Create and fit a model\n model = Model(x, y)\n results = Model.fit()\n\n # Create output data by getting predictions on\n # the xseq values\n data = pd.DataFrame({\n 'x': xseq,\n 'y': results.predict(xseq)})\n\n # Compute confidence intervals, this depends on\n # the model. However, given standard errors and the\n # degrees of freedom we can compute the confidence\n # intervals using the t-distribution.\n #\n # For an alternative, implement confidence interals by\n # the bootstrap method\n if params['se']:\n from plotnine.utils.smoothers import tdist_ci\n y = data['y'] # The predicted value\n df = 123 # Degrees of freedom\n stderr = results.stderr # Standard error\n level = params['level'] # The parameter value\n low, high = tdist_ci(y, df, stderr, level)\n data['se'] = stderr\n data['ymin'] = low\n data['ymax'] = high\n\n return data\n\n For *loess* smoothing you must install the `scikit-misc` package.\n You can install it using with ``pip install scikit-misc`` or\n ``pip install plotnine[all]``.\n formula : formula_like\n An object that can be used to construct a patsy design matrix.\n This is usually a string. You can only use a formula if ``method``\n is one of *lm*, *ols*, *wls*, *glm*, *rlm* or *gls*, and in the\n :ref:`formula <patsy:formulas>` you may refer to the ``x`` and\n ``y`` aesthetic variables.\n se : bool (default: True)\n If :py:`True` draw confidence interval around the smooth line.\n n : int (default: 80)\n Number of points to evaluate the smoother at. Some smoothers\n like *mavg* do not support this.\n fullrange : bool (default: False)\n If :py:`True` the fit will span the full range of the plot.\n level : float (default: 0.95)\n Level of confidence to use if :py:`se=True`.\n span : float (default: 2/3.)\n Controls the amount of smoothing for the *loess* smoother.\n Larger number means more smoothing. 
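For example, ``span=0.3`` follows the data more closely than ``span=0.9``. 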
It should be in the\n        ``(0, 1)`` range.\n    method_args : dict (default: {})\n        Additional arguments passed on to the modelling method.\n\n    See Also\n    --------\n    statsmodels.regression.linear_model.OLS\n    statsmodels.regression.linear_model.WLS\n    statsmodels.robust.robust_linear_model.RLM\n    statsmodels.genmod.generalized_linear_model.GLM\n    statsmodels.regression.linear_model.GLS\n    statsmodels.nonparametric.smoothers_lowess.lowess\n    skmisc.loess.loess\n    pandas.DataFrame.rolling\n    sklearn.gaussian_process.GaussianProcessRegressor\n\n    Notes\n    -----\n    :class:`~plotnine.geoms.geom_smooth` and :class:`.stat_smooth` are\n    effectively aliases, they both use the same arguments.\n    Use :class:`~plotnine.geoms.geom_smooth` unless\n    you want to display the results with a non-standard geom.\n    \"\"\"\n\n    _aesthetics_doc = \"\"\"\n    {aesthetics_table}\n\n    .. rubric:: Options for computed aesthetics\n\n    ::\n\n        'se'    # Standard error of points in bin\n        'ymin'  # Lower confidence limit\n        'ymax'  # Upper confidence limit\n\n    Calculated aesthetics are accessed using the `after_stat` function.\n    e.g. :py:`after_stat('se')`.\n    \"\"\"\n\n    REQUIRED_AES = {'x', 'y'}\n    DEFAULT_PARAMS = {'geom': 'smooth', 'position': 'identity',\n                      'na_rm': False,\n                      'method': 'auto', 'se': True, 'n': 80,\n                      'formula': None,\n                      'fullrange': False, 'level': 0.95,\n                      'span': 0.75, 'method_args': {}}\n    CREATES = {'se', 'ymin', 'ymax'}\n\n    def setup_data(self, data):\n        \"\"\"\n        Override to modify data before compute_layer is called\n        \"\"\"\n        data = data[np.isfinite(data['x']) &\n                    np.isfinite(data['y'])]\n        return data\n\n    def setup_params(self, data):\n        params = self.params.copy()\n        # Use loess/lowess for small datasets\n        # and glm for large\n        if params['method'] == 'auto':\n            max_group = data['group'].value_counts().max()\n            if max_group < 1000:\n                try:\n                    from skmisc.loess import loess  # noqa: F401\n                    params['method'] = 'loess'\n                except ImportError:\n                    params['method'] = 'lowess'\n            else:\n                params['method'] = 'glm'\n\n        if params['method'] == 'mavg':\n            if 'window' not in params['method_args']:\n                window = len(data) // 10\n                warnings.warn(\n                    \"No 'window' specified in the method_args. \"\n                    \"Using window = {}. \"\n                    \"The same window is used for all groups or \"\n                    \"facets\".format(window), PlotnineWarning)\n                params['method_args']['window'] = window\n\n        if params['formula']:\n            allowed = {'lm', 'ols', 'wls', 'glm', 'rlm', 'gls'}\n            if params['method'] not in allowed:\n                raise ValueError(\n                    \"You can only use a formula when `method` is \"\n                    \"one of {}\".format(allowed)\n                )\n            params['enviroment'] = self.environment\n\n        return params\n\n    @classmethod\n    def compute_group(cls, data, scales, **params):\n        data = data.sort_values('x')\n        n = params['n']\n\n        x_unique = data['x'].unique()\n\n        if len(x_unique) < 2:\n            warnings.warn(\n                \"Smoothing requires 2 or more points. Got {}. \"\n                \"Not enough points for smoothing. 
If this message is \"\n                \"a surprise, make sure the column mapped to the x \"\n                \"aesthetic has the right dtype.\".format(len(x_unique)),\n                PlotnineWarning\n            )\n            # Not enough data to fit\n            return pd.DataFrame()\n\n        if data['x'].dtype.kind == 'i':\n            if params['fullrange']:\n                xseq = scales.x.dimension()\n            else:\n                xseq = np.sort(x_unique)\n        else:\n            if params['fullrange']:\n                rangee = scales.x.dimension()\n            else:\n                rangee = [data['x'].min(), data['x'].max()]\n            xseq = np.linspace(rangee[0], rangee[1], n)\n\n        return predictdf(data, xseq, **params)\n", "import functools\nimport operator\nimport sys\nimport warnings\nimport numbers\nfrom collections import namedtuple\nimport inspect\nimport math\n\nimport numpy as np\n\ntry:\n    from numpy.random import Generator as Generator\nexcept ImportError:\n    class Generator():  # type: ignore[no-redef]\n        pass\n\n\ndef _lazywhere(cond, arrays, f, fillvalue=None, f2=None):\n    \"\"\"\n    np.where(cond, x, fillvalue) always evaluates x even where cond is False.\n    This one only evaluates f(arr1[cond], arr2[cond], ...).\n\n    Examples\n    --------\n\n    >>> a, b = np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8])\n    >>> def f(a, b):\n    ...     return a*b\n    >>> _lazywhere(a > 2, (a, b), f, np.nan)\n    array([ nan, nan, 21., 32.])\n\n    Notice, it assumes that all `arrays` are of the same shape, or can be\n    broadcasted together.\n\n    \"\"\"\n    if fillvalue is None:\n        if f2 is None:\n            raise ValueError(\"One of (fillvalue, f2) must be given.\")\n        else:\n            fillvalue = np.nan\n    else:\n        if f2 is not None:\n            raise ValueError(\"Only one of (fillvalue, f2) can be given.\")\n\n    arrays = np.broadcast_arrays(*arrays)\n    temp = tuple(np.extract(cond, arr) for arr in arrays)\n    tcode = np.mintypecode([a.dtype.char for a in arrays])\n    out = np.full(np.shape(arrays[0]), fill_value=fillvalue, dtype=tcode)\n    np.place(out, cond, f(*temp))\n    if f2 is not None:\n        temp = tuple(np.extract(~cond, arr) for arr in arrays)\n        np.place(out, ~cond, f2(*temp))\n\n    return out\n\n\ndef _lazyselect(condlist, choicelist, arrays, default=0):\n    \"\"\"\n    Mimic `np.select(condlist, choicelist)`.\n\n    Notice, it assumes that all `arrays` are of the same shape or can be\n    broadcasted together.\n\n    All functions in `choicelist` must accept array arguments in the order\n    given in `arrays` and must return an array of the same shape as broadcasted\n    `arrays`.\n\n    Examples\n    --------\n    >>> x = np.arange(6)\n    >>> np.select([x < 3, x > 3], [x**2, x**3], default=0)\n    array([ 0, 1, 4, 0, 64, 125])\n\n    >>> _lazyselect([x < 3, x > 3], [lambda x: x**2, lambda x: x**3], (x,))\n    array([ 0., 1., 4., 0., 64., 125.])\n\n    >>> a = -np.ones_like(x)\n    >>> _lazyselect([x < 3, x > 3],\n    ...             [lambda x, a: x**2, lambda x, a: a * x**3],\n    ...             
(x, a), default=np.nan)\n array([ 0., 1., 4., nan, -64., -125.])\n\n \"\"\"\n arrays = np.broadcast_arrays(*arrays)\n tcode = np.mintypecode([a.dtype.char for a in arrays])\n out = np.full(np.shape(arrays[0]), fill_value=default, dtype=tcode)\n for index in range(len(condlist)):\n func, cond = choicelist[index], condlist[index]\n if np.all(cond is False):\n continue\n cond, _ = np.broadcast_arrays(cond, arrays[0])\n temp = tuple(np.extract(cond, arr) for arr in arrays)\n np.place(out, cond, func(*temp))\n return out\n\n\ndef _aligned_zeros(shape, dtype=float, order=\"C\", align=None):\n \"\"\"Allocate a new ndarray with aligned memory.\n\n Primary use case for this currently is working around a f2py issue\n in NumPy 1.9.1, where dtype.alignment is such that np.zeros() does\n not necessarily create arrays aligned up to it.\n\n \"\"\"\n dtype = np.dtype(dtype)\n if align is None:\n align = dtype.alignment\n if not hasattr(shape, '__len__'):\n shape = (shape,)\n size = functools.reduce(operator.mul, shape) * dtype.itemsize\n buf = np.empty(size + align + 1, np.uint8)\n offset = buf.__array_interface__['data'][0] % align\n if offset != 0:\n offset = align - offset\n # Note: slices producing 0-size arrays do not necessarily change\n # data pointer --- so we use and allocate size+1\n buf = buf[offset:offset+size+1][:-1]\n data = np.ndarray(shape, dtype, buf, order=order)\n data.fill(0)\n return data\n\n\ndef _prune_array(array):\n \"\"\"Return an array equivalent to the input array. If the input\n array is a view of a much larger array, copy its contents to a\n newly allocated array. Otherwise, return the input unchanged.\n \"\"\"\n if array.base is not None and array.size < array.base.size // 2:\n return array.copy()\n return array\n\n\ndef prod(iterable):\n \"\"\"\n Product of a sequence of numbers.\n\n Faster than np.prod for short lists like array shapes, and does\n not overflow if using Python integers.\n \"\"\"\n product = 1\n for x in iterable:\n product *= x\n return product\n\n\ndef float_factorial(n: int) -> float:\n \"\"\"Compute the factorial and return as a float\n\n Returns infinity when result is too large for a double\n \"\"\"\n return float(math.factorial(n)) if n < 171 else np.inf\n\n\nclass DeprecatedImport(object):\n \"\"\"\n Deprecated import with redirection and warning.\n\n Examples\n --------\n Suppose you previously had in some module::\n\n from foo import spam\n\n If this has to be deprecated, do::\n\n spam = DeprecatedImport(\"foo.spam\", \"baz\")\n\n to redirect users to use \"baz\" module instead.\n\n \"\"\"\n\n def __init__(self, old_module_name, new_module_name):\n self._old_name = old_module_name\n self._new_name = new_module_name\n __import__(self._new_name)\n self._mod = sys.modules[self._new_name]\n\n def __dir__(self):\n return dir(self._mod)\n\n def __getattr__(self, name):\n warnings.warn(\"Module %s is deprecated, use %s instead\"\n % (self._old_name, self._new_name),\n DeprecationWarning)\n return getattr(self._mod, name)\n\n\n# copy-pasted from scikit-learn utils/validation.py\ndef check_random_state(seed):\n \"\"\"Turn seed into a np.random.RandomState instance\n\n If seed is None (or np.random), return the RandomState singleton used\n by np.random.\n If seed is an int, return a new RandomState instance seeded with seed.\n If seed is already a RandomState instance, return it.\n If seed is a new-style np.random.Generator, return it.\n Otherwise, raise ValueError.\n \"\"\"\n if seed is None or seed is np.random:\n return np.random.mtrand._rand\n if 
isinstance(seed, (numbers.Integral, np.integer)):\n return np.random.RandomState(seed)\n if isinstance(seed, np.random.RandomState):\n return seed\n try:\n # Generator is only available in numpy >= 1.17\n if isinstance(seed, np.random.Generator):\n return seed\n except AttributeError:\n pass\n raise ValueError('%r cannot be used to seed a numpy.random.RandomState'\n ' instance' % seed)\n\n\ndef _asarray_validated(a, check_finite=True,\n sparse_ok=False, objects_ok=False, mask_ok=False,\n as_inexact=False):\n \"\"\"\n Helper function for SciPy argument validation.\n\n Many SciPy linear algebra functions do support arbitrary array-like\n input arguments. Examples of commonly unsupported inputs include\n matrices containing inf/nan, sparse matrix representations, and\n matrices with complicated elements.\n\n Parameters\n ----------\n a : array_like\n The array-like input.\n check_finite : bool, optional\n Whether to check that the input matrices contain only finite numbers.\n Disabling may give a performance gain, but may result in problems\n (crashes, non-termination) if the inputs do contain infinities or NaNs.\n Default: True\n sparse_ok : bool, optional\n True if scipy sparse matrices are allowed.\n objects_ok : bool, optional\n True if arrays with dype('O') are allowed.\n mask_ok : bool, optional\n True if masked arrays are allowed.\n as_inexact : bool, optional\n True to convert the input array to a np.inexact dtype.\n\n Returns\n -------\n ret : ndarray\n The converted validated array.\n\n \"\"\"\n if not sparse_ok:\n import scipy.sparse\n if scipy.sparse.issparse(a):\n msg = ('Sparse matrices are not supported by this function. '\n 'Perhaps one of the scipy.sparse.linalg functions '\n 'would work instead.')\n raise ValueError(msg)\n if not mask_ok:\n if np.ma.isMaskedArray(a):\n raise ValueError('masked arrays are not supported')\n toarray = np.asarray_chkfinite if check_finite else np.asarray\n a = toarray(a)\n if not objects_ok:\n if a.dtype is np.dtype('O'):\n raise ValueError('object arrays are not supported')\n if as_inexact:\n if not np.issubdtype(a.dtype, np.inexact):\n a = toarray(a, dtype=np.float_)\n return a\n\n\n# Add a replacement for inspect.getfullargspec()/\n# The version below is borrowed from Django,\n# https://github.com/django/django/pull/4846.\n\n# Note an inconsistency between inspect.getfullargspec(func) and\n# inspect.signature(func). 
If `func` is a bound method, the latter does *not*\n# list `self` as a first argument, while the former *does*.\n# Hence, cook up a common ground replacement: `getfullargspec_no_self` which\n# mimics `inspect.getfullargspec` but does not list `self`.\n#\n# This way, the caller code does not need to know whether it uses a legacy\n# .getfullargspec or a bright and shiny .signature.\n\nFullArgSpec = namedtuple('FullArgSpec',\n ['args', 'varargs', 'varkw', 'defaults',\n 'kwonlyargs', 'kwonlydefaults', 'annotations'])\n\ndef getfullargspec_no_self(func):\n \"\"\"inspect.getfullargspec replacement using inspect.signature.\n\n If func is a bound method, do not list the 'self' parameter.\n\n Parameters\n ----------\n func : callable\n A callable to inspect\n\n Returns\n -------\n fullargspec : FullArgSpec(args, varargs, varkw, defaults, kwonlyargs,\n kwonlydefaults, annotations)\n\n NOTE: if the first argument of `func` is self, it is *not*, I repeat\n *not*, included in fullargspec.args.\n This is done for consistency between inspect.getargspec() under\n Python 2.x, and inspect.signature() under Python 3.x.\n\n \"\"\"\n sig = inspect.signature(func)\n args = [\n p.name for p in sig.parameters.values()\n if p.kind in [inspect.Parameter.POSITIONAL_OR_KEYWORD,\n inspect.Parameter.POSITIONAL_ONLY]\n ]\n varargs = [\n p.name for p in sig.parameters.values()\n if p.kind == inspect.Parameter.VAR_POSITIONAL\n ]\n varargs = varargs[0] if varargs else None\n varkw = [\n p.name for p in sig.parameters.values()\n if p.kind == inspect.Parameter.VAR_KEYWORD\n ]\n varkw = varkw[0] if varkw else None\n defaults = tuple(\n p.default for p in sig.parameters.values()\n if (p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD and\n p.default is not p.empty)\n ) or None\n kwonlyargs = [\n p.name for p in sig.parameters.values()\n if p.kind == inspect.Parameter.KEYWORD_ONLY\n ]\n kwdefaults = {p.name: p.default for p in sig.parameters.values()\n if p.kind == inspect.Parameter.KEYWORD_ONLY and\n p.default is not p.empty}\n annotations = {p.name: p.annotation for p in sig.parameters.values()\n if p.annotation is not p.empty}\n return FullArgSpec(args, varargs, varkw, defaults, kwonlyargs,\n kwdefaults or None, annotations)\n\n\nclass MapWrapper(object):\n \"\"\"\n Parallelisation wrapper for working with map-like callables, such as\n `multiprocessing.Pool.map`.\n\n Parameters\n ----------\n pool : int or map-like callable\n If `pool` is an integer, then it specifies the number of threads to\n use for parallelization. 
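For example, ``MapWrapper(4)`` evaluates the map over a pool of four workers (a ``multiprocessing.Pool``). 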
If ``int(pool) == 1``, then no parallel\n processing is used and the map builtin is used.\n If ``pool == -1``, then the pool will utilize all available CPUs.\n If `pool` is a map-like callable that follows the same\n calling sequence as the built-in map function, then this callable is\n used for parallelization.\n \"\"\"\n def __init__(self, pool=1):\n self.pool = None\n self._mapfunc = map\n self._own_pool = False\n\n if callable(pool):\n self.pool = pool\n self._mapfunc = self.pool\n else:\n from multiprocessing import Pool\n # user supplies a number\n if int(pool) == -1:\n # use as many processors as possible\n self.pool = Pool()\n self._mapfunc = self.pool.map\n self._own_pool = True\n elif int(pool) == 1:\n pass\n elif int(pool) > 1:\n # use the number of processors requested\n self.pool = Pool(processes=int(pool))\n self._mapfunc = self.pool.map\n self._own_pool = True\n else:\n raise RuntimeError(\"Number of workers specified must be -1,\"\n \" an int >= 1, or an object with a 'map' method\")\n\n def __enter__(self):\n return self\n\n def terminate(self):\n if self._own_pool:\n self.pool.terminate()\n\n def join(self):\n if self._own_pool:\n self.pool.join()\n\n def close(self):\n if self._own_pool:\n self.pool.close()\n\n def __exit__(self, exc_type, exc_value, traceback):\n if self._own_pool:\n self.pool.close()\n self.pool.terminate()\n\n def __call__(self, func, iterable):\n # only accept one iterable because that's all Pool.map accepts\n try:\n return self._mapfunc(func, iterable)\n except TypeError as e:\n # wrong number of arguments\n raise TypeError(\"The map-like callable must be of the\"\n \" form f(func, iterable)\") from e\n\n\ndef rng_integers(gen, low, high=None, size=None, dtype='int64',\n endpoint=False):\n \"\"\"\n Return random integers from low (inclusive) to high (exclusive), or if\n endpoint=True, low (inclusive) to high (inclusive). Replaces\n `RandomState.randint` (with endpoint=False) and\n `RandomState.random_integers` (with endpoint=True).\n\n Return random integers from the \"discrete uniform\" distribution of the\n specified dtype. If high is None (the default), then results are from\n 0 to low.\n\n Parameters\n ----------\n gen: {None, np.random.RandomState, np.random.Generator}\n Random number generator. If None, then the np.random.RandomState\n singleton is used.\n low: int or array-like of ints\n Lowest (signed) integers to be drawn from the distribution (unless\n high=None, in which case this parameter is 0 and this value is used\n for high).\n high: int or array-like of ints\n If provided, one above the largest (signed) integer to be drawn from\n the distribution (see above for behavior if high=None). If array-like,\n must contain integer values.\n size: None\n Output shape. If the given shape is, e.g., (m, n, k), then m * n * k\n samples are drawn. Default is None, in which case a single value is\n returned.\n dtype: {str, dtype}, optional\n Desired dtype of the result. 
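For example, passing dtype='int32' requests 32-bit integers. 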
All dtypes are determined by their name,\n i.e., 'int64', 'int', etc, so byteorder is not available and a specific\n precision may have different C types depending on the platform.\n The default value is np.int_.\n endpoint: bool, optional\n If True, sample from the interval [low, high] instead of the default\n [low, high) Defaults to False.\n\n Returns\n -------\n out: int or ndarray of ints\n size-shaped array of random integers from the appropriate distribution,\n or a single such random int if size not provided.\n \"\"\"\n if isinstance(gen, Generator):\n return gen.integers(low, high=high, size=size, dtype=dtype,\n endpoint=endpoint)\n else:\n if gen is None:\n # default is RandomState singleton used by np.random.\n gen = np.random.mtrand._rand\n if endpoint:\n # inclusive of endpoint\n # remember that low and high can be arrays, so don't modify in\n # place\n if high is None:\n return gen.randint(low + 1, size=size, dtype=dtype)\n if high is not None:\n return gen.randint(low, high=high + 1, size=size, dtype=dtype)\n\n # exclusive\n return gen.randint(low, high=high, size=size, dtype=dtype)\n", "import numpy as np\nfrom numpy.testing import assert_almost_equal\nimport pytest\n\nfrom statsmodels.datasets import heart\nfrom statsmodels.tools import add_constant\nfrom statsmodels.emplike.aft_el import emplikeAFT\nfrom .results.el_results import AFTRes\n\n\nclass GenRes(object):\n @classmethod\n def setup_class(cls):\n data = heart.load(as_pandas=False)\n endog = np.log10(data.endog)\n exog = add_constant(data.exog)\n cls.mod1 = emplikeAFT(endog, exog, data.censors)\n cls.res1 = cls.mod1.fit()\n cls.res2 = AFTRes()\n\n\nclass Test_AFTModel(GenRes):\n\n def test_params(self):\n assert_almost_equal(self.res1.params(), self.res2.test_params,\n decimal=4)\n\n def test_beta0(self):\n assert_almost_equal(self.res1.test_beta([4], [0]),\n self.res2.test_beta0, decimal=4)\n\n def test_beta1(self):\n assert_almost_equal(self.res1.test_beta([-.04], [1]),\n self.res2.test_beta1, decimal=4)\n\n def test_beta_vect(self):\n assert_almost_equal(self.res1.test_beta([3.5, -.035], [0, 1]),\n self.res2.test_joint, decimal=4)\n\n @pytest.mark.slow\n def test_betaci(self):\n ci = self.res1.ci_beta(1, -.06, 0)\n ll = ci[0]\n ul = ci[1]\n ll_pval = self.res1.test_beta([ll], [1])[1]\n ul_pval = self.res1.test_beta([ul], [1])[1]\n assert_almost_equal(ul_pval, .050000, decimal=4)\n assert_almost_equal(ll_pval, .05000, decimal=4)\n", "# Author: Travis Oliphant\n# 1999 -- 2002\n\nimport operator\nimport math\nimport timeit\nfrom scipy.spatial import cKDTree\nfrom . 
import sigtools, dlti\nfrom ._upfirdn import upfirdn, _output_len, _upfirdn_modes\nfrom scipy import linalg, fft as sp_fft\nfrom scipy.fft._helper import _init_nd_shape_and_axes\nfrom scipy._lib._util import prod as _prod\nimport numpy as np\nfrom scipy.special import lambertw\nfrom .windows import get_window\nfrom ._arraytools import axis_slice, axis_reverse, odd_ext, even_ext, const_ext\nfrom .filter_design import cheby1, _validate_sos\nfrom .fir_filter_design import firwin\nfrom ._sosfilt import _sosfilt\nimport warnings\n\n\n__all__ = ['correlate', 'correlation_lags', 'correlate2d',\n 'convolve', 'convolve2d', 'fftconvolve', 'oaconvolve',\n 'order_filter', 'medfilt', 'medfilt2d', 'wiener', 'lfilter',\n 'lfiltic', 'sosfilt', 'deconvolve', 'hilbert', 'hilbert2',\n 'cmplx_sort', 'unique_roots', 'invres', 'invresz', 'residue',\n 'residuez', 'resample', 'resample_poly', 'detrend',\n 'lfilter_zi', 'sosfilt_zi', 'sosfiltfilt', 'choose_conv_method',\n 'filtfilt', 'decimate', 'vectorstrength']\n\n\n_modedict = {'valid': 0, 'same': 1, 'full': 2}\n\n_boundarydict = {'fill': 0, 'pad': 0, 'wrap': 2, 'circular': 2, 'symm': 1,\n 'symmetric': 1, 'reflect': 4}\n\n\ndef _valfrommode(mode):\n try:\n return _modedict[mode]\n except KeyError as e:\n raise ValueError(\"Acceptable mode flags are 'valid',\"\n \" 'same', or 'full'.\") from e\n\n\ndef _bvalfromboundary(boundary):\n try:\n return _boundarydict[boundary] << 2\n except KeyError as e:\n raise ValueError(\"Acceptable boundary flags are 'fill', 'circular' \"\n \"(or 'wrap'), and 'symmetric' (or 'symm').\") from e\n\n\ndef _inputs_swap_needed(mode, shape1, shape2, axes=None):\n \"\"\"Determine if inputs arrays need to be swapped in `\"valid\"` mode.\n\n If in `\"valid\"` mode, returns whether or not the input arrays need to be\n swapped depending on whether `shape1` is at least as large as `shape2` in\n every calculated dimension.\n\n This is important for some of the correlation and convolution\n implementations in this module, where the larger array input needs to come\n before the smaller array input when operating in this mode.\n\n Note that if the mode provided is not 'valid', False is immediately\n returned.\n\n \"\"\"\n if mode != 'valid':\n return False\n\n if not shape1:\n return False\n\n if axes is None:\n axes = range(len(shape1))\n\n ok1 = all(shape1[i] >= shape2[i] for i in axes)\n ok2 = all(shape2[i] >= shape1[i] for i in axes)\n\n if not (ok1 or ok2):\n raise ValueError(\"For 'valid' mode, one must be at least \"\n \"as large as the other in every dimension\")\n\n return not ok1\n\n\ndef correlate(in1, in2, mode='full', method='auto'):\n r\"\"\"\n Cross-correlate two N-dimensional arrays.\n\n Cross-correlate `in1` and `in2`, with the output size determined by the\n `mode` argument.\n\n Parameters\n ----------\n in1 : array_like\n First input.\n in2 : array_like\n Second input. Should have the same number of dimensions as `in1`.\n mode : str {'full', 'valid', 'same'}, optional\n A string indicating the size of the output:\n\n ``full``\n The output is the full discrete linear cross-correlation\n of the inputs. (Default)\n ``valid``\n The output consists only of those elements that do not\n rely on the zero-padding. 
In 'valid' mode, either `in1` or `in2`\n must be at least as large as the other in every dimension.\n ``same``\n The output is the same size as `in1`, centered\n with respect to the 'full' output.\n method : str {'auto', 'direct', 'fft'}, optional\n A string indicating which method to use to calculate the correlation.\n\n ``direct``\n The correlation is determined directly from sums, the definition of\n correlation.\n ``fft``\n The Fast Fourier Transform is used to perform the correlation more\n quickly (only available for numerical arrays.)\n ``auto``\n Automatically chooses direct or Fourier method based on an estimate\n of which is faster (default). See `convolve` Notes for more detail.\n\n .. versionadded:: 0.19.0\n\n Returns\n -------\n correlate : array\n An N-dimensional array containing a subset of the discrete linear\n cross-correlation of `in1` with `in2`.\n\n See Also\n --------\n choose_conv_method : contains more documentation on `method`.\n correlation_lags : calculates the lag / displacement indices array for 1D\n cross-correlation.\n\n Notes\n -----\n The correlation z of two d-dimensional arrays x and y is defined as::\n\n z[...,k,...] = sum[..., i_l, ...] x[..., i_l,...] * conj(y[..., i_l - k,...])\n\n This way, if x and y are 1-D arrays and ``z = correlate(x, y, 'full')``\n then\n\n .. math::\n\n z[k] = (x * y)(k - N + 1)\n = \\sum_{l=0}^{||x||-1}x_l y_{l-k+N-1}^{*}\n\n for :math:`k = 0, 1, ..., ||x|| + ||y|| - 2`\n\n where :math:`||x||` is the length of ``x``, :math:`N = \\max(||x||,||y||)`,\n and :math:`y_m` is 0 when m is outside the range of y.\n\n ``method='fft'`` only works for numerical arrays as it relies on\n `fftconvolve`. In certain cases (i.e., arrays of objects or when\n rounding integers can lose precision), ``method='direct'`` is always used.\n\n When using \"same\" mode with even-length inputs, the outputs of `correlate`\n and `correlate2d` differ: There is a 1-index offset between them.\n\n Examples\n --------\n Implement a matched filter using cross-correlation, to recover a signal\n that has passed through a noisy channel.\n\n >>> from scipy import signal\n >>> import matplotlib.pyplot as plt\n\n >>> sig = np.repeat([0., 1., 1., 0., 1., 0., 0., 1.], 128)\n >>> sig_noise = sig + np.random.randn(len(sig))\n >>> corr = signal.correlate(sig_noise, np.ones(128), mode='same') / 128\n\n >>> clock = np.arange(64, len(sig), 128)\n >>> fig, (ax_orig, ax_noise, ax_corr) = plt.subplots(3, 1, sharex=True)\n >>> ax_orig.plot(sig)\n >>> ax_orig.plot(clock, sig[clock], 'ro')\n >>> ax_orig.set_title('Original signal')\n >>> ax_noise.plot(sig_noise)\n >>> ax_noise.set_title('Signal with noise')\n >>> ax_corr.plot(corr)\n >>> ax_corr.plot(clock, corr[clock], 'ro')\n >>> ax_corr.axhline(0.5, ls=':')\n >>> ax_corr.set_title('Cross-correlated with rectangular pulse')\n >>> ax_orig.margins(0, 0.1)\n >>> fig.tight_layout()\n >>> plt.show()\n\n Compute the cross-correlation of a noisy signal with the original signal.\n\n >>> x = np.arange(128) / 128\n >>> sig = np.sin(2 * np.pi * x)\n >>> sig_noise = sig + np.random.randn(len(sig))\n >>> corr = signal.correlate(sig_noise, sig)\n >>> lags = signal.correlation_lags(len(sig), len(sig_noise))\n >>> corr /= np.max(corr)\n\n >>> fig, (ax_orig, ax_noise, ax_corr) = plt.subplots(3, 1, figsize=(4.8, 4.8))\n >>> ax_orig.plot(sig)\n >>> ax_orig.set_title('Original signal')\n >>> ax_orig.set_xlabel('Sample Number')\n >>> ax_noise.plot(sig_noise)\n >>> ax_noise.set_title('Signal with noise')\n >>> ax_noise.set_xlabel('Sample Number')\n >>> 
ax_corr.plot(lags, corr)\n >>> ax_corr.set_title('Cross-correlated signal')\n >>> ax_corr.set_xlabel('Lag')\n >>> ax_orig.margins(0, 0.1)\n >>> ax_noise.margins(0, 0.1)\n >>> ax_corr.margins(0, 0.1)\n >>> fig.tight_layout()\n >>> plt.show()\n \"\"\"\n in1 = np.asarray(in1)\n in2 = np.asarray(in2)\n\n if in1.ndim == in2.ndim == 0:\n return in1 * in2.conj()\n elif in1.ndim != in2.ndim:\n raise ValueError(\"in1 and in2 should have the same dimensionality\")\n\n # Don't use _valfrommode, since correlate should not accept numeric modes\n try:\n val = _modedict[mode]\n except KeyError as e:\n raise ValueError(\"Acceptable mode flags are 'valid',\"\n \" 'same', or 'full'.\") from e\n\n # this either calls fftconvolve or this function with method=='direct'\n if method in ('fft', 'auto'):\n return convolve(in1, _reverse_and_conj(in2), mode, method)\n\n elif method == 'direct':\n # fastpath to faster numpy.correlate for 1d inputs when possible\n if _np_conv_ok(in1, in2, mode):\n return np.correlate(in1, in2, mode)\n\n # _correlateND is far slower when in2.size > in1.size, so swap them\n # and then undo the effect afterward if mode == 'full'. Also, it fails\n # with 'valid' mode if in2 is larger than in1, so swap those, too.\n # Don't swap inputs for 'same' mode, since shape of in1 matters.\n swapped_inputs = ((mode == 'full') and (in2.size > in1.size) or\n _inputs_swap_needed(mode, in1.shape, in2.shape))\n\n if swapped_inputs:\n in1, in2 = in2, in1\n\n if mode == 'valid':\n ps = [i - j + 1 for i, j in zip(in1.shape, in2.shape)]\n out = np.empty(ps, in1.dtype)\n\n z = sigtools._correlateND(in1, in2, out, val)\n\n else:\n ps = [i + j - 1 for i, j in zip(in1.shape, in2.shape)]\n\n # zero pad input\n in1zpadded = np.zeros(ps, in1.dtype)\n sc = tuple(slice(0, i) for i in in1.shape)\n in1zpadded[sc] = in1.copy()\n\n if mode == 'full':\n out = np.empty(ps, in1.dtype)\n elif mode == 'same':\n out = np.empty(in1.shape, in1.dtype)\n\n z = sigtools._correlateND(in1zpadded, in2, out, val)\n\n if swapped_inputs:\n # Reverse and conjugate to undo the effect of swapping inputs\n z = _reverse_and_conj(z)\n\n return z\n\n else:\n raise ValueError(\"Acceptable method flags are 'auto',\"\n \" 'direct', or 'fft'.\")\n\n\ndef correlation_lags(in1_len, in2_len, mode='full'):\n r\"\"\"\n Calculates the lag / displacement indices array for 1D cross-correlation.\n\n Parameters\n ----------\n in1_size : int\n First input size.\n in2_size : int\n Second input size.\n mode : str {'full', 'valid', 'same'}, optional\n A string indicating the size of the output.\n See the documentation `correlate` for more information.\n\n See Also\n --------\n correlate : Compute the N-dimensional cross-correlation.\n\n Returns\n -------\n lags : array\n Returns an array containing cross-correlation lag/displacement indices.\n Indices can be indexed with the np.argmax of the correlation to return\n the lag/displacement.\n\n Notes\n -----\n Cross-correlation for continuous functions :math:`f` and :math:`g` is\n defined as:\n\n .. math ::\n\n \\left ( f\\star g \\right )\\left ( \\tau \\right )\n \\triangleq \\int_{t_0}^{t_0 +T}\n \\overline{f\\left ( t \\right )}g\\left ( t+\\tau \\right )dt\n\n Where :math:`\\tau` is defined as the displacement, also known as the lag.\n\n Cross correlation for discrete functions :math:`f` and :math:`g` is\n defined as:\n\n .. 
math ::\n \\left ( f\\star g \\right )\\left [ n \\right ]\n \\triangleq \\sum_{-\\infty}^{\\infty}\n \\overline{f\\left [ m \\right ]}g\\left [ m+n \\right ]\n\n Where :math:`n` is the lag.\n\n Examples\n --------\n Cross-correlation of a signal with its time-delayed self.\n\n >>> from scipy import signal\n >>> rng = np.random.RandomState(0)\n >>> x = rng.standard_normal(1000)\n >>> y = np.concatenate([rng.standard_normal(100), x])\n >>> correlation = signal.correlate(x, y, mode=\"full\")\n >>> lags = signal.correlation_lags(x.size, y.size, mode=\"full\")\n >>> lag = lags[np.argmax(correlation)]\n \"\"\"\n\n # calculate lag ranges in different modes of operation\n if mode == \"full\":\n # the output is the full discrete linear convolution\n # of the inputs. (Default)\n lags = np.arange(-in2_len + 1, in1_len)\n elif mode == \"same\":\n # the output is the same size as `in1`, centered\n # with respect to the 'full' output.\n # calculate the full output\n lags = np.arange(-in2_len + 1, in1_len)\n # determine the midpoint in the full output\n mid = lags.size // 2\n # determine lag_bound to be used with respect\n # to the midpoint\n lag_bound = in1_len // 2\n # calculate lag ranges for even and odd scenarios\n if in1_len % 2 == 0:\n lags = lags[(mid-lag_bound):(mid+lag_bound)]\n else:\n lags = lags[(mid-lag_bound):(mid+lag_bound)+1]\n elif mode == \"valid\":\n # the output consists only of those elements that do not\n # rely on the zero-padding. In 'valid' mode, either `in1` or `in2`\n # must be at least as large as the other in every dimension.\n\n # the lag_bound will be either negative or positive\n # this let's us infer how to present the lag range\n lag_bound = in1_len - in2_len\n if lag_bound >= 0:\n lags = np.arange(lag_bound + 1)\n else:\n lags = np.arange(lag_bound, 1)\n return lags\n\n\ndef _centered(arr, newshape):\n # Return the center newshape portion of the array.\n newshape = np.asarray(newshape)\n currshape = np.array(arr.shape)\n startind = (currshape - newshape) // 2\n endind = startind + newshape\n myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]\n return arr[tuple(myslice)]\n\n\ndef _init_freq_conv_axes(in1, in2, mode, axes, sorted_axes=False):\n \"\"\"Handle the axes argument for frequency-domain convolution.\n\n Returns the inputs and axes in a standard form, eliminating redundant axes,\n swapping the inputs if necessary, and checking for various potential\n errors.\n\n Parameters\n ----------\n in1 : array\n First input.\n in2 : array\n Second input.\n mode : str {'full', 'valid', 'same'}, optional\n A string indicating the size of the output.\n See the documentation `fftconvolve` for more information.\n axes : list of ints\n Axes over which to compute the FFTs.\n sorted_axes : bool, optional\n If `True`, sort the axes.\n Default is `False`, do not sort.\n\n Returns\n -------\n in1 : array\n The first input, possible swapped with the second input.\n in2 : array\n The second input, possible swapped with the first input.\n axes : list of ints\n Axes over which to compute the FFTs.\n\n \"\"\"\n s1 = in1.shape\n s2 = in2.shape\n noaxes = axes is None\n\n _, axes = _init_nd_shape_and_axes(in1, shape=None, axes=axes)\n\n if not noaxes and not len(axes):\n raise ValueError(\"when provided, axes cannot be empty\")\n\n # Axes of length 1 can rely on broadcasting rules for multipy,\n # no fft needed.\n axes = [a for a in axes if s1[a] != 1 and s2[a] != 1]\n\n if sorted_axes:\n axes.sort()\n\n if not all(s1[a] == s2[a] or s1[a] == 1 or s2[a] == 1\n for a in 
range(in1.ndim) if a not in axes):\n        raise ValueError(\"incompatible shapes for in1 and in2:\"\n                         \" {0} and {1}\".format(s1, s2))\n\n    # Check that input sizes are compatible with 'valid' mode.\n    if _inputs_swap_needed(mode, s1, s2, axes=axes):\n        # Convolution is commutative; order doesn't have any effect on output.\n        in1, in2 = in2, in1\n\n    return in1, in2, axes\n\n\ndef _freq_domain_conv(in1, in2, axes, shape, calc_fast_len=False):\n    \"\"\"Convolve two arrays in the frequency domain.\n\n    This function implements only the base FFT-related operations.\n    Specifically, it converts the signals to the frequency domain, multiplies\n    them, then converts them back to the time domain. Calculations of axes,\n    shapes, convolution mode, etc. are implemented in higher-level functions,\n    such as `fftconvolve` and `oaconvolve`. Those functions should be used\n    instead of this one.\n\n    Parameters\n    ----------\n    in1 : array_like\n        First input.\n    in2 : array_like\n        Second input. Should have the same number of dimensions as `in1`.\n    axes : array_like of ints\n        Axes over which to compute the FFTs.\n    shape : array_like of ints\n        The sizes of the FFTs.\n    calc_fast_len : bool, optional\n        If `True`, set each value of `shape` to the next fast FFT length.\n        Default is `False`, use `shape` as-is.\n\n    Returns\n    -------\n    out : array\n        An N-dimensional array containing the discrete linear convolution of\n        `in1` with `in2`.\n\n    \"\"\"\n    if not len(axes):\n        return in1 * in2\n\n    complex_result = (in1.dtype.kind == 'c' or in2.dtype.kind == 'c')\n\n    if calc_fast_len:\n        # Speed up FFT by padding to optimal size.\n        fshape = [\n            sp_fft.next_fast_len(shape[a], not complex_result) for a in axes]\n    else:\n        fshape = shape\n\n    if not complex_result:\n        fft, ifft = sp_fft.rfftn, sp_fft.irfftn\n    else:\n        fft, ifft = sp_fft.fftn, sp_fft.ifftn\n\n    sp1 = fft(in1, fshape, axes=axes)\n    sp2 = fft(in2, fshape, axes=axes)\n\n    ret = ifft(sp1 * sp2, fshape, axes=axes)\n\n    if calc_fast_len:\n        fslice = tuple([slice(sz) for sz in shape])\n        ret = ret[fslice]\n\n    return ret\n\n\ndef _apply_conv_mode(ret, s1, s2, mode, axes):\n    \"\"\"Calculate the convolution result shape based on the `mode` argument.\n\n    Returns the result sliced to the correct size for the given mode.\n\n    Parameters\n    ----------\n    ret : array\n        The result array, with the appropriate shape for the 'full' mode.\n    s1 : list of int\n        The shape of the first input.\n    s2 : list of int\n        The shape of the second input.\n    mode : str {'full', 'valid', 'same'}\n        A string indicating the size of the output.\n        See the documentation `fftconvolve` for more information.\n    axes : list of ints\n        Axes over which to compute the convolution.\n\n    Returns\n    -------\n    ret : array\n        A copy of `ret`, sliced to the correct size for the given `mode`.\n\n    \"\"\"\n    if mode == \"full\":\n        return ret.copy()\n    elif mode == \"same\":\n        return _centered(ret, s1).copy()\n    elif mode == \"valid\":\n        shape_valid = [ret.shape[a] if a not in axes else s1[a] - s2[a] + 1\n                       for a in range(ret.ndim)]\n        return _centered(ret, shape_valid).copy()\n    else:\n        raise ValueError(\"acceptable mode flags are 'valid',\"\n                         \" 'same', or 'full'\")\n\n\ndef fftconvolve(in1, in2, mode=\"full\", axes=None):\n    \"\"\"Convolve two N-dimensional arrays using FFT.\n\n    Convolve `in1` and `in2` using the fast Fourier transform method, with\n    the output size determined by the `mode` argument.\n\n    This is generally much faster than `convolve` for large arrays (n > ~500),\n    but can be slower when only a few output values are needed, and can only\n    output 
float arrays (int or object array inputs will be cast to float).\n\n As of v0.19, `convolve` automatically chooses this method or the direct\n method based on an estimation of which is faster.\n\n Parameters\n ----------\n in1 : array_like\n First input.\n in2 : array_like\n Second input. Should have the same number of dimensions as `in1`.\n mode : str {'full', 'valid', 'same'}, optional\n A string indicating the size of the output:\n\n ``full``\n The output is the full discrete linear convolution\n of the inputs. (Default)\n ``valid``\n The output consists only of those elements that do not\n rely on the zero-padding. In 'valid' mode, either `in1` or `in2`\n must be at least as large as the other in every dimension.\n ``same``\n The output is the same size as `in1`, centered\n with respect to the 'full' output.\n axes : int or array_like of ints or None, optional\n Axes over which to compute the convolution.\n The default is over all axes.\n\n Returns\n -------\n out : array\n An N-dimensional array containing a subset of the discrete linear\n convolution of `in1` with `in2`.\n\n See Also\n --------\n convolve : Uses the direct convolution or FFT convolution algorithm\n depending on which is faster.\n oaconvolve : Uses the overlap-add method to do convolution, which is\n generally faster when the input arrays are large and\n significantly different in size.\n\n Examples\n --------\n Autocorrelation of white noise is an impulse.\n\n >>> from scipy import signal\n >>> sig = np.random.randn(1000)\n >>> autocorr = signal.fftconvolve(sig, sig[::-1], mode='full')\n\n >>> import matplotlib.pyplot as plt\n >>> fig, (ax_orig, ax_mag) = plt.subplots(2, 1)\n >>> ax_orig.plot(sig)\n >>> ax_orig.set_title('White noise')\n >>> ax_mag.plot(np.arange(-len(sig)+1,len(sig)), autocorr)\n >>> ax_mag.set_title('Autocorrelation')\n >>> fig.tight_layout()\n >>> fig.show()\n\n Gaussian blur implemented using FFT convolution. Notice the dark borders\n around the image, due to the zero-padding beyond its boundaries.\n The `convolve2d` function allows for other types of image boundaries,\n but is far slower.\n\n >>> from scipy import misc\n >>> face = misc.face(gray=True)\n >>> kernel = np.outer(signal.windows.gaussian(70, 8),\n ... signal.windows.gaussian(70, 8))\n >>> blurred = signal.fftconvolve(face, kernel, mode='same')\n\n >>> fig, (ax_orig, ax_kernel, ax_blurred) = plt.subplots(3, 1,\n ... 
figsize=(6, 15))\n >>> ax_orig.imshow(face, cmap='gray')\n >>> ax_orig.set_title('Original')\n >>> ax_orig.set_axis_off()\n >>> ax_kernel.imshow(kernel, cmap='gray')\n >>> ax_kernel.set_title('Gaussian kernel')\n >>> ax_kernel.set_axis_off()\n >>> ax_blurred.imshow(blurred, cmap='gray')\n >>> ax_blurred.set_title('Blurred')\n >>> ax_blurred.set_axis_off()\n >>> fig.show()\n \"\"\"\n in1 = np.asarray(in1)\n in2 = np.asarray(in2)\n\n if in1.ndim == in2.ndim == 0: # scalar inputs\n return in1 * in2\n elif in1.ndim != in2.ndim:\n raise ValueError(\"in1 and in2 should have the same dimensionality\")\n elif in1.size == 0 or in2.size == 0: # empty arrays\n return np.array([])\n\n in1, in2, axes = _init_freq_conv_axes(in1, in2, mode, axes,\n sorted_axes=False)\n\n s1 = in1.shape\n s2 = in2.shape\n\n shape = [max((s1[i], s2[i])) if i not in axes else s1[i] + s2[i] - 1\n for i in range(in1.ndim)]\n\n ret = _freq_domain_conv(in1, in2, axes, shape, calc_fast_len=True)\n\n return _apply_conv_mode(ret, s1, s2, mode, axes)\n\n\ndef _calc_oa_lens(s1, s2):\n \"\"\"Calculate the optimal FFT lengths for overlapp-add convolution.\n\n The calculation is done for a single dimension.\n\n Parameters\n ----------\n s1 : int\n Size of the dimension for the first array.\n s2 : int\n Size of the dimension for the second array.\n\n Returns\n -------\n block_size : int\n The size of the FFT blocks.\n overlap : int\n The amount of overlap between two blocks.\n in1_step : int\n The size of each step for the first array.\n in2_step : int\n The size of each step for the first array.\n\n \"\"\"\n # Set up the arguments for the conventional FFT approach.\n fallback = (s1+s2-1, None, s1, s2)\n\n # Use conventional FFT convolve if sizes are same.\n if s1 == s2 or s1 == 1 or s2 == 1:\n return fallback\n\n if s2 > s1:\n s1, s2 = s2, s1\n swapped = True\n else:\n swapped = False\n\n # There cannot be a useful block size if s2 is more than half of s1.\n if s2 >= s1/2:\n return fallback\n\n # Derivation of optimal block length\n # For original formula see:\n # https://en.wikipedia.org/wiki/Overlap-add_method\n #\n # Formula:\n # K = overlap = s2-1\n # N = block_size\n # C = complexity\n # e = exponential, exp(1)\n #\n # C = (N*(log2(N)+1))/(N-K)\n # C = (N*log2(2N))/(N-K)\n # C = N/(N-K) * log2(2N)\n # C1 = N/(N-K)\n # C2 = log2(2N) = ln(2N)/ln(2)\n #\n # dC1/dN = (1*(N-K)-N)/(N-K)^2 = -K/(N-K)^2\n # dC2/dN = 2/(2*N*ln(2)) = 1/(N*ln(2))\n #\n # dC/dN = dC1/dN*C2 + dC2/dN*C1\n # dC/dN = -K*ln(2N)/(ln(2)*(N-K)^2) + N/(N*ln(2)*(N-K))\n # dC/dN = -K*ln(2N)/(ln(2)*(N-K)^2) + 1/(ln(2)*(N-K))\n # dC/dN = -K*ln(2N)/(ln(2)*(N-K)^2) + (N-K)/(ln(2)*(N-K)^2)\n # dC/dN = (-K*ln(2N) + (N-K)/(ln(2)*(N-K)^2)\n # dC/dN = (N - K*ln(2N) - K)/(ln(2)*(N-K)^2)\n #\n # Solve for minimum, where dC/dN = 0\n # 0 = (N - K*ln(2N) - K)/(ln(2)*(N-K)^2)\n # 0 * ln(2)*(N-K)^2 = N - K*ln(2N) - K\n # 0 = N - K*ln(2N) - K\n # 0 = N - K*(ln(2N) + 1)\n # 0 = N - K*ln(2Ne)\n # N = K*ln(2Ne)\n # N/K = ln(2Ne)\n #\n # e^(N/K) = e^ln(2Ne)\n # e^(N/K) = 2Ne\n # 1/e^(N/K) = 1/(2*N*e)\n # e^(N/-K) = 1/(2*N*e)\n # e^(N/-K) = K/N*1/(2*K*e)\n # N/K*e^(N/-K) = 1/(2*e*K)\n # N/-K*e^(N/-K) = -1/(2*e*K)\n #\n # Using Lambert W function\n # https://en.wikipedia.org/wiki/Lambert_W_function\n # x = W(y) It is the solution to y = x*e^x\n # x = N/-K\n # y = -1/(2*e*K)\n #\n # N/-K = W(-1/(2*e*K))\n #\n # N = -K*W(-1/(2*e*K))\n overlap = s2-1\n opt_size = -overlap*lambertw(-1/(2*math.e*overlap), k=-1).real\n block_size = sp_fft.next_fast_len(math.ceil(opt_size))\n\n # Use conventional 
FFT convolve if there is only going to be one block.\n if block_size >= s1:\n return fallback\n\n if not swapped:\n in1_step = block_size-s2+1\n in2_step = s2\n else:\n in1_step = s2\n in2_step = block_size-s2+1\n\n return block_size, overlap, in1_step, in2_step\n\n\ndef oaconvolve(in1, in2, mode=\"full\", axes=None):\n \"\"\"Convolve two N-dimensional arrays using the overlap-add method.\n\n Convolve `in1` and `in2` using the overlap-add method, with\n the output size determined by the `mode` argument.\n\n This is generally much faster than `convolve` for large arrays (n > ~500),\n and generally much faster than `fftconvolve` when one array is much\n larger than the other, but can be slower when only a few output values are\n needed or when the arrays are very similar in shape, and can only\n output float arrays (int or object array inputs will be cast to float).\n\n Parameters\n ----------\n in1 : array_like\n First input.\n in2 : array_like\n Second input. Should have the same number of dimensions as `in1`.\n mode : str {'full', 'valid', 'same'}, optional\n A string indicating the size of the output:\n\n ``full``\n The output is the full discrete linear convolution\n of the inputs. (Default)\n ``valid``\n The output consists only of those elements that do not\n rely on the zero-padding. In 'valid' mode, either `in1` or `in2`\n must be at least as large as the other in every dimension.\n ``same``\n The output is the same size as `in1`, centered\n with respect to the 'full' output.\n axes : int or array_like of ints or None, optional\n Axes over which to compute the convolution.\n The default is over all axes.\n\n Returns\n -------\n out : array\n An N-dimensional array containing a subset of the discrete linear\n convolution of `in1` with `in2`.\n\n See Also\n --------\n convolve : Uses the direct convolution or FFT convolution algorithm\n depending on which is faster.\n fftconvolve : An implementation of convolution using FFT.\n\n Notes\n -----\n .. versionadded:: 1.4.0\n\n Examples\n --------\n Convolve a 100,000 sample signal with a 512-sample filter.\n\n >>> from scipy import signal\n >>> sig = np.random.randn(100000)\n >>> filt = signal.firwin(512, 0.01)\n >>> fsig = signal.oaconvolve(sig, filt)\n\n >>> import matplotlib.pyplot as plt\n >>> fig, (ax_orig, ax_mag) = plt.subplots(2, 1)\n >>> ax_orig.plot(sig)\n >>> ax_orig.set_title('White noise')\n >>> ax_mag.plot(fsig)\n >>> ax_mag.set_title('Filtered noise')\n >>> fig.tight_layout()\n >>> fig.show()\n\n References\n ----------\n .. [1] Wikipedia, \"Overlap-add_method\".\n https://en.wikipedia.org/wiki/Overlap-add_method\n .. [2] Richard G. Lyons. Understanding Digital Signal Processing,\n Third Edition, 2011. 
Chapter 13.10.\n ISBN 13: 978-0137-02741-5\n\n \"\"\"\n in1 = np.asarray(in1)\n in2 = np.asarray(in2)\n\n if in1.ndim == in2.ndim == 0: # scalar inputs\n return in1 * in2\n elif in1.ndim != in2.ndim:\n raise ValueError(\"in1 and in2 should have the same dimensionality\")\n elif in1.size == 0 or in2.size == 0: # empty arrays\n return np.array([])\n elif in1.shape == in2.shape: # Equivalent to fftconvolve\n return fftconvolve(in1, in2, mode=mode, axes=axes)\n\n in1, in2, axes = _init_freq_conv_axes(in1, in2, mode, axes,\n sorted_axes=True)\n\n s1 = in1.shape\n s2 = in2.shape\n\n if not axes:\n ret = in1 * in2\n return _apply_conv_mode(ret, s1, s2, mode, axes)\n\n # Calculate this now since in1 is changed later\n shape_final = [None if i not in axes else\n s1[i] + s2[i] - 1 for i in range(in1.ndim)]\n\n # Calculate the block sizes for the output, steps, first and second inputs.\n # It is simpler to calculate them all together than doing them in separate\n # loops due to all the special cases that need to be handled.\n optimal_sizes = ((-1, -1, s1[i], s2[i]) if i not in axes else\n _calc_oa_lens(s1[i], s2[i]) for i in range(in1.ndim))\n block_size, overlaps, \\\n in1_step, in2_step = zip(*optimal_sizes)\n\n # Fall back to fftconvolve if there is only one block in every dimension.\n if in1_step == s1 and in2_step == s2:\n return fftconvolve(in1, in2, mode=mode, axes=axes)\n\n # Figure out the number of steps and padding.\n # This would get too complicated in a list comprehension.\n nsteps1 = []\n nsteps2 = []\n pad_size1 = []\n pad_size2 = []\n for i in range(in1.ndim):\n if i not in axes:\n pad_size1 += [(0, 0)]\n pad_size2 += [(0, 0)]\n continue\n\n if s1[i] > in1_step[i]:\n curnstep1 = math.ceil((s1[i]+1)/in1_step[i])\n if (block_size[i] - overlaps[i])*curnstep1 < shape_final[i]:\n curnstep1 += 1\n\n curpad1 = curnstep1*in1_step[i] - s1[i]\n else:\n curnstep1 = 1\n curpad1 = 0\n\n if s2[i] > in2_step[i]:\n curnstep2 = math.ceil((s2[i]+1)/in2_step[i])\n if (block_size[i] - overlaps[i])*curnstep2 < shape_final[i]:\n curnstep2 += 1\n\n curpad2 = curnstep2*in2_step[i] - s2[i]\n else:\n curnstep2 = 1\n curpad2 = 0\n\n nsteps1 += [curnstep1]\n nsteps2 += [curnstep2]\n pad_size1 += [(0, curpad1)]\n pad_size2 += [(0, curpad2)]\n\n # Pad the array to a size that can be reshaped to the desired shape\n # if necessary.\n if not all(curpad == (0, 0) for curpad in pad_size1):\n in1 = np.pad(in1, pad_size1, mode='constant', constant_values=0)\n\n if not all(curpad == (0, 0) for curpad in pad_size2):\n in2 = np.pad(in2, pad_size2, mode='constant', constant_values=0)\n\n # Reshape the overlap-add parts to input block sizes.\n split_axes = [iax+i for i, iax in enumerate(axes)]\n fft_axes = [iax+1 for iax in split_axes]\n\n # We need to put each new dimension before the corresponding dimension\n # being reshaped in order to get the data in the right layout at the end.\n reshape_size1 = list(in1_step)\n reshape_size2 = list(in2_step)\n for i, iax in enumerate(split_axes):\n reshape_size1.insert(iax, nsteps1[i])\n reshape_size2.insert(iax, nsteps2[i])\n\n in1 = in1.reshape(*reshape_size1)\n in2 = in2.reshape(*reshape_size2)\n\n # Do the convolution.\n fft_shape = [block_size[i] for i in axes]\n ret = _freq_domain_conv(in1, in2, fft_axes, fft_shape, calc_fast_len=False)\n\n # Do the overlap-add.\n for ax, ax_fft, ax_split in zip(axes, fft_axes, split_axes):\n overlap = overlaps[ax]\n if overlap is None:\n continue\n\n ret, overpart = np.split(ret, [-overlap], ax_fft)\n overpart = np.split(overpart, [-1], 
ax_split)[0]\n\n ret_overpart = np.split(ret, [overlap], ax_fft)[0]\n ret_overpart = np.split(ret_overpart, [1], ax_split)[1]\n ret_overpart += overpart\n\n # Reshape back to the correct dimensionality.\n shape_ret = [ret.shape[i] if i not in fft_axes else\n ret.shape[i]*ret.shape[i-1]\n for i in range(ret.ndim) if i not in split_axes]\n ret = ret.reshape(*shape_ret)\n\n # Slice to the correct size.\n slice_final = tuple([slice(islice) for islice in shape_final])\n ret = ret[slice_final]\n\n return _apply_conv_mode(ret, s1, s2, mode, axes)\n\n\ndef _numeric_arrays(arrays, kinds='buifc'):\n \"\"\"\n See if a list of arrays are all numeric.\n\n Parameters\n ----------\n ndarrays : array or list of arrays\n arrays to check if numeric.\n numeric_kinds : string-like\n The dtypes of the arrays to be checked. If the dtype.kind of\n the ndarrays are not in this string the function returns False and\n otherwise returns True.\n \"\"\"\n if type(arrays) == np.ndarray:\n return arrays.dtype.kind in kinds\n for array_ in arrays:\n if array_.dtype.kind not in kinds:\n return False\n return True\n\n\ndef _conv_ops(x_shape, h_shape, mode):\n \"\"\"\n Find the number of operations required for direct/fft methods of\n convolution. The direct operations were recorded by making a dummy class to\n record the number of operations by overriding ``__mul__`` and ``__add__``.\n The FFT operations rely on the (well-known) computational complexity of the\n FFT (and the implementation of ``_freq_domain_conv``).\n\n \"\"\"\n if mode == \"full\":\n out_shape = [n + k - 1 for n, k in zip(x_shape, h_shape)]\n elif mode == \"valid\":\n out_shape = [abs(n - k) + 1 for n, k in zip(x_shape, h_shape)]\n elif mode == \"same\":\n out_shape = x_shape\n else:\n raise ValueError(\"Acceptable mode flags are 'valid',\"\n \" 'same', or 'full', not mode={}\".format(mode))\n\n s1, s2 = x_shape, h_shape\n if len(x_shape) == 1:\n s1, s2 = s1[0], s2[0]\n if mode == \"full\":\n direct_ops = s1 * s2\n elif mode == \"valid\":\n direct_ops = (s2 - s1 + 1) * s1 if s2 >= s1 else (s1 - s2 + 1) * s2\n elif mode == \"same\":\n direct_ops = (s1 * s2 if s1 < s2 else\n s1 * s2 - (s2 // 2) * ((s2 + 1) // 2))\n else:\n if mode == \"full\":\n direct_ops = min(_prod(s1), _prod(s2)) * _prod(out_shape)\n elif mode == \"valid\":\n direct_ops = min(_prod(s1), _prod(s2)) * _prod(out_shape)\n elif mode == \"same\":\n direct_ops = _prod(s1) * _prod(s2)\n\n full_out_shape = [n + k - 1 for n, k in zip(x_shape, h_shape)]\n N = _prod(full_out_shape)\n fft_ops = 3 * N * np.log(N) # 3 separate FFTs of size full_out_shape\n return fft_ops, direct_ops\n\n\ndef _fftconv_faster(x, h, mode):\n \"\"\"\n See if using fftconvolve or convolve is faster.\n\n Parameters\n ----------\n x : np.ndarray\n Signal\n h : np.ndarray\n Kernel\n mode : str\n Mode passed to convolve\n\n Returns\n -------\n fft_faster : bool\n\n Notes\n -----\n See docstring of `choose_conv_method` for details on tuning hardware.\n\n See pull request 11031 for more detail:\n https://github.com/scipy/scipy/pull/11031.\n\n \"\"\"\n fft_ops, direct_ops = _conv_ops(x.shape, h.shape, mode)\n offset = -1e-3 if x.ndim == 1 else -1e-4\n constants = {\n \"valid\": (1.89095737e-9, 2.1364985e-10, offset),\n \"full\": (1.7649070e-9, 2.1414831e-10, offset),\n \"same\": (3.2646654e-9, 2.8478277e-10, offset)\n if h.size <= x.size\n else (3.21635404e-9, 1.1773253e-8, -1e-5),\n } if x.ndim == 1 else {\n \"valid\": (1.85927e-9, 2.11242e-8, offset),\n \"full\": (1.99817e-9, 1.66174e-8, offset),\n \"same\": (2.04735e-9, 
1.55367e-8, offset),\n }\n O_fft, O_direct, O_offset = constants[mode]\n return O_fft * fft_ops < O_direct * direct_ops + O_offset\n\n\ndef _reverse_and_conj(x):\n \"\"\"\n Reverse array `x` in all dimensions and perform the complex conjugate\n \"\"\"\n reverse = (slice(None, None, -1),) * x.ndim\n return x[reverse].conj()\n\n\ndef _np_conv_ok(volume, kernel, mode):\n \"\"\"\n See if numpy supports convolution of `volume` and `kernel` (i.e. both are\n 1D ndarrays and of the appropriate shape). NumPy's 'same' mode uses the\n size of the larger input, while SciPy's uses the size of the first input.\n\n Invalid mode strings will return False and be caught by the calling func.\n \"\"\"\n if volume.ndim == kernel.ndim == 1:\n if mode in ('full', 'valid'):\n return True\n elif mode == 'same':\n return volume.size >= kernel.size\n else:\n return False\n\n\ndef _timeit_fast(stmt=\"pass\", setup=\"pass\", repeat=3):\n \"\"\"\n Returns the time the statement/function took, in seconds.\n\n Faster, less precise version of IPython's timeit. `stmt` can be a statement\n written as a string or a callable.\n\n Will do only 1 loop (like IPython's timeit) with no repetitions\n (unlike IPython) for very slow functions. For fast functions, only does\n enough loops to take 5 ms, which seems to produce similar results (on\n Windows at least), and avoids doing an extraneous cycle that isn't\n measured.\n\n \"\"\"\n timer = timeit.Timer(stmt, setup)\n\n # determine number of calls per rep so total time for 1 rep >= 5 ms\n x = 0\n for p in range(0, 10):\n number = 10**p\n x = timer.timeit(number) # seconds\n if x >= 5e-3 / 10: # 5 ms for final test, 1/10th that for this one\n break\n if x > 1: # second\n # If it's macroscopic, don't bother with repetitions\n best = x\n else:\n number *= 10\n r = timer.repeat(repeat, number)\n best = min(r)\n\n sec = best / number\n return sec\n\n\ndef choose_conv_method(in1, in2, mode='full', measure=False):\n \"\"\"\n Find the fastest convolution/correlation method.\n\n This primarily exists to be called during the ``method='auto'`` option in\n `convolve` and `correlate`. It can also be used to determine the value of\n ``method`` for many different convolutions of the same dtype/shape.\n In addition, it supports timing the convolution to adapt the value of\n ``method`` to a particular set of inputs and/or hardware.\n\n Parameters\n ----------\n in1 : array_like\n The first argument passed into the convolution function.\n in2 : array_like\n The second argument passed into the convolution function.\n mode : str {'full', 'valid', 'same'}, optional\n A string indicating the size of the output:\n\n ``full``\n The output is the full discrete linear convolution\n of the inputs. (Default)\n ``valid``\n The output consists only of those elements that do not\n rely on the zero-padding.\n ``same``\n The output is the same size as `in1`, centered\n with respect to the 'full' output.\n measure : bool, optional\n If True, run and time the convolution of `in1` and `in2` with both\n methods and return the fastest. 
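        For example, a timed selection might look like the following
        rough sketch (the sizes and the names ``a`` and ``b`` are
        arbitrary placeholders, not part of the API):

        >>> import numpy as np
        >>> from scipy import signal
        >>> a, b = np.random.randn(10000), np.random.randn(64)
        >>> best, times = signal.choose_conv_method(a, b, measure=True)
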
If False (default), predict the fastest\n method using precomputed values.\n\n Returns\n -------\n method : str\n A string indicating which convolution method is fastest, either\n 'direct' or 'fft'\n times : dict, optional\n A dictionary containing the times (in seconds) needed for each method.\n This value is only returned if ``measure=True``.\n\n See Also\n --------\n convolve\n correlate\n\n Notes\n -----\n Generally, this method is 99% accurate for 2D signals and 85% accurate\n for 1D signals for randomly chosen input sizes. For precision, use\n ``measure=True`` to find the fastest method by timing the convolution.\n This can be used to avoid the minimal overhead of finding the fastest\n ``method`` later, or to adapt the value of ``method`` to a particular set\n of inputs.\n\n Experiments were run on an Amazon EC2 r5a.2xlarge machine to test this\n function. These experiments measured the ratio between the time required\n when using ``method='auto'`` and the time required for the fastest method\n (i.e., ``ratio = time_auto / min(time_fft, time_direct)``). In these\n experiments, we found:\n\n * There is a 95% chance of this ratio being less than 1.5 for 1D signals\n and a 99% chance of being less than 2.5 for 2D signals.\n * The ratio was always less than 2.5/5 for 1D/2D signals respectively.\n * This function is most inaccurate for 1D convolutions that take between 1\n and 10 milliseconds with ``method='direct'``. A good proxy for this\n (at least in our experiments) is ``1e6 <= in1.size * in2.size <= 1e7``.\n\n The 2D results almost certainly generalize to 3D/4D/etc because the\n implementation is the same (the 1D implementation is different).\n\n All the numbers above are specific to the EC2 machine. However, we did find\n that this function generalizes fairly decently across hardware. The speed\n tests were of similar quality (and even slightly better) than the same\n tests performed on the machine to tune this function's numbers (a mid-2014\n 15-inch MacBook Pro with 16GB RAM and a 2.5GHz Intel i7 processor).\n\n There are cases when `fftconvolve` supports the inputs but this function\n returns `direct` (e.g., to protect against floating point integer\n precision).\n\n .. 
versionadded:: 0.19\n\n Examples\n --------\n Estimate the fastest method for a given input:\n\n >>> from scipy import signal\n >>> img = np.random.rand(32, 32)\n >>> filter = np.random.rand(8, 8)\n >>> method = signal.choose_conv_method(img, filter, mode='same')\n >>> method\n 'fft'\n\n This can then be applied to other arrays of the same dtype and shape:\n\n >>> img2 = np.random.rand(32, 32)\n >>> filter2 = np.random.rand(8, 8)\n >>> corr2 = signal.correlate(img2, filter2, mode='same', method=method)\n >>> conv2 = signal.convolve(img2, filter2, mode='same', method=method)\n\n The output of this function (``method``) works with `correlate` and\n `convolve`.\n\n \"\"\"\n volume = np.asarray(in1)\n kernel = np.asarray(in2)\n\n if measure:\n times = {}\n for method in ['fft', 'direct']:\n times[method] = _timeit_fast(lambda: convolve(volume, kernel,\n mode=mode, method=method))\n\n chosen_method = 'fft' if times['fft'] < times['direct'] else 'direct'\n return chosen_method, times\n\n # for integer input,\n # catch when more precision required than float provides (representing an\n # integer as float can lose precision in fftconvolve if larger than 2**52)\n if any([_numeric_arrays([x], kinds='ui') for x in [volume, kernel]]):\n max_value = int(np.abs(volume).max()) * int(np.abs(kernel).max())\n max_value *= int(min(volume.size, kernel.size))\n if max_value > 2**np.finfo('float').nmant - 1:\n return 'direct'\n\n if _numeric_arrays([volume, kernel], kinds='b'):\n return 'direct'\n\n if _numeric_arrays([volume, kernel]):\n if _fftconv_faster(volume, kernel, mode):\n return 'fft'\n\n return 'direct'\n\n\ndef convolve(in1, in2, mode='full', method='auto'):\n \"\"\"\n Convolve two N-dimensional arrays.\n\n Convolve `in1` and `in2`, with the output size determined by the\n `mode` argument.\n\n Parameters\n ----------\n in1 : array_like\n First input.\n in2 : array_like\n Second input. Should have the same number of dimensions as `in1`.\n mode : str {'full', 'valid', 'same'}, optional\n A string indicating the size of the output:\n\n ``full``\n The output is the full discrete linear convolution\n of the inputs. (Default)\n ``valid``\n The output consists only of those elements that do not\n rely on the zero-padding. In 'valid' mode, either `in1` or `in2`\n must be at least as large as the other in every dimension.\n ``same``\n The output is the same size as `in1`, centered\n with respect to the 'full' output.\n method : str {'auto', 'direct', 'fft'}, optional\n A string indicating which method to use to calculate the convolution.\n\n ``direct``\n The convolution is determined directly from sums, the definition of\n convolution.\n ``fft``\n The Fourier Transform is used to perform the convolution by calling\n `fftconvolve`.\n ``auto``\n Automatically chooses direct or Fourier method based on an estimate\n of which is faster (default). See Notes for more detail.\n\n .. 
versionadded:: 0.19.0\n\n Returns\n -------\n convolve : array\n An N-dimensional array containing a subset of the discrete linear\n convolution of `in1` with `in2`.\n\n See Also\n --------\n numpy.polymul : performs polynomial multiplication (same operation, but\n also accepts poly1d objects)\n choose_conv_method : chooses the fastest appropriate convolution method\n fftconvolve : Always uses the FFT method.\n oaconvolve : Uses the overlap-add method to do convolution, which is\n generally faster when the input arrays are large and\n significantly different in size.\n\n Notes\n -----\n By default, `convolve` and `correlate` use ``method='auto'``, which calls\n `choose_conv_method` to choose the fastest method using pre-computed\n values (`choose_conv_method` can also measure real-world timing with a\n keyword argument). Because `fftconvolve` relies on floating point numbers,\n there are certain constraints that may force `method=direct` (more detail\n in `choose_conv_method` docstring).\n\n Examples\n --------\n Smooth a square pulse using a Hann window:\n\n >>> from scipy import signal\n >>> sig = np.repeat([0., 1., 0.], 100)\n >>> win = signal.windows.hann(50)\n >>> filtered = signal.convolve(sig, win, mode='same') / sum(win)\n\n >>> import matplotlib.pyplot as plt\n >>> fig, (ax_orig, ax_win, ax_filt) = plt.subplots(3, 1, sharex=True)\n >>> ax_orig.plot(sig)\n >>> ax_orig.set_title('Original pulse')\n >>> ax_orig.margins(0, 0.1)\n >>> ax_win.plot(win)\n >>> ax_win.set_title('Filter impulse response')\n >>> ax_win.margins(0, 0.1)\n >>> ax_filt.plot(filtered)\n >>> ax_filt.set_title('Filtered signal')\n >>> ax_filt.margins(0, 0.1)\n >>> fig.tight_layout()\n >>> fig.show()\n\n \"\"\"\n volume = np.asarray(in1)\n kernel = np.asarray(in2)\n\n if volume.ndim == kernel.ndim == 0:\n return volume * kernel\n elif volume.ndim != kernel.ndim:\n raise ValueError(\"volume and kernel should have the same \"\n \"dimensionality\")\n\n if _inputs_swap_needed(mode, volume.shape, kernel.shape):\n # Convolution is commutative; order doesn't have any effect on output\n volume, kernel = kernel, volume\n\n if method == 'auto':\n method = choose_conv_method(volume, kernel, mode=mode)\n\n if method == 'fft':\n out = fftconvolve(volume, kernel, mode=mode)\n result_type = np.result_type(volume, kernel)\n if result_type.kind in {'u', 'i'}:\n out = np.around(out)\n return out.astype(result_type)\n elif method == 'direct':\n # fastpath to faster numpy.convolve for 1d inputs when possible\n if _np_conv_ok(volume, kernel, mode):\n return np.convolve(volume, kernel, mode)\n\n return correlate(volume, _reverse_and_conj(kernel), mode, 'direct')\n else:\n raise ValueError(\"Acceptable method flags are 'auto',\"\n \" 'direct', or 'fft'.\")\n\n\ndef order_filter(a, domain, rank):\n \"\"\"\n Perform an order filter on an N-D array.\n\n Perform an order filter on the array in. The domain argument acts as a\n mask centered over each pixel. The non-zero elements of domain are\n used to select elements surrounding each input pixel which are placed\n in a list. 
The list is sorted, and the output for that pixel is the\n element corresponding to rank in the sorted list.\n\n Parameters\n ----------\n a : ndarray\n The N-dimensional input array.\n domain : array_like\n A mask array with the same number of dimensions as `a`.\n Each dimension should have an odd number of elements.\n rank : int\n A non-negative integer which selects the element from the\n sorted list (0 corresponds to the smallest element, 1 is the\n next smallest element, etc.).\n\n Returns\n -------\n out : ndarray\n The results of the order filter in an array with the same\n shape as `a`.\n\n Examples\n --------\n >>> from scipy import signal\n >>> x = np.arange(25).reshape(5, 5)\n >>> domain = np.identity(3)\n >>> x\n array([[ 0, 1, 2, 3, 4],\n [ 5, 6, 7, 8, 9],\n [10, 11, 12, 13, 14],\n [15, 16, 17, 18, 19],\n [20, 21, 22, 23, 24]])\n >>> signal.order_filter(x, domain, 0)\n array([[ 0., 0., 0., 0., 0.],\n [ 0., 0., 1., 2., 0.],\n [ 0., 5., 6., 7., 0.],\n [ 0., 10., 11., 12., 0.],\n [ 0., 0., 0., 0., 0.]])\n >>> signal.order_filter(x, domain, 2)\n array([[ 6., 7., 8., 9., 4.],\n [ 11., 12., 13., 14., 9.],\n [ 16., 17., 18., 19., 14.],\n [ 21., 22., 23., 24., 19.],\n [ 20., 21., 22., 23., 24.]])\n\n \"\"\"\n domain = np.asarray(domain)\n size = domain.shape\n for k in range(len(size)):\n if (size[k] % 2) != 1:\n raise ValueError(\"Each dimension of domain argument \"\n \" should have an odd number of elements.\")\n return sigtools._order_filterND(a, domain, rank)\n\n\ndef medfilt(volume, kernel_size=None):\n \"\"\"\n Perform a median filter on an N-dimensional array.\n\n Apply a median filter to the input array using a local window-size\n given by `kernel_size`. The array will automatically be zero-padded.\n\n Parameters\n ----------\n volume : array_like\n An N-dimensional input array.\n kernel_size : array_like, optional\n A scalar or an N-length list giving the size of the median filter\n window in each dimension. Elements of `kernel_size` should be odd.\n If `kernel_size` is a scalar, then this scalar is used as the size in\n each dimension. 
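        A scalar and the equivalent list give identical results; as a
        small sketch (the 5x5 array is arbitrary):

        >>> import numpy as np
        >>> from scipy import signal
        >>> x = np.arange(25, dtype=float).reshape(5, 5)
        >>> bool(np.allclose(signal.medfilt(x, 3), signal.medfilt(x, [3, 3])))
        True
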
Default size is 3 for each dimension.\n\n Returns\n -------\n out : ndarray\n An array the same size as input containing the median filtered\n result.\n\n Warns\n -----\n UserWarning\n If array size is smaller than kernel size along any dimension\n\n See Also\n --------\n scipy.ndimage.median_filter\n\n Notes\n -------\n The more general function `scipy.ndimage.median_filter` has a more\n efficient implementation of a median filter and therefore runs much faster.\n \"\"\"\n volume = np.atleast_1d(volume)\n if kernel_size is None:\n kernel_size = [3] * volume.ndim\n kernel_size = np.asarray(kernel_size)\n if kernel_size.shape == ():\n kernel_size = np.repeat(kernel_size.item(), volume.ndim)\n\n for k in range(volume.ndim):\n if (kernel_size[k] % 2) != 1:\n raise ValueError(\"Each element of kernel_size should be odd.\")\n if any(k > s for k, s in zip(kernel_size, volume.shape)):\n warnings.warn('kernel_size exceeds volume extent: the volume will be '\n 'zero-padded.')\n\n domain = np.ones(kernel_size)\n\n numels = np.prod(kernel_size, axis=0)\n order = numels // 2\n return sigtools._order_filterND(volume, domain, order)\n\n\ndef wiener(im, mysize=None, noise=None):\n \"\"\"\n Perform a Wiener filter on an N-dimensional array.\n\n Apply a Wiener filter to the N-dimensional array `im`.\n\n Parameters\n ----------\n im : ndarray\n An N-dimensional array.\n mysize : int or array_like, optional\n A scalar or an N-length list giving the size of the Wiener filter\n window in each dimension. Elements of mysize should be odd.\n If mysize is a scalar, then this scalar is used as the size\n in each dimension.\n noise : float, optional\n The noise-power to use. If None, then noise is estimated as the\n average of the local variance of the input.\n\n Returns\n -------\n out : ndarray\n Wiener filtered result with the same shape as `im`.\n\n Examples\n --------\n\n >>> from scipy.misc import face\n >>> from scipy.signal.signaltools import wiener\n >>> import matplotlib.pyplot as plt\n >>> import numpy as np\n >>> img = np.random.random((40, 40)) #Create a random image\n >>> filtered_img = wiener(img, (5, 5)) #Filter the image\n >>> f, (plot1, plot2) = plt.subplots(1, 2)\n >>> plot1.imshow(img)\n >>> plot2.imshow(filtered_img)\n >>> plt.show()\n\n Notes\n -----\n This implementation is similar to wiener2 in Matlab/Octave.\n For more details see [1]_\n\n References\n ----------\n .. [1] Lim, Jae S., Two-Dimensional Signal and Image Processing,\n Englewood Cliffs, NJ, Prentice Hall, 1990, p. 548.\n\n\n \"\"\"\n im = np.asarray(im)\n if mysize is None:\n mysize = [3] * im.ndim\n mysize = np.asarray(mysize)\n if mysize.shape == ():\n mysize = np.repeat(mysize.item(), im.ndim)\n\n # Estimate the local mean\n lMean = correlate(im, np.ones(mysize), 'same') / np.prod(mysize, axis=0)\n\n # Estimate the local variance\n lVar = (correlate(im ** 2, np.ones(mysize), 'same') /\n np.prod(mysize, axis=0) - lMean ** 2)\n\n # Estimate the noise power if needed.\n if noise is None:\n noise = np.mean(np.ravel(lVar), axis=0)\n\n res = (im - lMean)\n res *= (1 - noise / lVar)\n res += lMean\n out = np.where(lVar < noise, lMean, res)\n\n return out\n\n\ndef convolve2d(in1, in2, mode='full', boundary='fill', fillvalue=0):\n \"\"\"\n Convolve two 2-dimensional arrays.\n\n Convolve `in1` and `in2` with output size determined by `mode`, and\n boundary conditions determined by `boundary` and `fillvalue`.\n\n Parameters\n ----------\n in1 : array_like\n First input.\n in2 : array_like\n Second input. 
Should have the same number of dimensions as `in1`.\n mode : str {'full', 'valid', 'same'}, optional\n A string indicating the size of the output:\n\n ``full``\n The output is the full discrete linear convolution\n of the inputs. (Default)\n ``valid``\n The output consists only of those elements that do not\n rely on the zero-padding. In 'valid' mode, either `in1` or `in2`\n must be at least as large as the other in every dimension.\n ``same``\n The output is the same size as `in1`, centered\n with respect to the 'full' output.\n boundary : str {'fill', 'wrap', 'symm'}, optional\n A flag indicating how to handle boundaries:\n\n ``fill``\n pad input arrays with fillvalue. (default)\n ``wrap``\n circular boundary conditions.\n ``symm``\n symmetrical boundary conditions.\n\n fillvalue : scalar, optional\n Value to fill pad input arrays with. Default is 0.\n\n Returns\n -------\n out : ndarray\n A 2-dimensional array containing a subset of the discrete linear\n convolution of `in1` with `in2`.\n\n Examples\n --------\n Compute the gradient of an image by 2D convolution with a complex Scharr\n operator. (Horizontal operator is real, vertical is imaginary.) Use\n symmetric boundary condition to avoid creating edges at the image\n boundaries.\n\n >>> from scipy import signal\n >>> from scipy import misc\n >>> ascent = misc.ascent()\n >>> scharr = np.array([[ -3-3j, 0-10j, +3 -3j],\n ... [-10+0j, 0+ 0j, +10 +0j],\n ... [ -3+3j, 0+10j, +3 +3j]]) # Gx + j*Gy\n >>> grad = signal.convolve2d(ascent, scharr, boundary='symm', mode='same')\n\n >>> import matplotlib.pyplot as plt\n >>> fig, (ax_orig, ax_mag, ax_ang) = plt.subplots(3, 1, figsize=(6, 15))\n >>> ax_orig.imshow(ascent, cmap='gray')\n >>> ax_orig.set_title('Original')\n >>> ax_orig.set_axis_off()\n >>> ax_mag.imshow(np.absolute(grad), cmap='gray')\n >>> ax_mag.set_title('Gradient magnitude')\n >>> ax_mag.set_axis_off()\n >>> ax_ang.imshow(np.angle(grad), cmap='hsv') # hsv is cyclic, like angles\n >>> ax_ang.set_title('Gradient orientation')\n >>> ax_ang.set_axis_off()\n >>> fig.show()\n\n \"\"\"\n in1 = np.asarray(in1)\n in2 = np.asarray(in2)\n\n if not in1.ndim == in2.ndim == 2:\n raise ValueError('convolve2d inputs must both be 2-D arrays')\n\n if _inputs_swap_needed(mode, in1.shape, in2.shape):\n in1, in2 = in2, in1\n\n val = _valfrommode(mode)\n bval = _bvalfromboundary(boundary)\n out = sigtools._convolve2d(in1, in2, 1, val, bval, fillvalue)\n return out\n\n\ndef correlate2d(in1, in2, mode='full', boundary='fill', fillvalue=0):\n \"\"\"\n Cross-correlate two 2-dimensional arrays.\n\n Cross correlate `in1` and `in2` with output size determined by `mode`, and\n boundary conditions determined by `boundary` and `fillvalue`.\n\n Parameters\n ----------\n in1 : array_like\n First input.\n in2 : array_like\n Second input. Should have the same number of dimensions as `in1`.\n mode : str {'full', 'valid', 'same'}, optional\n A string indicating the size of the output:\n\n ``full``\n The output is the full discrete linear cross-correlation\n of the inputs. (Default)\n ``valid``\n The output consists only of those elements that do not\n rely on the zero-padding. In 'valid' mode, either `in1` or `in2`\n must be at least as large as the other in every dimension.\n ``same``\n The output is the same size as `in1`, centered\n with respect to the 'full' output.\n boundary : str {'fill', 'wrap', 'symm'}, optional\n A flag indicating how to handle boundaries:\n\n ``fill``\n pad input arrays with fillvalue. 
(default)\n ``wrap``\n circular boundary conditions.\n ``symm``\n symmetrical boundary conditions.\n\n fillvalue : scalar, optional\n Value to fill pad input arrays with. Default is 0.\n\n Returns\n -------\n correlate2d : ndarray\n A 2-dimensional array containing a subset of the discrete linear\n cross-correlation of `in1` with `in2`.\n\n Notes\n -----\n When using \"same\" mode with even-length inputs, the outputs of `correlate`\n and `correlate2d` differ: There is a 1-index offset between them.\n\n Examples\n --------\n Use 2D cross-correlation to find the location of a template in a noisy\n image:\n\n >>> from scipy import signal\n >>> from scipy import misc\n >>> face = misc.face(gray=True) - misc.face(gray=True).mean()\n >>> template = np.copy(face[300:365, 670:750]) # right eye\n >>> template -= template.mean()\n >>> face = face + np.random.randn(*face.shape) * 50 # add noise\n >>> corr = signal.correlate2d(face, template, boundary='symm', mode='same')\n >>> y, x = np.unravel_index(np.argmax(corr), corr.shape) # find the match\n\n >>> import matplotlib.pyplot as plt\n >>> fig, (ax_orig, ax_template, ax_corr) = plt.subplots(3, 1,\n ... figsize=(6, 15))\n >>> ax_orig.imshow(face, cmap='gray')\n >>> ax_orig.set_title('Original')\n >>> ax_orig.set_axis_off()\n >>> ax_template.imshow(template, cmap='gray')\n >>> ax_template.set_title('Template')\n >>> ax_template.set_axis_off()\n >>> ax_corr.imshow(corr, cmap='gray')\n >>> ax_corr.set_title('Cross-correlation')\n >>> ax_corr.set_axis_off()\n >>> ax_orig.plot(x, y, 'ro')\n >>> fig.show()\n\n \"\"\"\n in1 = np.asarray(in1)\n in2 = np.asarray(in2)\n\n if not in1.ndim == in2.ndim == 2:\n raise ValueError('correlate2d inputs must both be 2-D arrays')\n\n swapped_inputs = _inputs_swap_needed(mode, in1.shape, in2.shape)\n if swapped_inputs:\n in1, in2 = in2, in1\n\n val = _valfrommode(mode)\n bval = _bvalfromboundary(boundary)\n out = sigtools._convolve2d(in1, in2.conj(), 0, val, bval, fillvalue)\n\n if swapped_inputs:\n out = out[::-1, ::-1]\n\n return out\n\n\ndef medfilt2d(input, kernel_size=3):\n \"\"\"\n Median filter a 2-dimensional array.\n\n Apply a median filter to the `input` array using a local window-size\n given by `kernel_size` (must be odd). The array is zero-padded\n automatically.\n\n Parameters\n ----------\n input : array_like\n A 2-dimensional input array.\n kernel_size : array_like, optional\n A scalar or a list of length 2, giving the size of the\n median filter window in each dimension. Elements of\n `kernel_size` should be odd. If `kernel_size` is a scalar,\n then this scalar is used as the size in each dimension.\n Default is a kernel of size (3, 3).\n\n Returns\n -------\n out : ndarray\n An array the same size as input containing the median filtered\n result.\n\n See also\n --------\n scipy.ndimage.median_filter\n\n Notes\n -------\n The more general function `scipy.ndimage.median_filter` has a more\n efficient implementation of a median filter and therefore runs much faster.\n \"\"\"\n image = np.asarray(input)\n if kernel_size is None:\n kernel_size = [3] * 2\n kernel_size = np.asarray(kernel_size)\n if kernel_size.shape == ():\n kernel_size = np.repeat(kernel_size.item(), 2)\n\n for size in kernel_size:\n if (size % 2) != 1:\n raise ValueError(\"Each element of kernel_size should be odd.\")\n\n return sigtools._medfilt2d(image, kernel_size)\n\n\ndef lfilter(b, a, x, axis=-1, zi=None):\n \"\"\"\n Filter data along one-dimension with an IIR or FIR filter.\n\n Filter a data sequence, `x`, using a digital filter. 
This works for many\n fundamental data types (including Object type). The filter is a direct\n form II transposed implementation of the standard difference equation\n (see Notes).\n\n The function `sosfilt` (and filter design using ``output='sos'``) should be\n preferred over `lfilter` for most filtering tasks, as second-order sections\n have fewer numerical problems.\n\n Parameters\n ----------\n b : array_like\n The numerator coefficient vector in a 1-D sequence.\n a : array_like\n The denominator coefficient vector in a 1-D sequence. If ``a[0]``\n is not 1, then both `a` and `b` are normalized by ``a[0]``.\n x : array_like\n An N-dimensional input array.\n axis : int, optional\n The axis of the input data array along which to apply the\n linear filter. The filter is applied to each subarray along\n this axis. Default is -1.\n zi : array_like, optional\n Initial conditions for the filter delays. It is a vector\n (or array of vectors for an N-dimensional input) of length\n ``max(len(a), len(b)) - 1``. If `zi` is None or is not given then\n initial rest is assumed. See `lfiltic` for more information.\n\n Returns\n -------\n y : array\n The output of the digital filter.\n zf : array, optional\n If `zi` is None, this is not returned, otherwise, `zf` holds the\n final filter delay values.\n\n See Also\n --------\n lfiltic : Construct initial conditions for `lfilter`.\n lfilter_zi : Compute initial state (steady state of step response) for\n `lfilter`.\n filtfilt : A forward-backward filter, to obtain a filter with linear phase.\n savgol_filter : A Savitzky-Golay filter.\n sosfilt: Filter data using cascaded second-order sections.\n sosfiltfilt: A forward-backward filter using second-order sections.\n\n Notes\n -----\n The filter function is implemented as a direct II transposed structure.\n This means that the filter implements::\n\n a[0]*y[n] = b[0]*x[n] + b[1]*x[n-1] + ... + b[M]*x[n-M]\n - a[1]*y[n-1] - ... - a[N]*y[n-N]\n\n where `M` is the degree of the numerator, `N` is the degree of the\n denominator, and `n` is the sample number. It is implemented using\n the following difference equations (assuming M = N)::\n\n a[0]*y[n] = b[0] * x[n] + d[0][n-1]\n d[0][n] = b[1] * x[n] - a[1] * y[n] + d[1][n-1]\n d[1][n] = b[2] * x[n] - a[2] * y[n] + d[2][n-1]\n ...\n d[N-2][n] = b[N-1]*x[n] - a[N-1]*y[n] + d[N-1][n-1]\n d[N-1][n] = b[N] * x[n] - a[N] * y[n]\n\n where `d` are the state variables.\n\n The rational transfer function describing this filter in the\n z-transform domain is::\n\n -1 -M\n b[0] + b[1]z + ... + b[M] z\n Y(z) = -------------------------------- X(z)\n -1 -N\n a[0] + a[1]z + ... + a[N] z\n\n Examples\n --------\n Generate a noisy signal to be filtered:\n\n >>> from scipy import signal\n >>> import matplotlib.pyplot as plt\n >>> t = np.linspace(-1, 1, 201)\n >>> x = (np.sin(2*np.pi*0.75*t*(1-t) + 2.1) +\n ... 0.1*np.sin(2*np.pi*1.25*t + 1) +\n ... 0.18*np.cos(2*np.pi*3.85*t))\n >>> xn = x + np.random.randn(len(t)) * 0.08\n\n Create an order 3 lowpass butterworth filter:\n\n >>> b, a = signal.butter(3, 0.05)\n\n Apply the filter to xn. 
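    A plain call, shown here only as a minimal sketch (it leaves the
    startup transient in place, and ``z_plain`` is just a placeholder
    name), would be:

    >>> z_plain = signal.lfilter(b, a, xn)
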
Use lfilter_zi to choose the initial condition of\n the filter:\n\n >>> zi = signal.lfilter_zi(b, a)\n >>> z, _ = signal.lfilter(b, a, xn, zi=zi*xn[0])\n\n Apply the filter again, to have a result filtered at an order the same as\n filtfilt:\n\n >>> z2, _ = signal.lfilter(b, a, z, zi=zi*z[0])\n\n Use filtfilt to apply the filter:\n\n >>> y = signal.filtfilt(b, a, xn)\n\n Plot the original signal and the various filtered versions:\n\n >>> plt.figure\n >>> plt.plot(t, xn, 'b', alpha=0.75)\n >>> plt.plot(t, z, 'r--', t, z2, 'r', t, y, 'k')\n >>> plt.legend(('noisy signal', 'lfilter, once', 'lfilter, twice',\n ... 'filtfilt'), loc='best')\n >>> plt.grid(True)\n >>> plt.show()\n\n \"\"\"\n a = np.atleast_1d(a)\n if len(a) == 1:\n # This path only supports types fdgFDGO to mirror _linear_filter below.\n # Any of b, a, x, or zi can set the dtype, but there is no default\n # casting of other types; instead a NotImplementedError is raised.\n b = np.asarray(b)\n a = np.asarray(a)\n if b.ndim != 1 and a.ndim != 1:\n raise ValueError('object of too small depth for desired array')\n x = _validate_x(x)\n inputs = [b, a, x]\n if zi is not None:\n # _linear_filter does not broadcast zi, but does do expansion of\n # singleton dims.\n zi = np.asarray(zi)\n if zi.ndim != x.ndim:\n raise ValueError('object of too small depth for desired array')\n expected_shape = list(x.shape)\n expected_shape[axis] = b.shape[0] - 1\n expected_shape = tuple(expected_shape)\n # check the trivial case where zi is the right shape first\n if zi.shape != expected_shape:\n strides = zi.ndim * [None]\n if axis < 0:\n axis += zi.ndim\n for k in range(zi.ndim):\n if k == axis and zi.shape[k] == expected_shape[k]:\n strides[k] = zi.strides[k]\n elif k != axis and zi.shape[k] == expected_shape[k]:\n strides[k] = zi.strides[k]\n elif k != axis and zi.shape[k] == 1:\n strides[k] = 0\n else:\n raise ValueError('Unexpected shape for zi: expected '\n '%s, found %s.' 
%\n (expected_shape, zi.shape))\n zi = np.lib.stride_tricks.as_strided(zi, expected_shape,\n strides)\n inputs.append(zi)\n dtype = np.result_type(*inputs)\n\n if dtype.char not in 'fdgFDGO':\n raise NotImplementedError(\"input type '%s' not supported\" % dtype)\n\n b = np.array(b, dtype=dtype)\n a = np.array(a, dtype=dtype, copy=False)\n b /= a[0]\n x = np.array(x, dtype=dtype, copy=False)\n\n out_full = np.apply_along_axis(lambda y: np.convolve(b, y), axis, x)\n ind = out_full.ndim * [slice(None)]\n if zi is not None:\n ind[axis] = slice(zi.shape[axis])\n out_full[tuple(ind)] += zi\n\n ind[axis] = slice(out_full.shape[axis] - len(b) + 1)\n out = out_full[tuple(ind)]\n\n if zi is None:\n return out\n else:\n ind[axis] = slice(out_full.shape[axis] - len(b) + 1, None)\n zf = out_full[tuple(ind)]\n return out, zf\n else:\n if zi is None:\n return sigtools._linear_filter(b, a, x, axis)\n else:\n return sigtools._linear_filter(b, a, x, axis, zi)\n\n\ndef lfiltic(b, a, y, x=None):\n \"\"\"\n Construct initial conditions for lfilter given input and output vectors.\n\n Given a linear filter (b, a) and initial conditions on the output `y`\n and the input `x`, return the initial conditions on the state vector zi\n which is used by `lfilter` to generate the output given the input.\n\n Parameters\n ----------\n b : array_like\n Linear filter term.\n a : array_like\n Linear filter term.\n y : array_like\n Initial conditions.\n\n If ``N = len(a) - 1``, then ``y = {y[-1], y[-2], ..., y[-N]}``.\n\n If `y` is too short, it is padded with zeros.\n x : array_like, optional\n Initial conditions.\n\n If ``M = len(b) - 1``, then ``x = {x[-1], x[-2], ..., x[-M]}``.\n\n If `x` is not given, its initial conditions are assumed zero.\n\n If `x` is too short, it is padded with zeros.\n\n Returns\n -------\n zi : ndarray\n The state vector ``zi = {z_0[-1], z_1[-1], ..., z_K-1[-1]}``,\n where ``K = max(M, N)``.\n\n See Also\n --------\n lfilter, lfilter_zi\n\n \"\"\"\n N = np.size(a) - 1\n M = np.size(b) - 1\n K = max(M, N)\n y = np.asarray(y)\n\n if x is None:\n result_type = np.result_type(np.asarray(b), np.asarray(a), y)\n if result_type.kind in 'bui':\n result_type = np.float64\n x = np.zeros(M, dtype=result_type)\n else:\n x = np.asarray(x)\n\n result_type = np.result_type(np.asarray(b), np.asarray(a), y, x)\n if result_type.kind in 'bui':\n result_type = np.float64\n x = x.astype(result_type)\n\n L = np.size(x)\n if L < M:\n x = np.r_[x, np.zeros(M - L)]\n\n y = y.astype(result_type)\n zi = np.zeros(K, result_type)\n\n L = np.size(y)\n if L < N:\n y = np.r_[y, np.zeros(N - L)]\n\n for m in range(M):\n zi[m] = np.sum(b[m + 1:] * x[:M - m], axis=0)\n\n for m in range(N):\n zi[m] -= np.sum(a[m + 1:] * y[:N - m], axis=0)\n\n return zi\n\n\ndef deconvolve(signal, divisor):\n \"\"\"Deconvolves ``divisor`` out of ``signal`` using inverse filtering.\n\n Returns the quotient and remainder such that\n ``signal = convolve(divisor, quotient) + remainder``\n\n Parameters\n ----------\n signal : array_like\n Signal data, typically a recorded signal\n divisor : array_like\n Divisor data, typically an impulse response or filter that was\n applied to the original signal\n\n Returns\n -------\n quotient : ndarray\n Quotient, typically the recovered original signal\n remainder : ndarray\n Remainder\n\n Examples\n --------\n Deconvolve a signal that's been filtered:\n\n >>> from scipy import signal\n >>> original = [0, 1, 0, 0, 1, 1, 0, 0]\n >>> impulse_response = [2, 1]\n >>> recorded = signal.convolve(impulse_response, 
original)\n >>> recorded\n array([0, 2, 1, 0, 2, 3, 1, 0, 0])\n >>> recovered, remainder = signal.deconvolve(recorded, impulse_response)\n >>> recovered\n array([ 0., 1., 0., 0., 1., 1., 0., 0.])\n\n See Also\n --------\n numpy.polydiv : performs polynomial division (same operation, but\n also accepts poly1d objects)\n\n \"\"\"\n num = np.atleast_1d(signal)\n den = np.atleast_1d(divisor)\n N = len(num)\n D = len(den)\n if D > N:\n quot = []\n rem = num\n else:\n input = np.zeros(N - D + 1, float)\n input[0] = 1\n quot = lfilter(num, den, input)\n rem = num - convolve(den, quot, mode='full')\n return quot, rem\n\n\ndef hilbert(x, N=None, axis=-1):\n \"\"\"\n Compute the analytic signal, using the Hilbert transform.\n\n The transformation is done along the last axis by default.\n\n Parameters\n ----------\n x : array_like\n Signal data. Must be real.\n N : int, optional\n Number of Fourier components. Default: ``x.shape[axis]``\n axis : int, optional\n Axis along which to do the transformation. Default: -1.\n\n Returns\n -------\n xa : ndarray\n Analytic signal of `x`, of each 1-D array along `axis`\n\n Notes\n -----\n The analytic signal ``x_a(t)`` of signal ``x(t)`` is:\n\n .. math:: x_a = F^{-1}(F(x) 2U) = x + i y\n\n where `F` is the Fourier transform, `U` the unit step function,\n and `y` the Hilbert transform of `x`. [1]_\n\n In other words, the negative half of the frequency spectrum is zeroed\n out, turning the real-valued signal into a complex signal. The Hilbert\n transformed signal can be obtained from ``np.imag(hilbert(x))``, and the\n original signal from ``np.real(hilbert(x))``.\n\n Examples\n ---------\n In this example we use the Hilbert transform to determine the amplitude\n envelope and instantaneous frequency of an amplitude-modulated signal.\n\n >>> import numpy as np\n >>> import matplotlib.pyplot as plt\n >>> from scipy.signal import hilbert, chirp\n\n >>> duration = 1.0\n >>> fs = 400.0\n >>> samples = int(fs*duration)\n >>> t = np.arange(samples) / fs\n\n We create a chirp of which the frequency increases from 20 Hz to 100 Hz and\n apply an amplitude modulation.\n\n >>> signal = chirp(t, 20.0, t[-1], 100.0)\n >>> signal *= (1.0 + 0.5 * np.sin(2.0*np.pi*3.0*t) )\n\n The amplitude envelope is given by magnitude of the analytic signal. The\n instantaneous frequency can be obtained by differentiating the\n instantaneous phase in respect to time. The instantaneous phase corresponds\n to the phase angle of the analytic signal.\n\n >>> analytic_signal = hilbert(signal)\n >>> amplitude_envelope = np.abs(analytic_signal)\n >>> instantaneous_phase = np.unwrap(np.angle(analytic_signal))\n >>> instantaneous_frequency = (np.diff(instantaneous_phase) /\n ... (2.0*np.pi) * fs)\n\n >>> fig = plt.figure()\n >>> ax0 = fig.add_subplot(211)\n >>> ax0.plot(t, signal, label='signal')\n >>> ax0.plot(t, amplitude_envelope, label='envelope')\n >>> ax0.set_xlabel(\"time in seconds\")\n >>> ax0.legend()\n >>> ax1 = fig.add_subplot(212)\n >>> ax1.plot(t[1:], instantaneous_frequency)\n >>> ax1.set_xlabel(\"time in seconds\")\n >>> ax1.set_ylim(0.0, 120.0)\n\n References\n ----------\n .. [1] Wikipedia, \"Analytic signal\".\n https://en.wikipedia.org/wiki/Analytic_signal\n .. [2] Leon Cohen, \"Time-Frequency Analysis\", 1995. Chapter 2.\n .. [3] Alan V. Oppenheim, Ronald W. Schafer. Discrete-Time Signal\n Processing, Third Edition, 2009. 
Chapter 12.\n ISBN 13: 978-1292-02572-8\n\n \"\"\"\n x = np.asarray(x)\n if np.iscomplexobj(x):\n raise ValueError(\"x must be real.\")\n if N is None:\n N = x.shape[axis]\n if N <= 0:\n raise ValueError(\"N must be positive.\")\n\n Xf = sp_fft.fft(x, N, axis=axis)\n h = np.zeros(N)\n if N % 2 == 0:\n h[0] = h[N // 2] = 1\n h[1:N // 2] = 2\n else:\n h[0] = 1\n h[1:(N + 1) // 2] = 2\n\n if x.ndim > 1:\n ind = [np.newaxis] * x.ndim\n ind[axis] = slice(None)\n h = h[tuple(ind)]\n x = sp_fft.ifft(Xf * h, axis=axis)\n return x\n\n\ndef hilbert2(x, N=None):\n \"\"\"\n Compute the '2-D' analytic signal of `x`\n\n Parameters\n ----------\n x : array_like\n 2-D signal data.\n N : int or tuple of two ints, optional\n Number of Fourier components. Default is ``x.shape``\n\n Returns\n -------\n xa : ndarray\n Analytic signal of `x` taken along axes (0,1).\n\n References\n ----------\n .. [1] Wikipedia, \"Analytic signal\",\n https://en.wikipedia.org/wiki/Analytic_signal\n\n \"\"\"\n x = np.atleast_2d(x)\n if x.ndim > 2:\n raise ValueError(\"x must be 2-D.\")\n if np.iscomplexobj(x):\n raise ValueError(\"x must be real.\")\n if N is None:\n N = x.shape\n elif isinstance(N, int):\n if N <= 0:\n raise ValueError(\"N must be positive.\")\n N = (N, N)\n elif len(N) != 2 or np.any(np.asarray(N) <= 0):\n raise ValueError(\"When given as a tuple, N must hold exactly \"\n \"two positive integers\")\n\n Xf = sp_fft.fft2(x, N, axes=(0, 1))\n h1 = np.zeros(N[0], 'd')\n h2 = np.zeros(N[1], 'd')\n for p in range(2):\n h = eval(\"h%d\" % (p + 1))\n N1 = N[p]\n if N1 % 2 == 0:\n h[0] = h[N1 // 2] = 1\n h[1:N1 // 2] = 2\n else:\n h[0] = 1\n h[1:(N1 + 1) // 2] = 2\n exec(\"h%d = h\" % (p + 1), globals(), locals())\n\n h = h1[:, np.newaxis] * h2[np.newaxis, :]\n k = x.ndim\n while k > 2:\n h = h[:, np.newaxis]\n k -= 1\n x = sp_fft.ifft2(Xf * h, axes=(0, 1))\n return x\n\n\ndef cmplx_sort(p):\n \"\"\"Sort roots based on magnitude.\n\n Parameters\n ----------\n p : array_like\n The roots to sort, as a 1-D array.\n\n Returns\n -------\n p_sorted : ndarray\n Sorted roots.\n indx : ndarray\n Array of indices needed to sort the input `p`.\n\n Examples\n --------\n >>> from scipy import signal\n >>> vals = [1, 4, 1+1.j, 3]\n >>> p_sorted, indx = signal.cmplx_sort(vals)\n >>> p_sorted\n array([1.+0.j, 1.+1.j, 3.+0.j, 4.+0.j])\n >>> indx\n array([0, 2, 3, 1])\n \"\"\"\n p = np.asarray(p)\n indx = np.argsort(abs(p))\n return np.take(p, indx, 0), indx\n\n\ndef unique_roots(p, tol=1e-3, rtype='min'):\n \"\"\"Determine unique roots and their multiplicities from a list of roots.\n\n Parameters\n ----------\n p : array_like\n The list of roots.\n tol : float, optional\n The tolerance for two roots to be considered equal in terms of\n the distance between them. Default is 1e-3. 
Refer to Notes about\n the details on roots grouping.\n rtype : {'max', 'maximum', 'min', 'minimum', 'avg', 'mean'}, optional\n How to determine the returned root if multiple roots are within\n `tol` of each other.\n\n - 'max', 'maximum': pick the maximum of those roots\n - 'min', 'minimum': pick the minimum of those roots\n - 'avg', 'mean': take the average of those roots\n\n When finding minimum or maximum among complex roots they are compared\n first by the real part and then by the imaginary part.\n\n Returns\n -------\n unique : ndarray\n The list of unique roots.\n multiplicity : ndarray\n The multiplicity of each root.\n\n Notes\n -----\n If we have 3 roots ``a``, ``b`` and ``c``, such that ``a`` is close to\n ``b`` and ``b`` is close to ``c`` (distance is less than `tol`), then it\n doesn't necessarily mean that ``a`` is close to ``c``. It means that roots\n grouping is not unique. In this function we use \"greedy\" grouping going\n through the roots in the order they are given in the input `p`.\n\n This utility function is not specific to roots but can be used for any\n sequence of values for which uniqueness and multiplicity has to be\n determined. For a more general routine, see `numpy.unique`.\n\n Examples\n --------\n >>> from scipy import signal\n >>> vals = [0, 1.3, 1.31, 2.8, 1.25, 2.2, 10.3]\n >>> uniq, mult = signal.unique_roots(vals, tol=2e-2, rtype='avg')\n\n Check which roots have multiplicity larger than 1:\n\n >>> uniq[mult > 1]\n array([ 1.305])\n \"\"\"\n if rtype in ['max', 'maximum']:\n reduce = np.max\n elif rtype in ['min', 'minimum']:\n reduce = np.min\n elif rtype in ['avg', 'mean']:\n reduce = np.mean\n else:\n raise ValueError(\"`rtype` must be one of \"\n \"{'max', 'maximum', 'min', 'minimum', 'avg', 'mean'}\")\n\n p = np.asarray(p)\n\n points = np.empty((len(p), 2))\n points[:, 0] = np.real(p)\n points[:, 1] = np.imag(p)\n tree = cKDTree(points)\n\n p_unique = []\n p_multiplicity = []\n used = np.zeros(len(p), dtype=bool)\n for i in range(len(p)):\n if used[i]:\n continue\n\n group = tree.query_ball_point(points[i], tol)\n group = [x for x in group if not used[x]]\n\n p_unique.append(reduce(p[group]))\n p_multiplicity.append(len(group))\n\n used[group] = True\n\n return np.asarray(p_unique), np.asarray(p_multiplicity)\n\n\ndef invres(r, p, k, tol=1e-3, rtype='avg'):\n \"\"\"Compute b(s) and a(s) from partial fraction expansion.\n\n If `M` is the degree of numerator `b` and `N` the degree of denominator\n `a`::\n\n b(s) b[0] s**(M) + b[1] s**(M-1) + ... + b[M]\n H(s) = ------ = ------------------------------------------\n a(s) a[0] s**(N) + a[1] s**(N-1) + ... + a[N]\n\n then the partial-fraction expansion H(s) is defined as::\n\n r[0] r[1] r[-1]\n = -------- + -------- + ... + --------- + k(s)\n (s-p[0]) (s-p[1]) (s-p[-1])\n\n If there are any repeated roots (closer together than `tol`), then H(s)\n has terms like::\n\n r[i] r[i+1] r[i+n-1]\n -------- + ----------- + ... + -----------\n (s-p[i]) (s-p[i])**2 (s-p[i])**n\n\n This function is used for polynomials in positive powers of s or z,\n such as analog filters or digital filters in controls engineering. For\n negative powers of z (typical for digital filters in DSP), use `invresz`.\n\n Parameters\n ----------\n r : array_like\n Residues corresponding to the poles. For repeated poles, the residues\n must be ordered to correspond to ascending by power fractions.\n p : array_like\n Poles. 
Equal poles must be adjacent.\n k : array_like\n Coefficients of the direct polynomial term.\n tol : float, optional\n The tolerance for two roots to be considered equal in terms of\n the distance between them. Default is 1e-3. See `unique_roots`\n for further details.\n rtype : {'avg', 'min', 'max'}, optional\n Method for computing a root to represent a group of identical roots.\n Default is 'avg'. See `unique_roots` for further details.\n\n Returns\n -------\n b : ndarray\n Numerator polynomial coefficients.\n a : ndarray\n Denominator polynomial coefficients.\n\n See Also\n --------\n residue, invresz, unique_roots\n\n \"\"\"\n r = np.atleast_1d(r)\n p = np.atleast_1d(p)\n k = np.trim_zeros(np.atleast_1d(k), 'f')\n\n unique_poles, multiplicity = _group_poles(p, tol, rtype)\n factors, denominator = _compute_factors(unique_poles, multiplicity,\n include_powers=True)\n\n if len(k) == 0:\n numerator = 0\n else:\n numerator = np.polymul(k, denominator)\n\n for residue, factor in zip(r, factors):\n numerator = np.polyadd(numerator, residue * factor)\n\n return numerator, denominator\n\n\ndef _compute_factors(roots, multiplicity, include_powers=False):\n \"\"\"Compute the total polynomial divided by factors for each root.\"\"\"\n current = np.array([1])\n suffixes = [current]\n for pole, mult in zip(roots[-1:0:-1], multiplicity[-1:0:-1]):\n monomial = np.array([1, -pole])\n for _ in range(mult):\n current = np.polymul(current, monomial)\n suffixes.append(current)\n suffixes = suffixes[::-1]\n\n factors = []\n current = np.array([1])\n for pole, mult, suffix in zip(roots, multiplicity, suffixes):\n monomial = np.array([1, -pole])\n block = []\n for i in range(mult):\n if i == 0 or include_powers:\n block.append(np.polymul(current, suffix))\n current = np.polymul(current, monomial)\n factors.extend(reversed(block))\n\n return factors, current\n\n\ndef _compute_residues(poles, multiplicity, numerator):\n denominator_factors, _ = _compute_factors(poles, multiplicity)\n numerator = numerator.astype(poles.dtype)\n\n residues = []\n for pole, mult, factor in zip(poles, multiplicity,\n denominator_factors):\n if mult == 1:\n residues.append(np.polyval(numerator, pole) /\n np.polyval(factor, pole))\n else:\n numer = numerator.copy()\n monomial = np.array([1, -pole])\n factor, d = np.polydiv(factor, monomial)\n\n block = []\n for _ in range(mult):\n numer, n = np.polydiv(numer, monomial)\n r = n[0] / d[0]\n numer = np.polysub(numer, r * factor)\n block.append(r)\n\n residues.extend(reversed(block))\n\n return np.asarray(residues)\n\n\ndef residue(b, a, tol=1e-3, rtype='avg'):\n \"\"\"Compute partial-fraction expansion of b(s) / a(s).\n\n If `M` is the degree of numerator `b` and `N` the degree of denominator\n `a`::\n\n b(s) b[0] s**(M) + b[1] s**(M-1) + ... + b[M]\n H(s) = ------ = ------------------------------------------\n a(s) a[0] s**(N) + a[1] s**(N-1) + ... + a[N]\n\n then the partial-fraction expansion H(s) is defined as::\n\n r[0] r[1] r[-1]\n = -------- + -------- + ... + --------- + k(s)\n (s-p[0]) (s-p[1]) (s-p[-1])\n\n If there are any repeated roots (closer together than `tol`), then H(s)\n has terms like::\n\n r[i] r[i+1] r[i+n-1]\n -------- + ----------- + ... + -----------\n (s-p[i]) (s-p[i])**2 (s-p[i])**n\n\n This function is used for polynomials in positive powers of s or z,\n such as analog filters or digital filters in controls engineering. 
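    As a small sketch (the coefficients are arbitrary), expanding
    ``H(s) = s / (s**2 + 3*s + 2)`` and rebuilding it with `invres`:

    >>> from scipy import signal
    >>> r, p, k = signal.residue([1, 0], [1, 3, 2])
    >>> b, a = signal.invres(r, p, k)
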
For\n negative powers of z (typical for digital filters in DSP), use `residuez`.\n\n See Notes for details about the algorithm.\n\n Parameters\n ----------\n b : array_like\n Numerator polynomial coefficients.\n a : array_like\n Denominator polynomial coefficients.\n tol : float, optional\n The tolerance for two roots to be considered equal in terms of\n the distance between them. Default is 1e-3. See `unique_roots`\n for further details.\n rtype : {'avg', 'min', 'max'}, optional\n Method for computing a root to represent a group of identical roots.\n Default is 'avg'. See `unique_roots` for further details.\n\n Returns\n -------\n r : ndarray\n Residues corresponding to the poles. For repeated poles, the residues\n are ordered to correspond to ascending by power fractions.\n p : ndarray\n Poles ordered by magnitude in ascending order.\n k : ndarray\n Coefficients of the direct polynomial term.\n\n See Also\n --------\n invres, residuez, numpy.poly, unique_roots\n\n Notes\n -----\n The \"deflation through subtraction\" algorithm is used for\n computations --- method 6 in [1]_.\n\n The form of partial fraction expansion depends on poles multiplicity in\n the exact mathematical sense. However there is no way to exactly\n determine multiplicity of roots of a polynomial in numerical computing.\n Thus you should think of the result of `residue` with given `tol` as\n partial fraction expansion computed for the denominator composed of the\n computed poles with empirically determined multiplicity. The choice of\n `tol` can drastically change the result if there are close poles.\n\n References\n ----------\n .. [1] J. F. Mahoney, B. D. Sivazlian, \"Partial fractions expansion: a\n review of computational methodology and efficiency\", Journal of\n Computational and Applied Mathematics, Vol. 9, 1983.\n \"\"\"\n b = np.asarray(b)\n a = np.asarray(a)\n if (np.issubdtype(b.dtype, np.complexfloating)\n or np.issubdtype(a.dtype, np.complexfloating)):\n b = b.astype(complex)\n a = a.astype(complex)\n else:\n b = b.astype(float)\n a = a.astype(float)\n\n b = np.trim_zeros(np.atleast_1d(b), 'f')\n a = np.trim_zeros(np.atleast_1d(a), 'f')\n\n if a.size == 0:\n raise ValueError(\"Denominator `a` is zero.\")\n\n poles = np.roots(a)\n if b.size == 0:\n return np.zeros(poles.shape), cmplx_sort(poles)[0], np.array([])\n\n if len(b) < len(a):\n k = np.empty(0)\n else:\n k, b = np.polydiv(b, a)\n\n unique_poles, multiplicity = unique_roots(poles, tol=tol, rtype=rtype)\n unique_poles, order = cmplx_sort(unique_poles)\n multiplicity = multiplicity[order]\n\n residues = _compute_residues(unique_poles, multiplicity, b)\n\n index = 0\n for pole, mult in zip(unique_poles, multiplicity):\n poles[index:index + mult] = pole\n index += mult\n\n return residues / a[0], poles, k\n\n\ndef residuez(b, a, tol=1e-3, rtype='avg'):\n \"\"\"Compute partial-fraction expansion of b(z) / a(z).\n\n If `M` is the degree of numerator `b` and `N` the degree of denominator\n `a`::\n\n b(z) b[0] + b[1] z**(-1) + ... + b[M] z**(-M)\n H(z) = ------ = ------------------------------------------\n a(z) a[0] + a[1] z**(-1) + ... + a[N] z**(-N)\n\n then the partial-fraction expansion H(z) is defined as::\n\n r[0] r[-1]\n = --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ...\n (1-p[0]z**(-1)) (1-p[-1]z**(-1))\n\n If there are any repeated roots (closer than `tol`), then the partial\n fraction expansion has terms like::\n\n r[i] r[i+1] r[i+n-1]\n -------------- + ------------------ + ... 
+ ------------------\n (1-p[i]z**(-1)) (1-p[i]z**(-1))**2 (1-p[i]z**(-1))**n\n\n This function is used for polynomials in negative powers of z,\n such as digital filters in DSP. For positive powers, use `residue`.\n\n See Notes of `residue` for details about the algorithm.\n\n Parameters\n ----------\n b : array_like\n Numerator polynomial coefficients.\n a : array_like\n Denominator polynomial coefficients.\n tol : float, optional\n The tolerance for two roots to be considered equal in terms of\n the distance between them. Default is 1e-3. See `unique_roots`\n for further details.\n rtype : {'avg', 'min', 'max'}, optional\n Method for computing a root to represent a group of identical roots.\n Default is 'avg'. See `unique_roots` for further details.\n\n Returns\n -------\n r : ndarray\n Residues corresponding to the poles. For repeated poles, the residues\n are ordered to correspond to ascending by power fractions.\n p : ndarray\n Poles ordered by magnitude in ascending order.\n k : ndarray\n Coefficients of the direct polynomial term.\n\n See Also\n --------\n invresz, residue, unique_roots\n \"\"\"\n b = np.asarray(b)\n a = np.asarray(a)\n if (np.issubdtype(b.dtype, np.complexfloating)\n or np.issubdtype(a.dtype, np.complexfloating)):\n b = b.astype(complex)\n a = a.astype(complex)\n else:\n b = b.astype(float)\n a = a.astype(float)\n\n b = np.trim_zeros(np.atleast_1d(b), 'b')\n a = np.trim_zeros(np.atleast_1d(a), 'b')\n\n if a.size == 0:\n raise ValueError(\"Denominator `a` is zero.\")\n elif a[0] == 0:\n raise ValueError(\"First coefficient of determinant `a` must be \"\n \"non-zero.\")\n\n poles = np.roots(a)\n if b.size == 0:\n return np.zeros(poles.shape), cmplx_sort(poles)[0], np.array([])\n\n b_rev = b[::-1]\n a_rev = a[::-1]\n\n if len(b_rev) < len(a_rev):\n k_rev = np.empty(0)\n else:\n k_rev, b_rev = np.polydiv(b_rev, a_rev)\n\n unique_poles, multiplicity = unique_roots(poles, tol=tol, rtype=rtype)\n unique_poles, order = cmplx_sort(unique_poles)\n multiplicity = multiplicity[order]\n\n residues = _compute_residues(1 / unique_poles, multiplicity, b_rev)\n\n index = 0\n powers = np.empty(len(residues), dtype=int)\n for pole, mult in zip(unique_poles, multiplicity):\n poles[index:index + mult] = pole\n powers[index:index + mult] = 1 + np.arange(mult)\n index += mult\n\n residues *= (-poles) ** powers / a_rev[0]\n\n return residues, poles, k_rev[::-1]\n\n\ndef _group_poles(poles, tol, rtype):\n if rtype in ['max', 'maximum']:\n reduce = np.max\n elif rtype in ['min', 'minimum']:\n reduce = np.min\n elif rtype in ['avg', 'mean']:\n reduce = np.mean\n else:\n raise ValueError(\"`rtype` must be one of \"\n \"{'max', 'maximum', 'min', 'minimum', 'avg', 'mean'}\")\n\n unique = []\n multiplicity = []\n\n pole = poles[0]\n block = [pole]\n for i in range(1, len(poles)):\n if abs(poles[i] - pole) <= tol:\n block.append(pole)\n else:\n unique.append(reduce(block))\n multiplicity.append(len(block))\n pole = poles[i]\n block = [pole]\n\n unique.append(reduce(block))\n multiplicity.append(len(block))\n\n return np.asarray(unique), np.asarray(multiplicity)\n\n\ndef invresz(r, p, k, tol=1e-3, rtype='avg'):\n \"\"\"Compute b(z) and a(z) from partial fraction expansion.\n\n If `M` is the degree of numerator `b` and `N` the degree of denominator\n `a`::\n\n b(z) b[0] + b[1] z**(-1) + ... + b[M] z**(-M)\n H(z) = ------ = ------------------------------------------\n a(z) a[0] + a[1] z**(-1) + ... 
+ a[N] z**(-N)\n\n then the partial-fraction expansion H(z) is defined as::\n\n r[0] r[-1]\n = --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ...\n (1-p[0]z**(-1)) (1-p[-1]z**(-1))\n\n If there are any repeated roots (closer than `tol`), then the partial\n fraction expansion has terms like::\n\n r[i] r[i+1] r[i+n-1]\n -------------- + ------------------ + ... + ------------------\n (1-p[i]z**(-1)) (1-p[i]z**(-1))**2 (1-p[i]z**(-1))**n\n\n This function is used for polynomials in negative powers of z,\n such as digital filters in DSP. For positive powers, use `invres`.\n\n Parameters\n ----------\n r : array_like\n Residues corresponding to the poles. For repeated poles, the residues\n must be ordered to correspond to ascending by power fractions.\n p : array_like\n Poles. Equal poles must be adjacent.\n k : array_like\n Coefficients of the direct polynomial term.\n tol : float, optional\n The tolerance for two roots to be considered equal in terms of\n the distance between them. Default is 1e-3. See `unique_roots`\n for further details.\n rtype : {'avg', 'min', 'max'}, optional\n Method for computing a root to represent a group of identical roots.\n Default is 'avg'. See `unique_roots` for further details.\n\n Returns\n -------\n b : ndarray\n Numerator polynomial coefficients.\n a : ndarray\n Denominator polynomial coefficients.\n\n See Also\n --------\n residuez, unique_roots, invres\n\n \"\"\"\n r = np.atleast_1d(r)\n p = np.atleast_1d(p)\n k = np.trim_zeros(np.atleast_1d(k), 'b')\n\n unique_poles, multiplicity = _group_poles(p, tol, rtype)\n factors, denominator = _compute_factors(unique_poles, multiplicity,\n include_powers=True)\n\n if len(k) == 0:\n numerator = 0\n else:\n numerator = np.polymul(k[::-1], denominator[::-1])\n\n for residue, factor in zip(r, factors):\n numerator = np.polyadd(numerator, residue * factor[::-1])\n\n return numerator[::-1], denominator\n\n\ndef resample(x, num, t=None, axis=0, window=None, domain='time'):\n \"\"\"\n Resample `x` to `num` samples using Fourier method along the given axis.\n\n The resampled signal starts at the same value as `x` but is sampled\n with a spacing of ``len(x) / num * (spacing of x)``. Because a\n Fourier method is used, the signal is assumed to be periodic.\n\n Parameters\n ----------\n x : array_like\n The data to be resampled.\n num : int\n The number of samples in the resampled signal.\n t : array_like, optional\n If `t` is given, it is assumed to be the equally spaced sample\n positions associated with the signal data in `x`.\n axis : int, optional\n The axis of `x` that is resampled. Default is 0.\n window : array_like, callable, string, float, or tuple, optional\n Specifies the window applied to the signal in the Fourier\n domain. 
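        For instance, a window specification tuple can be passed
        directly; a sketch with arbitrary data:

        >>> import numpy as np
        >>> from scipy import signal
        >>> y = np.cos(np.linspace(0, 10, 20, endpoint=False))
        >>> f = signal.resample(y, 100, window=('kaiser', 5.0))
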
See below for details.\n domain : string, optional\n A string indicating the domain of the input `x`:\n ``time`` Consider the input `x` as time-domain (Default),\n ``freq`` Consider the input `x` as frequency-domain.\n\n Returns\n -------\n resampled_x or (resampled_x, resampled_t)\n Either the resampled array, or, if `t` was given, a tuple\n containing the resampled array and the corresponding resampled\n positions.\n\n See Also\n --------\n decimate : Downsample the signal after applying an FIR or IIR filter.\n resample_poly : Resample using polyphase filtering and an FIR filter.\n\n Notes\n -----\n The argument `window` controls a Fourier-domain window that tapers\n the Fourier spectrum before zero-padding to alleviate ringing in\n the resampled values for sampled signals you didn't intend to be\n interpreted as band-limited.\n\n If `window` is a function, then it is called with a vector of inputs\n indicating the frequency bins (i.e. fftfreq(x.shape[axis]) ).\n\n If `window` is an array of the same length as `x.shape[axis]` it is\n assumed to be the window to be applied directly in the Fourier\n domain (with dc and low-frequency first).\n\n For any other type of `window`, the function `scipy.signal.get_window`\n is called to generate the window.\n\n The first sample of the returned vector is the same as the first\n sample of the input vector. The spacing between samples is changed\n from ``dx`` to ``dx * len(x) / num``.\n\n If `t` is not None, then it is used solely to calculate the resampled\n positions `resampled_t`\n\n As noted, `resample` uses FFT transformations, which can be very\n slow if the number of input or output samples is large and prime;\n see `scipy.fft.fft`.\n\n Examples\n --------\n Note that the end of the resampled data rises to meet the first\n sample of the next cycle:\n\n >>> from scipy import signal\n\n >>> x = np.linspace(0, 10, 20, endpoint=False)\n >>> y = np.cos(-x**2/6.0)\n >>> f = signal.resample(y, 100)\n >>> xnew = np.linspace(0, 10, 100, endpoint=False)\n\n >>> import matplotlib.pyplot as plt\n >>> plt.plot(x, y, 'go-', xnew, f, '.-', 10, y[0], 'ro')\n >>> plt.legend(['data', 'resampled'], loc='best')\n >>> plt.show()\n \"\"\"\n\n if domain not in ('time', 'freq'):\n raise ValueError(\"Acceptable domain flags are 'time' or\"\n \" 'freq', not domain={}\".format(domain))\n\n x = np.asarray(x)\n Nx = x.shape[axis]\n\n # Check if we can use faster real FFT\n real_input = np.isrealobj(x)\n\n if domain == 'time':\n # Forward transform\n if real_input:\n X = sp_fft.rfft(x, axis=axis)\n else: # Full complex FFT\n X = sp_fft.fft(x, axis=axis)\n else: # domain == 'freq'\n X = x\n\n # Apply window to spectrum\n if window is not None:\n if callable(window):\n W = window(sp_fft.fftfreq(Nx))\n elif isinstance(window, np.ndarray):\n if window.shape != (Nx,):\n raise ValueError('window must have the same length as data')\n W = window\n else:\n W = sp_fft.ifftshift(get_window(window, Nx))\n\n newshape_W = [1] * x.ndim\n newshape_W[axis] = X.shape[axis]\n if real_input:\n # Fold the window back on itself to mimic complex behavior\n W_real = W.copy()\n W_real[1:] += W_real[-1:0:-1]\n W_real[1:] *= 0.5\n X *= W_real[:newshape_W[axis]].reshape(newshape_W)\n else:\n X *= W.reshape(newshape_W)\n\n # Copy each half of the original spectrum to the output spectrum, either\n # truncating high frequences (downsampling) or zero-padding them\n # (upsampling)\n\n # Placeholder array for output spectrum\n newshape = list(x.shape)\n if real_input:\n newshape[axis] = num // 2 + 1\n 
else:\n newshape[axis] = num\n Y = np.zeros(newshape, X.dtype)\n\n # Copy positive frequency components (and Nyquist, if present)\n N = min(num, Nx)\n nyq = N // 2 + 1 # Slice index that includes Nyquist if present\n sl = [slice(None)] * x.ndim\n sl[axis] = slice(0, nyq)\n Y[tuple(sl)] = X[tuple(sl)]\n if not real_input:\n # Copy negative frequency components\n if N > 2: # (slice expression doesn't collapse to empty array)\n sl[axis] = slice(nyq - N, None)\n Y[tuple(sl)] = X[tuple(sl)]\n\n # Split/join Nyquist component(s) if present\n # So far we have set Y[+N/2]=X[+N/2]\n if N % 2 == 0:\n if num < Nx: # downsampling\n if real_input:\n sl[axis] = slice(N//2, N//2 + 1)\n Y[tuple(sl)] *= 2.\n else:\n # select the component of Y at frequency +N/2,\n # add the component of X at -N/2\n sl[axis] = slice(-N//2, -N//2 + 1)\n Y[tuple(sl)] += X[tuple(sl)]\n elif Nx < num: # upsampling\n # select the component at frequency +N/2 and halve it\n sl[axis] = slice(N//2, N//2 + 1)\n Y[tuple(sl)] *= 0.5\n if not real_input:\n temp = Y[tuple(sl)]\n # set the component at -N/2 equal to the component at +N/2\n sl[axis] = slice(num-N//2, num-N//2 + 1)\n Y[tuple(sl)] = temp\n\n # Inverse transform\n if real_input:\n y = sp_fft.irfft(Y, num, axis=axis)\n else:\n y = sp_fft.ifft(Y, axis=axis, overwrite_x=True)\n\n y *= (float(num) / float(Nx))\n\n if t is None:\n return y\n else:\n new_t = np.arange(0, num) * (t[1] - t[0]) * Nx / float(num) + t[0]\n return y, new_t\n\n\ndef resample_poly(x, up, down, axis=0, window=('kaiser', 5.0),\n padtype='constant', cval=None):\n \"\"\"\n Resample `x` along the given axis using polyphase filtering.\n\n The signal `x` is upsampled by the factor `up`, a zero-phase low-pass\n FIR filter is applied, and then it is downsampled by the factor `down`.\n The resulting sample rate is ``up / down`` times the original sample\n rate. By default, values beyond the boundary of the signal are assumed\n to be zero during the filtering step.\n\n Parameters\n ----------\n x : array_like\n The data to be resampled.\n up : int\n The upsampling factor.\n down : int\n The downsampling factor.\n axis : int, optional\n The axis of `x` that is resampled. Default is 0.\n window : string, tuple, or array_like, optional\n Desired window to use to design the low-pass filter, or the FIR filter\n coefficients to employ. See below for details.\n padtype : string, optional\n `constant`, `line`, `mean`, `median`, `maximum`, `minimum` or any of\n the other signal extension modes supported by `scipy.signal.upfirdn`.\n Changes assumptions on values beyond the boundary. If `constant`,\n assumed to be `cval` (default zero). If `line` assumed to continue a\n linear trend defined by the first and last points. `mean`, `median`,\n `maximum` and `minimum` work as in `np.pad` and assume that the values\n beyond the boundary are the mean, median, maximum or minimum\n respectively of the array along the axis.\n\n .. versionadded:: 1.4.0\n cval : float, optional\n Value to use if `padtype='constant'`. Default is zero.\n\n .. versionadded:: 1.4.0\n\n Returns\n -------\n resampled_x : array\n The resampled array.\n\n See Also\n --------\n decimate : Downsample the signal after applying an FIR or IIR filter.\n resample : Resample up or down using the FFT method.\n\n Notes\n -----\n This polyphase method will likely be faster than the Fourier method\n in `scipy.signal.resample` when the number of samples is large and\n prime, or when the number of samples is large and `up` and `down`\n share a large greatest common denominator. 
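# A small sketch, with assumed factors, of how resample_poly reduces `up` and
# `down` by their greatest common divisor and how the output length follows from
# the reduced ratio; the values 6/4 and the length 30 are illustrative only.
import math
import numpy as np
from scipy.signal import resample_poly

x = np.arange(30, dtype=float)
up, down = 6, 4                              # internally reduced to 3 and 2
n_out = math.ceil(x.shape[0] * up / down)    # 45 output samples
y = resample_poly(x, up, down)
# y.shape[0] == n_out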
The length of the FIR\n filter used will depend on ``max(up, down) // gcd(up, down)``, and\n the number of operations during polyphase filtering will depend on\n the filter length and `down` (see `scipy.signal.upfirdn` for details).\n\n The argument `window` specifies the FIR low-pass filter design.\n\n If `window` is an array_like it is assumed to be the FIR filter\n coefficients. Note that the FIR filter is applied after the upsampling\n step, so it should be designed to operate on a signal at a sampling\n frequency higher than the original by a factor of `up//gcd(up, down)`.\n This function's output will be centered with respect to this array, so it\n is best to pass a symmetric filter with an odd number of samples if, as\n is usually the case, a zero-phase filter is desired.\n\n For any other type of `window`, the functions `scipy.signal.get_window`\n and `scipy.signal.firwin` are called to generate the appropriate filter\n coefficients.\n\n The first sample of the returned vector is the same as the first\n sample of the input vector. The spacing between samples is changed\n from ``dx`` to ``dx * down / float(up)``.\n\n Examples\n --------\n By default, the end of the resampled data rises to meet the first\n sample of the next cycle for the FFT method, and gets closer to zero\n for the polyphase method:\n\n >>> from scipy import signal\n\n >>> x = np.linspace(0, 10, 20, endpoint=False)\n >>> y = np.cos(-x**2/6.0)\n >>> f_fft = signal.resample(y, 100)\n >>> f_poly = signal.resample_poly(y, 100, 20)\n >>> xnew = np.linspace(0, 10, 100, endpoint=False)\n\n >>> import matplotlib.pyplot as plt\n >>> plt.plot(xnew, f_fft, 'b.-', xnew, f_poly, 'r.-')\n >>> plt.plot(x, y, 'ko-')\n >>> plt.plot(10, y[0], 'bo', 10, 0., 'ro') # boundaries\n >>> plt.legend(['resample', 'resamp_poly', 'data'], loc='best')\n >>> plt.show()\n\n This default behaviour can be changed by using the padtype option:\n\n >>> import numpy as np\n >>> from scipy import signal\n\n >>> N = 5\n >>> x = np.linspace(0, 1, N, endpoint=False)\n >>> y = 2 + x**2 - 1.7*np.sin(x) + .2*np.cos(11*x)\n >>> y2 = 1 + x**3 + 0.1*np.sin(x) + .1*np.cos(11*x)\n >>> Y = np.stack([y, y2], axis=-1)\n >>> up = 4\n >>> xr = np.linspace(0, 1, N*up, endpoint=False)\n\n >>> y2 = signal.resample_poly(Y, up, 1, padtype='constant')\n >>> y3 = signal.resample_poly(Y, up, 1, padtype='mean')\n >>> y4 = signal.resample_poly(Y, up, 1, padtype='line')\n\n >>> import matplotlib.pyplot as plt\n >>> for i in [0,1]:\n ... plt.figure()\n ... plt.plot(xr, y4[:,i], 'g.', label='line')\n ... plt.plot(xr, y3[:,i], 'y.', label='mean')\n ... plt.plot(xr, y2[:,i], 'r.', label='constant')\n ... plt.plot(x, Y[:,i], 'k-')\n ... 
plt.legend()\n >>> plt.show()\n\n \"\"\"\n x = np.asarray(x)\n if up != int(up):\n raise ValueError(\"up must be an integer\")\n if down != int(down):\n raise ValueError(\"down must be an integer\")\n up = int(up)\n down = int(down)\n if up < 1 or down < 1:\n raise ValueError('up and down must be >= 1')\n if cval is not None and padtype != 'constant':\n raise ValueError('cval has no effect when padtype is ', padtype)\n\n # Determine our up and down factors\n # Use a rational approximation to save computation time on really long\n # signals\n g_ = math.gcd(up, down)\n up //= g_\n down //= g_\n if up == down == 1:\n return x.copy()\n n_in = x.shape[axis]\n n_out = n_in * up\n n_out = n_out // down + bool(n_out % down)\n\n if isinstance(window, (list, np.ndarray)):\n window = np.array(window) # use array to force a copy (we modify it)\n if window.ndim > 1:\n raise ValueError('window must be 1-D')\n half_len = (window.size - 1) // 2\n h = window\n else:\n # Design a linear-phase low-pass FIR filter\n max_rate = max(up, down)\n f_c = 1. / max_rate # cutoff of FIR filter (rel. to Nyquist)\n half_len = 10 * max_rate # reasonable cutoff for sinc-like function\n h = firwin(2 * half_len + 1, f_c, window=window)\n h *= up\n\n # Zero-pad our filter to put the output samples at the center\n n_pre_pad = (down - half_len % down)\n n_post_pad = 0\n n_pre_remove = (half_len + n_pre_pad) // down\n # We should rarely need to do this given our filter lengths...\n while _output_len(len(h) + n_pre_pad + n_post_pad, n_in,\n up, down) < n_out + n_pre_remove:\n n_post_pad += 1\n h = np.concatenate((np.zeros(n_pre_pad, dtype=h.dtype), h,\n np.zeros(n_post_pad, dtype=h.dtype)))\n n_pre_remove_end = n_pre_remove + n_out\n\n # Remove background depending on the padtype option\n funcs = {'mean': np.mean, 'median': np.median,\n 'minimum': np.amin, 'maximum': np.amax}\n upfirdn_kwargs = {'mode': 'constant', 'cval': 0}\n if padtype in funcs:\n background_values = funcs[padtype](x, axis=axis, keepdims=True)\n elif padtype in _upfirdn_modes:\n upfirdn_kwargs = {'mode': padtype}\n if padtype == 'constant':\n if cval is None:\n cval = 0\n upfirdn_kwargs['cval'] = cval\n else:\n raise ValueError(\n 'padtype must be one of: maximum, mean, median, minimum, ' +\n ', '.join(_upfirdn_modes))\n\n if padtype in funcs:\n x = x - background_values\n\n # filter then remove excess\n y = upfirdn(h, x, up, down, axis=axis, **upfirdn_kwargs)\n keep = [slice(None), ]*x.ndim\n keep[axis] = slice(n_pre_remove, n_pre_remove_end)\n y_keep = y[tuple(keep)]\n\n # Add background back\n if padtype in funcs:\n y_keep += background_values\n\n return y_keep\n\n\ndef vectorstrength(events, period):\n '''\n Determine the vector strength of the events corresponding to the given\n period.\n\n The vector strength is a measure of phase synchrony, how well the\n timing of the events is synchronized to a single period of a periodic\n signal.\n\n If multiple periods are used, calculate the vector strength of each.\n This is called the \"resonating vector strength\".\n\n Parameters\n ----------\n events : 1D array_like\n An array of time points containing the timing of the events.\n period : float or array_like\n The period of the signal that the events should synchronize to.\n The period is in the same units as `events`. It can also be an array\n of periods, in which case the outputs are arrays of the same length.\n\n Returns\n -------\n strength : float or 1D array\n The strength of the synchronization. 
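# A minimal sketch, with assumed event times, of vectorstrength: events that all
# fall at the same phase of the period give a strength near 1, and jittering the
# event times lowers it.
import numpy as np
from scipy.signal import vectorstrength

events = np.array([0.0, 1.0, 2.0, 3.0, 4.0])   # one event at the start of each period
strength, phase = vectorstrength(events, period=1.0)
# strength is ~1.0 and phase ~0.0 for these perfectly aligned events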
1.0 is perfect synchronization\n and 0.0 is no synchronization. If `period` is an array, this is also\n an array with each element containing the vector strength at the\n corresponding period.\n phase : float or array\n The phase that the events are most strongly synchronized to in radians.\n If `period` is an array, this is also an array with each element\n containing the phase for the corresponding period.\n\n References\n ----------\n van Hemmen, JL, Longtin, A, and Vollmayr, AN. Testing resonating vector\n strength: Auditory system, electric fish, and noise.\n Chaos 21, 047508 (2011);\n :doi:`10.1063/1.3670512`.\n van Hemmen, JL. Vector strength after Goldberg, Brown, and von Mises:\n biological and mathematical perspectives. Biol Cybern.\n 2013 Aug;107(4):385-96. :doi:`10.1007/s00422-013-0561-7`.\n van Hemmen, JL and Vollmayr, AN. Resonating vector strength: what happens\n when we vary the \"probing\" frequency while keeping the spike times\n fixed. Biol Cybern. 2013 Aug;107(4):491-94.\n :doi:`10.1007/s00422-013-0560-8`.\n '''\n events = np.asarray(events)\n period = np.asarray(period)\n if events.ndim > 1:\n raise ValueError('events cannot have dimensions more than 1')\n if period.ndim > 1:\n raise ValueError('period cannot have dimensions more than 1')\n\n # we need to know later if period was originally a scalar\n scalarperiod = not period.ndim\n\n events = np.atleast_2d(events)\n period = np.atleast_2d(period)\n if (period <= 0).any():\n raise ValueError('periods must be positive')\n\n # this converts the times to vectors\n vectors = np.exp(np.dot(2j*np.pi/period.T, events))\n\n # the vector strength is just the magnitude of the mean of the vectors\n # the vector phase is the angle of the mean of the vectors\n vectormean = np.mean(vectors, axis=1)\n strength = abs(vectormean)\n phase = np.angle(vectormean)\n\n # if the original period was a scalar, return scalars\n if scalarperiod:\n strength = strength[0]\n phase = phase[0]\n return strength, phase\n\n\ndef detrend(data, axis=-1, type='linear', bp=0, overwrite_data=False):\n \"\"\"\n Remove linear trend along axis from data.\n\n Parameters\n ----------\n data : array_like\n The input data.\n axis : int, optional\n The axis along which to detrend the data. By default this is the\n last axis (-1).\n type : {'linear', 'constant'}, optional\n The type of detrending. If ``type == 'linear'`` (default),\n the result of a linear least-squares fit to `data` is subtracted\n from `data`.\n If ``type == 'constant'``, only the mean of `data` is subtracted.\n bp : array_like of ints, optional\n A sequence of break points. If given, an individual linear fit is\n performed for each part of `data` between two break points.\n Break points are specified as indices into `data`. This parameter\n only has an effect when ``type == 'linear'``.\n overwrite_data : bool, optional\n If True, perform in place detrending and avoid a copy. 
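# A small sketch, with assumed data, of piecewise linear detrending using the
# `bp` break points described above: a separate linear fit is removed on each
# side of index 50.
import numpy as np
from scipy.signal import detrend

x = np.concatenate([np.arange(50.0), 50.0 - np.arange(50.0)])   # a triangle
flat = detrend(x, type='linear', bp=[50])
# flat is ~0 everywhere, since each segment is exactly linear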
Default is False\n\n Returns\n -------\n ret : ndarray\n The detrended input data.\n\n Examples\n --------\n >>> from scipy import signal\n >>> randgen = np.random.RandomState(9)\n >>> npoints = 1000\n >>> noise = randgen.randn(npoints)\n >>> x = 3 + 2*np.linspace(0, 1, npoints) + noise\n >>> (signal.detrend(x) - noise).max() < 0.01\n True\n\n \"\"\"\n if type not in ['linear', 'l', 'constant', 'c']:\n raise ValueError(\"Trend type must be 'linear' or 'constant'.\")\n data = np.asarray(data)\n dtype = data.dtype.char\n if dtype not in 'dfDF':\n dtype = 'd'\n if type in ['constant', 'c']:\n ret = data - np.expand_dims(np.mean(data, axis), axis)\n return ret\n else:\n dshape = data.shape\n N = dshape[axis]\n bp = np.sort(np.unique(np.r_[0, bp, N]))\n if np.any(bp > N):\n raise ValueError(\"Breakpoints must be less than length \"\n \"of data along given axis.\")\n Nreg = len(bp) - 1\n # Restructure data so that axis is along first dimension and\n # all other dimensions are collapsed into second dimension\n rnk = len(dshape)\n if axis < 0:\n axis = axis + rnk\n newdims = np.r_[axis, 0:axis, axis + 1:rnk]\n newdata = np.reshape(np.transpose(data, tuple(newdims)),\n (N, _prod(dshape) // N))\n if not overwrite_data:\n newdata = newdata.copy() # make sure we have a copy\n if newdata.dtype.char not in 'dfDF':\n newdata = newdata.astype(dtype)\n # Find leastsq fit and remove it for each piece\n for m in range(Nreg):\n Npts = bp[m + 1] - bp[m]\n A = np.ones((Npts, 2), dtype)\n A[:, 0] = np.cast[dtype](np.arange(1, Npts + 1) * 1.0 / Npts)\n sl = slice(bp[m], bp[m + 1])\n coef, resids, rank, s = linalg.lstsq(A, newdata[sl])\n newdata[sl] = newdata[sl] - np.dot(A, coef)\n # Put data back in original shape.\n tdshape = np.take(dshape, newdims, 0)\n ret = np.reshape(newdata, tuple(tdshape))\n vals = list(range(1, rnk))\n olddims = vals[:axis] + [0] + vals[axis:]\n ret = np.transpose(ret, tuple(olddims))\n return ret\n\n\ndef lfilter_zi(b, a):\n \"\"\"\n Construct initial conditions for lfilter for step response steady-state.\n\n Compute an initial state `zi` for the `lfilter` function that corresponds\n to the steady state of the step response.\n\n A typical use of this function is to set the initial state so that the\n output of the filter starts at the same value as the first element of\n the signal to be filtered.\n\n Parameters\n ----------\n b, a : array_like (1-D)\n The IIR filter coefficients. See `lfilter` for more\n information.\n\n Returns\n -------\n zi : 1-D ndarray\n The initial state for the filter.\n\n See Also\n --------\n lfilter, lfiltic, filtfilt\n\n Notes\n -----\n A linear filter with order m has a state space representation (A, B, C, D),\n for which the output y of the filter can be expressed as::\n\n z(n+1) = A*z(n) + B*x(n)\n y(n) = C*z(n) + D*x(n)\n\n where z(n) is a vector of length m, A has shape (m, m), B has shape\n (m, 1), C has shape (1, m) and D has shape (1, 1) (assuming x(n) is\n a scalar). 
lfilter_zi solves::\n\n zi = A*zi + B\n\n In other words, it finds the initial condition for which the response\n to an input of all ones is a constant.\n\n Given the filter coefficients `a` and `b`, the state space matrices\n for the transposed direct form II implementation of the linear filter,\n which is the implementation used by scipy.signal.lfilter, are::\n\n A = scipy.linalg.companion(a).T\n B = b[1:] - a[1:]*b[0]\n\n assuming `a[0]` is 1.0; if `a[0]` is not 1, `a` and `b` are first\n divided by a[0].\n\n Examples\n --------\n The following code creates a lowpass Butterworth filter. Then it\n applies that filter to an array whose values are all 1.0; the\n output is also all 1.0, as expected for a lowpass filter. If the\n `zi` argument of `lfilter` had not been given, the output would have\n shown the transient signal.\n\n >>> from numpy import array, ones\n >>> from scipy.signal import lfilter, lfilter_zi, butter\n >>> b, a = butter(5, 0.25)\n >>> zi = lfilter_zi(b, a)\n >>> y, zo = lfilter(b, a, ones(10), zi=zi)\n >>> y\n array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])\n\n Another example:\n\n >>> x = array([0.5, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0])\n >>> y, zf = lfilter(b, a, x, zi=zi*x[0])\n >>> y\n array([ 0.5 , 0.5 , 0.5 , 0.49836039, 0.48610528,\n 0.44399389, 0.35505241])\n\n Note that the `zi` argument to `lfilter` was computed using\n `lfilter_zi` and scaled by `x[0]`. Then the output `y` has no\n transient until the input drops from 0.5 to 0.0.\n\n \"\"\"\n\n # FIXME: Can this function be replaced with an appropriate\n # use of lfiltic? For example, when b,a = butter(N,Wn),\n # lfiltic(b, a, y=numpy.ones_like(a), x=numpy.ones_like(b)).\n #\n\n # We could use scipy.signal.normalize, but it uses warnings in\n # cases where a ValueError is more appropriate, and it allows\n # b to be 2D.\n b = np.atleast_1d(b)\n if b.ndim != 1:\n raise ValueError(\"Numerator b must be 1-D.\")\n a = np.atleast_1d(a)\n if a.ndim != 1:\n raise ValueError(\"Denominator a must be 1-D.\")\n\n while len(a) > 1 and a[0] == 0.0:\n a = a[1:]\n if a.size < 1:\n raise ValueError(\"There must be at least one nonzero `a` coefficient.\")\n\n if a[0] != 1.0:\n # Normalize the coefficients so a[0] == 1.\n b = b / a[0]\n a = a / a[0]\n\n n = max(len(a), len(b))\n\n # Pad a or b with zeros so they are the same length.\n if len(a) < n:\n a = np.r_[a, np.zeros(n - len(a))]\n elif len(b) < n:\n b = np.r_[b, np.zeros(n - len(b))]\n\n IminusA = np.eye(n - 1, dtype=np.result_type(a, b)) - linalg.companion(a).T\n B = b[1:] - a[1:] * b[0]\n # Solve zi = A*zi + B\n zi = np.linalg.solve(IminusA, B)\n\n # For future reference: we could also use the following\n # explicit formulas to solve the linear system:\n #\n # zi = np.zeros(n - 1)\n # zi[0] = B.sum() / IminusA[:,0].sum()\n # asum = 1.0\n # csum = 0.0\n # for k in range(1,n-1):\n # asum += a[k]\n # csum += b[k] - a[k]*b[0]\n # zi[k] = asum*zi[0] - csum\n\n return zi\n\n\ndef sosfilt_zi(sos):\n \"\"\"\n Construct initial conditions for sosfilt for step response steady-state.\n\n Compute an initial state `zi` for the `sosfilt` function that corresponds\n to the steady state of the step response.\n\n A typical use of this function is to set the initial state so that the\n output of the filter starts at the same value as the first element of\n the signal to be filtered.\n\n Parameters\n ----------\n sos : array_like\n Array of second-order filter coefficients, must have shape\n ``(n_sections, 6)``. 
See `sosfilt` for the SOS filter format\n specification.\n\n Returns\n -------\n zi : ndarray\n Initial conditions suitable for use with ``sosfilt``, shape\n ``(n_sections, 2)``.\n\n See Also\n --------\n sosfilt, zpk2sos\n\n Notes\n -----\n .. versionadded:: 0.16.0\n\n Examples\n --------\n Filter a rectangular pulse that begins at time 0, with and without\n the use of the `zi` argument of `scipy.signal.sosfilt`.\n\n >>> from scipy import signal\n >>> import matplotlib.pyplot as plt\n\n >>> sos = signal.butter(9, 0.125, output='sos')\n >>> zi = signal.sosfilt_zi(sos)\n >>> x = (np.arange(250) < 100).astype(int)\n >>> f1 = signal.sosfilt(sos, x)\n >>> f2, zo = signal.sosfilt(sos, x, zi=zi)\n\n >>> plt.plot(x, 'k--', label='x')\n >>> plt.plot(f1, 'b', alpha=0.5, linewidth=2, label='filtered')\n >>> plt.plot(f2, 'g', alpha=0.25, linewidth=4, label='filtered with zi')\n >>> plt.legend(loc='best')\n >>> plt.show()\n\n \"\"\"\n sos = np.asarray(sos)\n if sos.ndim != 2 or sos.shape[1] != 6:\n raise ValueError('sos must be shape (n_sections, 6)')\n\n if sos.dtype.kind in 'bui':\n sos = sos.astype(np.float64)\n\n n_sections = sos.shape[0]\n zi = np.empty((n_sections, 2), dtype=sos.dtype)\n scale = 1.0\n for section in range(n_sections):\n b = sos[section, :3]\n a = sos[section, 3:]\n zi[section] = scale * lfilter_zi(b, a)\n # If H(z) = B(z)/A(z) is this section's transfer function, then\n # b.sum()/a.sum() is H(1), the gain at omega=0. That's the steady\n # state value of this section's step response.\n scale *= b.sum() / a.sum()\n\n return zi\n\n\ndef _filtfilt_gust(b, a, x, axis=-1, irlen=None):\n \"\"\"Forward-backward IIR filter that uses Gustafsson's method.\n\n Apply the IIR filter defined by `(b,a)` to `x` twice, first forward\n then backward, using Gustafsson's initial conditions [1]_.\n\n Let ``y_fb`` be the result of filtering first forward and then backward,\n and let ``y_bf`` be the result of filtering first backward then forward.\n Gustafsson's method is to compute initial conditions for the forward\n pass and the backward pass such that ``y_fb == y_bf``.\n\n Parameters\n ----------\n b : scalar or 1-D ndarray\n Numerator coefficients of the filter.\n a : scalar or 1-D ndarray\n Denominator coefficients of the filter.\n x : ndarray\n Data to be filtered.\n axis : int, optional\n Axis of `x` to be filtered. Default is -1.\n irlen : int or None, optional\n The length of the nonnegligible part of the impulse response.\n If `irlen` is None, or if the length of the signal is less than\n ``2 * irlen``, then no part of the impulse response is ignored.\n\n Returns\n -------\n y : ndarray\n The filtered data.\n x0 : ndarray\n Initial condition for the forward filter.\n x1 : ndarray\n Initial condition for the backward filter.\n\n Notes\n -----\n Typically the return values `x0` and `x1` are not needed by the\n caller. The intended use of these return values is in unit tests.\n\n References\n ----------\n .. [1] F. Gustaffson. Determining the initial states in forward-backward\n filtering. 
Transactions on Signal Processing, 46(4):988-992, 1996.\n\n \"\"\"\n # In the comments, \"Gustafsson's paper\" and [1] refer to the\n # paper referenced in the docstring.\n\n b = np.atleast_1d(b)\n a = np.atleast_1d(a)\n\n order = max(len(b), len(a)) - 1\n if order == 0:\n # The filter is just scalar multiplication, with no state.\n scale = (b[0] / a[0])**2\n y = scale * x\n return y, np.array([]), np.array([])\n\n if axis != -1 or axis != x.ndim - 1:\n # Move the axis containing the data to the end.\n x = np.swapaxes(x, axis, x.ndim - 1)\n\n # n is the number of samples in the data to be filtered.\n n = x.shape[-1]\n\n if irlen is None or n <= 2*irlen:\n m = n\n else:\n m = irlen\n\n # Create Obs, the observability matrix (called O in the paper).\n # This matrix can be interpreted as the operator that propagates\n # an arbitrary initial state to the output, assuming the input is\n # zero.\n # In Gustafsson's paper, the forward and backward filters are not\n # necessarily the same, so he has both O_f and O_b. We use the same\n # filter in both directions, so we only need O. The same comment\n # applies to S below.\n Obs = np.zeros((m, order))\n zi = np.zeros(order)\n zi[0] = 1\n Obs[:, 0] = lfilter(b, a, np.zeros(m), zi=zi)[0]\n for k in range(1, order):\n Obs[k:, k] = Obs[:-k, 0]\n\n # Obsr is O^R (Gustafsson's notation for row-reversed O)\n Obsr = Obs[::-1]\n\n # Create S. S is the matrix that applies the filter to the reversed\n # propagated initial conditions. That is,\n # out = S.dot(zi)\n # is the same as\n # tmp, _ = lfilter(b, a, zeros(), zi=zi) # Propagate ICs.\n # out = lfilter(b, a, tmp[::-1]) # Reverse and filter.\n\n # Equations (5) & (6) of [1]\n S = lfilter(b, a, Obs[::-1], axis=0)\n\n # Sr is S^R (row-reversed S)\n Sr = S[::-1]\n\n # M is [(S^R - O), (O^R - S)]\n if m == n:\n M = np.hstack((Sr - Obs, Obsr - S))\n else:\n # Matrix described in section IV of [1].\n M = np.zeros((2*m, 2*order))\n M[:m, :order] = Sr - Obs\n M[m:, order:] = Obsr - S\n\n # Naive forward-backward and backward-forward filters.\n # These have large transients because the filters use zero initial\n # conditions.\n y_f = lfilter(b, a, x)\n y_fb = lfilter(b, a, y_f[..., ::-1])[..., ::-1]\n\n y_b = lfilter(b, a, x[..., ::-1])[..., ::-1]\n y_bf = lfilter(b, a, y_b)\n\n delta_y_bf_fb = y_bf - y_fb\n if m == n:\n delta = delta_y_bf_fb\n else:\n start_m = delta_y_bf_fb[..., :m]\n end_m = delta_y_bf_fb[..., -m:]\n delta = np.concatenate((start_m, end_m), axis=-1)\n\n # ic_opt holds the \"optimal\" initial conditions.\n # The following code computes the result shown in the formula\n # of the paper between equations (6) and (7).\n if delta.ndim == 1:\n ic_opt = linalg.lstsq(M, delta)[0]\n else:\n # Reshape delta so it can be used as an array of multiple\n # right-hand-sides in linalg.lstsq.\n delta2d = delta.reshape(-1, delta.shape[-1]).T\n ic_opt0 = linalg.lstsq(M, delta2d)[0].T\n ic_opt = ic_opt0.reshape(delta.shape[:-1] + (M.shape[-1],))\n\n # Now compute the filtered signal using equation (7) of [1].\n # First, form [S^R, O^R] and call it W.\n if m == n:\n W = np.hstack((Sr, Obsr))\n else:\n W = np.zeros((2*m, 2*order))\n W[:m, :order] = Sr\n W[m:, order:] = Obsr\n\n # Equation (7) of [1] says\n # Y_fb^opt = Y_fb^0 + W * [x_0^opt; x_{N-1}^opt]\n # `wic` is (almost) the product on the right.\n # W has shape (m, 2*order), and ic_opt has shape (..., 2*order),\n # so we can't use W.dot(ic_opt). 
Instead, we dot ic_opt with W.T,\n # so wic has shape (..., m).\n wic = ic_opt.dot(W.T)\n\n # `wic` is \"almost\" the product of W and the optimal ICs in equation\n # (7)--if we're using a truncated impulse response (m < n), `wic`\n # contains only the adjustments required for the ends of the signal.\n # Here we form y_opt, taking this into account if necessary.\n y_opt = y_fb\n if m == n:\n y_opt += wic\n else:\n y_opt[..., :m] += wic[..., :m]\n y_opt[..., -m:] += wic[..., -m:]\n\n x0 = ic_opt[..., :order]\n x1 = ic_opt[..., -order:]\n if axis != -1 or axis != x.ndim - 1:\n # Restore the data axis to its original position.\n x0 = np.swapaxes(x0, axis, x.ndim - 1)\n x1 = np.swapaxes(x1, axis, x.ndim - 1)\n y_opt = np.swapaxes(y_opt, axis, x.ndim - 1)\n\n return y_opt, x0, x1\n\n\ndef filtfilt(b, a, x, axis=-1, padtype='odd', padlen=None, method='pad',\n irlen=None):\n \"\"\"\n Apply a digital filter forward and backward to a signal.\n\n This function applies a linear digital filter twice, once forward and\n once backwards. The combined filter has zero phase and a filter order\n twice that of the original.\n\n The function provides options for handling the edges of the signal.\n\n The function `sosfiltfilt` (and filter design using ``output='sos'``)\n should be preferred over `filtfilt` for most filtering tasks, as\n second-order sections have fewer numerical problems.\n\n Parameters\n ----------\n b : (N,) array_like\n The numerator coefficient vector of the filter.\n a : (N,) array_like\n The denominator coefficient vector of the filter. If ``a[0]``\n is not 1, then both `a` and `b` are normalized by ``a[0]``.\n x : array_like\n The array of data to be filtered.\n axis : int, optional\n The axis of `x` to which the filter is applied.\n Default is -1.\n padtype : str or None, optional\n Must be 'odd', 'even', 'constant', or None. This determines the\n type of extension to use for the padded signal to which the filter\n is applied. If `padtype` is None, no padding is used. The default\n is 'odd'.\n padlen : int or None, optional\n The number of elements by which to extend `x` at both ends of\n `axis` before applying the filter. This value must be less than\n ``x.shape[axis] - 1``. ``padlen=0`` implies no padding.\n The default value is ``3 * max(len(a), len(b))``.\n method : str, optional\n Determines the method for handling the edges of the signal, either\n \"pad\" or \"gust\". When `method` is \"pad\", the signal is padded; the\n type of padding is determined by `padtype` and `padlen`, and `irlen`\n is ignored. When `method` is \"gust\", Gustafsson's method is used,\n and `padtype` and `padlen` are ignored.\n irlen : int or None, optional\n When `method` is \"gust\", `irlen` specifies the length of the\n impulse response of the filter. If `irlen` is None, no part\n of the impulse response is ignored. For a long signal, specifying\n `irlen` can significantly improve the performance of the filter.\n\n Returns\n -------\n y : ndarray\n The filtered output with the same shape as `x`.\n\n See Also\n --------\n sosfiltfilt, lfilter_zi, lfilter, lfiltic, savgol_filter, sosfilt\n\n Notes\n -----\n When `method` is \"pad\", the function pads the data along the given axis\n in one of three ways: odd, even or constant. The odd and even extensions\n have the corresponding symmetry about the end point of the data. The\n constant extension extends the data with the values at the end points. 
On\n both the forward and backward passes, the initial condition of the\n filter is found by using `lfilter_zi` and scaling it by the end point of\n the extended data.\n\n When `method` is \"gust\", Gustafsson's method [1]_ is used. Initial\n conditions are chosen for the forward and backward passes so that the\n forward-backward filter gives the same result as the backward-forward\n filter.\n\n The option to use Gustaffson's method was added in scipy version 0.16.0.\n\n References\n ----------\n .. [1] F. Gustaffson, \"Determining the initial states in forward-backward\n filtering\", Transactions on Signal Processing, Vol. 46, pp. 988-992,\n 1996.\n\n Examples\n --------\n The examples will use several functions from `scipy.signal`.\n\n >>> from scipy import signal\n >>> import matplotlib.pyplot as plt\n\n First we create a one second signal that is the sum of two pure sine\n waves, with frequencies 5 Hz and 250 Hz, sampled at 2000 Hz.\n\n >>> t = np.linspace(0, 1.0, 2001)\n >>> xlow = np.sin(2 * np.pi * 5 * t)\n >>> xhigh = np.sin(2 * np.pi * 250 * t)\n >>> x = xlow + xhigh\n\n Now create a lowpass Butterworth filter with a cutoff of 0.125 times\n the Nyquist frequency, or 125 Hz, and apply it to ``x`` with `filtfilt`.\n The result should be approximately ``xlow``, with no phase shift.\n\n >>> b, a = signal.butter(8, 0.125)\n >>> y = signal.filtfilt(b, a, x, padlen=150)\n >>> np.abs(y - xlow).max()\n 9.1086182074789912e-06\n\n We get a fairly clean result for this artificial example because\n the odd extension is exact, and with the moderately long padding,\n the filter's transients have dissipated by the time the actual data\n is reached. In general, transient effects at the edges are\n unavoidable.\n\n The following example demonstrates the option ``method=\"gust\"``.\n\n First, create a filter.\n\n >>> b, a = signal.ellip(4, 0.01, 120, 0.125) # Filter to be applied.\n >>> np.random.seed(123456)\n\n `sig` is a random input signal to be filtered.\n\n >>> n = 60\n >>> sig = np.random.randn(n)**3 + 3*np.random.randn(n).cumsum()\n\n Apply `filtfilt` to `sig`, once using the Gustafsson method, and\n once using padding, and plot the results for comparison.\n\n >>> fgust = signal.filtfilt(b, a, sig, method=\"gust\")\n >>> fpad = signal.filtfilt(b, a, sig, padlen=50)\n >>> plt.plot(sig, 'k-', label='input')\n >>> plt.plot(fgust, 'b-', linewidth=4, label='gust')\n >>> plt.plot(fpad, 'c-', linewidth=1.5, label='pad')\n >>> plt.legend(loc='best')\n >>> plt.show()\n\n The `irlen` argument can be used to improve the performance\n of Gustafsson's method.\n\n Estimate the impulse response length of the filter.\n\n >>> z, p, k = signal.tf2zpk(b, a)\n >>> eps = 1e-9\n >>> r = np.max(np.abs(p))\n >>> approx_impulse_len = int(np.ceil(np.log(eps) / np.log(r)))\n >>> approx_impulse_len\n 137\n\n Apply the filter to a longer signal, with and without the `irlen`\n argument. The difference between `y1` and `y2` is small. 
For long\n signals, using `irlen` gives a significant performance improvement.\n\n >>> x = np.random.randn(5000)\n >>> y1 = signal.filtfilt(b, a, x, method='gust')\n >>> y2 = signal.filtfilt(b, a, x, method='gust', irlen=approx_impulse_len)\n >>> print(np.max(np.abs(y1 - y2)))\n 1.80056858312e-10\n\n \"\"\"\n b = np.atleast_1d(b)\n a = np.atleast_1d(a)\n x = np.asarray(x)\n\n if method not in [\"pad\", \"gust\"]:\n raise ValueError(\"method must be 'pad' or 'gust'.\")\n\n if method == \"gust\":\n y, z1, z2 = _filtfilt_gust(b, a, x, axis=axis, irlen=irlen)\n return y\n\n # method == \"pad\"\n edge, ext = _validate_pad(padtype, padlen, x, axis,\n ntaps=max(len(a), len(b)))\n\n # Get the steady state of the filter's step response.\n zi = lfilter_zi(b, a)\n\n # Reshape zi and create x0 so that zi*x0 broadcasts\n # to the correct value for the 'zi' keyword argument\n # to lfilter.\n zi_shape = [1] * x.ndim\n zi_shape[axis] = zi.size\n zi = np.reshape(zi, zi_shape)\n x0 = axis_slice(ext, stop=1, axis=axis)\n\n # Forward filter.\n (y, zf) = lfilter(b, a, ext, axis=axis, zi=zi * x0)\n\n # Backward filter.\n # Create y0 so zi*y0 broadcasts appropriately.\n y0 = axis_slice(y, start=-1, axis=axis)\n (y, zf) = lfilter(b, a, axis_reverse(y, axis=axis), axis=axis, zi=zi * y0)\n\n # Reverse y.\n y = axis_reverse(y, axis=axis)\n\n if edge > 0:\n # Slice the actual signal from the extended signal.\n y = axis_slice(y, start=edge, stop=-edge, axis=axis)\n\n return y\n\n\ndef _validate_pad(padtype, padlen, x, axis, ntaps):\n \"\"\"Helper to validate padding for filtfilt\"\"\"\n if padtype not in ['even', 'odd', 'constant', None]:\n raise ValueError((\"Unknown value '%s' given to padtype. padtype \"\n \"must be 'even', 'odd', 'constant', or None.\") %\n padtype)\n\n if padtype is None:\n padlen = 0\n\n if padlen is None:\n # Original padding; preserved for backwards compatibility.\n edge = ntaps * 3\n else:\n edge = padlen\n\n # x's 'axis' dimension must be bigger than edge.\n if x.shape[axis] <= edge:\n raise ValueError(\"The length of the input vector x must be greater \"\n \"than padlen, which is %d.\" % edge)\n\n if padtype is not None and edge > 0:\n # Make an extension of length `edge` at each\n # end of the input array.\n if padtype == 'even':\n ext = even_ext(x, edge, axis=axis)\n elif padtype == 'odd':\n ext = odd_ext(x, edge, axis=axis)\n else:\n ext = const_ext(x, edge, axis=axis)\n else:\n ext = x\n return edge, ext\n\n\ndef _validate_x(x):\n x = np.asarray(x)\n if x.ndim == 0:\n raise ValueError('x must be at least 1-D')\n return x\n\n\ndef sosfilt(sos, x, axis=-1, zi=None):\n \"\"\"\n Filter data along one dimension using cascaded second-order sections.\n\n Filter a data sequence, `x`, using a digital IIR filter defined by\n `sos`.\n\n Parameters\n ----------\n sos : array_like\n Array of second-order filter coefficients, must have shape\n ``(n_sections, 6)``. Each row corresponds to a second-order\n section, with the first three columns providing the numerator\n coefficients and the last three providing the denominator\n coefficients.\n x : array_like\n An N-dimensional input array.\n axis : int, optional\n The axis of the input data array along which to apply the\n linear filter. The filter is applied to each subarray along\n this axis. Default is -1.\n zi : array_like, optional\n Initial conditions for the cascaded filter delays. It is a (at\n least 2D) vector of shape ``(n_sections, ..., 2, ...)``, where\n ``..., 2, ...`` denotes the shape of `x`, but with ``x.shape[axis]``\n replaced by 2. 
If `zi` is None or is not given then initial rest\n (i.e. all zeros) is assumed.\n Note that these initial conditions are *not* the same as the initial\n conditions given by `lfiltic` or `lfilter_zi`.\n\n Returns\n -------\n y : ndarray\n The output of the digital filter.\n zf : ndarray, optional\n If `zi` is None, this is not returned, otherwise, `zf` holds the\n final filter delay values.\n\n See Also\n --------\n zpk2sos, sos2zpk, sosfilt_zi, sosfiltfilt, sosfreqz\n\n Notes\n -----\n The filter function is implemented as a series of second-order filters\n with direct-form II transposed structure. It is designed to minimize\n numerical precision errors for high-order filters.\n\n .. versionadded:: 0.16.0\n\n Examples\n --------\n Plot a 13th-order filter's impulse response using both `lfilter` and\n `sosfilt`, showing the instability that results from trying to do a\n 13th-order filter in a single stage (the numerical error pushes some poles\n outside of the unit circle):\n\n >>> import matplotlib.pyplot as plt\n >>> from scipy import signal\n >>> b, a = signal.ellip(13, 0.009, 80, 0.05, output='ba')\n >>> sos = signal.ellip(13, 0.009, 80, 0.05, output='sos')\n >>> x = signal.unit_impulse(700)\n >>> y_tf = signal.lfilter(b, a, x)\n >>> y_sos = signal.sosfilt(sos, x)\n >>> plt.plot(y_tf, 'r', label='TF')\n >>> plt.plot(y_sos, 'k', label='SOS')\n >>> plt.legend(loc='best')\n >>> plt.show()\n\n \"\"\"\n x = _validate_x(x)\n sos, n_sections = _validate_sos(sos)\n x_zi_shape = list(x.shape)\n x_zi_shape[axis] = 2\n x_zi_shape = tuple([n_sections] + x_zi_shape)\n inputs = [sos, x]\n if zi is not None:\n inputs.append(np.asarray(zi))\n dtype = np.result_type(*inputs)\n if dtype.char not in 'fdgFDGO':\n raise NotImplementedError(\"input type '%s' not supported\" % dtype)\n if zi is not None:\n zi = np.array(zi, dtype) # make a copy so that we can operate in place\n if zi.shape != x_zi_shape:\n raise ValueError('Invalid zi shape. With axis=%r, an input with '\n 'shape %r, and an sos array with %d sections, zi '\n 'must have shape %r, got %r.' %\n (axis, x.shape, n_sections, x_zi_shape, zi.shape))\n return_zi = True\n else:\n zi = np.zeros(x_zi_shape, dtype=dtype)\n return_zi = False\n axis = axis % x.ndim # make positive\n x = np.moveaxis(x, axis, -1)\n zi = np.moveaxis(zi, [0, axis + 1], [-2, -1])\n x_shape, zi_shape = x.shape, zi.shape\n x = np.reshape(x, (-1, x.shape[-1]))\n x = np.array(x, dtype, order='C') # make a copy, can modify in place\n zi = np.ascontiguousarray(np.reshape(zi, (-1, n_sections, 2)))\n sos = sos.astype(dtype, copy=False)\n _sosfilt(sos, x, zi)\n x.shape = x_shape\n x = np.moveaxis(x, -1, axis)\n if return_zi:\n zi.shape = zi_shape\n zi = np.moveaxis(zi, [-2, -1], [0, axis + 1])\n out = (x, zi)\n else:\n out = x\n return out\n\n\ndef sosfiltfilt(sos, x, axis=-1, padtype='odd', padlen=None):\n \"\"\"\n A forward-backward digital filter using cascaded second-order sections.\n\n See `filtfilt` for more complete information about this method.\n\n Parameters\n ----------\n sos : array_like\n Array of second-order filter coefficients, must have shape\n ``(n_sections, 6)``. Each row corresponds to a second-order\n section, with the first three columns providing the numerator\n coefficients and the last three providing the denominator\n coefficients.\n x : array_like\n The array of data to be filtered.\n axis : int, optional\n The axis of `x` to which the filter is applied.\n Default is -1.\n padtype : str or None, optional\n Must be 'odd', 'even', 'constant', or None. 
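# A small sketch, with an assumed filter and signal, of the zi shape sosfilt
# expects for N-D input: (n_sections, ..., 2, ...) with x.shape[axis] replaced
# by 2. The broadcasting below scales the steady-state zi by the first sample
# of each row, mirroring the sosfilt_zi usage shown earlier.
import numpy as np
from scipy.signal import butter, sosfilt, sosfilt_zi

sos = butter(4, 0.2, output='sos')        # 2 second-order sections
x = np.random.randn(3, 100)               # filter each row along axis=-1
zi = sosfilt_zi(sos)                      # shape (2, 2)
zi = zi[:, np.newaxis, :] * x[:, :1]      # broadcast to shape (2, 3, 2)
y, zf = sosfilt(sos, x, zi=zi, axis=-1)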
This determines the\n type of extension to use for the padded signal to which the filter\n is applied. If `padtype` is None, no padding is used. The default\n is 'odd'.\n padlen : int or None, optional\n The number of elements by which to extend `x` at both ends of\n `axis` before applying the filter. This value must be less than\n ``x.shape[axis] - 1``. ``padlen=0`` implies no padding.\n The default value is::\n\n 3 * (2 * len(sos) + 1 - min((sos[:, 2] == 0).sum(),\n (sos[:, 5] == 0).sum()))\n\n The extra subtraction at the end attempts to compensate for poles\n and zeros at the origin (e.g. for odd-order filters) to yield\n equivalent estimates of `padlen` to those of `filtfilt` for\n second-order section filters built with `scipy.signal` functions.\n\n Returns\n -------\n y : ndarray\n The filtered output with the same shape as `x`.\n\n See Also\n --------\n filtfilt, sosfilt, sosfilt_zi, sosfreqz\n\n Notes\n -----\n .. versionadded:: 0.18.0\n\n Examples\n --------\n >>> from scipy.signal import sosfiltfilt, butter\n >>> import matplotlib.pyplot as plt\n\n Create an interesting signal to filter.\n\n >>> n = 201\n >>> t = np.linspace(0, 1, n)\n >>> np.random.seed(123)\n >>> x = 1 + (t < 0.5) - 0.25*t**2 + 0.05*np.random.randn(n)\n\n Create a lowpass Butterworth filter, and use it to filter `x`.\n\n >>> sos = butter(4, 0.125, output='sos')\n >>> y = sosfiltfilt(sos, x)\n\n For comparison, apply an 8th order filter using `sosfilt`. The filter\n is initialized using the mean of the first four values of `x`.\n\n >>> from scipy.signal import sosfilt, sosfilt_zi\n >>> sos8 = butter(8, 0.125, output='sos')\n >>> zi = x[:4].mean() * sosfilt_zi(sos8)\n >>> y2, zo = sosfilt(sos8, x, zi=zi)\n\n Plot the results. Note that the phase of `y` matches the input, while\n `y2` has a significant phase delay.\n\n >>> plt.plot(t, x, alpha=0.5, label='x(t)')\n >>> plt.plot(t, y, label='y(t)')\n >>> plt.plot(t, y2, label='y2(t)')\n >>> plt.legend(framealpha=1, shadow=True)\n >>> plt.grid(alpha=0.25)\n >>> plt.xlabel('t')\n >>> plt.show()\n\n \"\"\"\n sos, n_sections = _validate_sos(sos)\n x = _validate_x(x)\n\n # `method` is \"pad\"...\n ntaps = 2 * n_sections + 1\n ntaps -= min((sos[:, 2] == 0).sum(), (sos[:, 5] == 0).sum())\n edge, ext = _validate_pad(padtype, padlen, x, axis,\n ntaps=ntaps)\n\n # These steps follow the same form as filtfilt with modifications\n zi = sosfilt_zi(sos) # shape (n_sections, 2) --> (n_sections, ..., 2, ...)\n zi_shape = [1] * x.ndim\n zi_shape[axis] = 2\n zi.shape = [n_sections] + zi_shape\n x_0 = axis_slice(ext, stop=1, axis=axis)\n (y, zf) = sosfilt(sos, ext, axis=axis, zi=zi * x_0)\n y_0 = axis_slice(y, start=-1, axis=axis)\n (y, zf) = sosfilt(sos, axis_reverse(y, axis=axis), axis=axis, zi=zi * y_0)\n y = axis_reverse(y, axis=axis)\n if edge > 0:\n y = axis_slice(y, start=edge, stop=-edge, axis=axis)\n return y\n\n\ndef decimate(x, q, n=None, ftype='iir', axis=-1, zero_phase=True):\n \"\"\"\n Downsample the signal after applying an anti-aliasing filter.\n\n By default, an order 8 Chebyshev type I filter is used. A 30 point FIR\n filter with Hamming window is used if `ftype` is 'fir'.\n\n Parameters\n ----------\n x : array_like\n The signal to be downsampled, as an N-dimensional array.\n q : int\n The downsampling factor. When using IIR downsampling, it is recommended\n to call `decimate` multiple times for downsampling factors higher than\n 13.\n n : int, optional\n The order of the filter (1 less than the length for 'fir'). 
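# A minimal sketch, with an assumed signal, of decimating by a factor of 4 using
# the default zero-phase IIR anti-aliasing filter; the frequencies and lengths
# below are illustrative only.
import numpy as np
from scipy.signal import decimate

t = np.linspace(0, 1, 1000, endpoint=False)
x = np.sin(2 * np.pi * 5 * t) + 0.1 * np.sin(2 * np.pi * 200 * t)
y = decimate(x, 4)   # 250 samples; the 200 Hz component lies above the new Nyquist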
Defaults to\n 8 for 'iir' and 20 times the downsampling factor for 'fir'.\n ftype : str {'iir', 'fir'} or ``dlti`` instance, optional\n If 'iir' or 'fir', specifies the type of lowpass filter. If an instance\n of an `dlti` object, uses that object to filter before downsampling.\n axis : int, optional\n The axis along which to decimate.\n zero_phase : bool, optional\n Prevent phase shift by filtering with `filtfilt` instead of `lfilter`\n when using an IIR filter, and shifting the outputs back by the filter's\n group delay when using an FIR filter. The default value of ``True`` is\n recommended, since a phase shift is generally not desired.\n\n .. versionadded:: 0.18.0\n\n Returns\n -------\n y : ndarray\n The down-sampled signal.\n\n See Also\n --------\n resample : Resample up or down using the FFT method.\n resample_poly : Resample using polyphase filtering and an FIR filter.\n\n Notes\n -----\n The ``zero_phase`` keyword was added in 0.18.0.\n The possibility to use instances of ``dlti`` as ``ftype`` was added in\n 0.18.0.\n \"\"\"\n\n x = np.asarray(x)\n q = operator.index(q)\n\n if n is not None:\n n = operator.index(n)\n\n if ftype == 'fir':\n if n is None:\n half_len = 10 * q # reasonable cutoff for our sinc-like function\n n = 2 * half_len\n b, a = firwin(n+1, 1. / q, window='hamming'), 1.\n elif ftype == 'iir':\n if n is None:\n n = 8\n system = dlti(*cheby1(n, 0.05, 0.8 / q))\n b, a = system.num, system.den\n elif isinstance(ftype, dlti):\n system = ftype._as_tf() # Avoids copying if already in TF form\n b, a = system.num, system.den\n else:\n raise ValueError('invalid ftype')\n\n result_type = x.dtype\n if result_type.kind in 'bui':\n result_type = np.float64\n b = np.asarray(b, dtype=result_type)\n a = np.asarray(a, dtype=result_type)\n\n sl = [slice(None)] * x.ndim\n a = np.asarray(a)\n\n if a.size == 1: # FIR case\n b = b / a\n if zero_phase:\n y = resample_poly(x, 1, q, axis=axis, window=b)\n else:\n # upfirdn is generally faster than lfilter by a factor equal to the\n # downsampling factor, since it only calculates the needed outputs\n n_out = x.shape[axis] // q + bool(x.shape[axis] % q)\n y = upfirdn(b, x, up=1, down=q, axis=axis)\n sl[axis] = slice(None, n_out, None)\n\n else: # IIR case\n if zero_phase:\n y = filtfilt(b, a, x, axis=axis)\n else:\n y = lfilter(b, a, x, axis=axis)\n sl[axis] = slice(None, None, q)\n\n return y[tuple(sl)]\n", "'''What's the origin of this file? 
It is not ours.\nDoes not run because of missing mtx files, now included\n\nchanges: JP corrections to imports so it runs, comment out print\n'''\nimport numpy as np\nfrom numpy import dot, outer, random\nfrom scipy import io, linalg, optimize\nfrom scipy.sparse import eye as speye\nimport matplotlib.pyplot as plt\n\ndef R(v):\n rq = dot(v.T,A*v)/dot(v.T,B*v)\n res = (A*v-rq*B*v)/linalg.norm(B*v)\n data.append(linalg.norm(res))\n return rq\n\ndef Rp(v):\n \"\"\" Gradient \"\"\"\n result = 2*(A*v-R(v)*B*v)/dot(v.T,B*v)\n #print \"Rp: \", result\n return result\n\ndef Rpp(v):\n \"\"\" Hessian \"\"\"\n result = 2*(A-R(v)*B-outer(B*v,Rp(v))-outer(Rp(v),B*v))/dot(v.T,B*v)\n #print \"Rpp: \", result\n return result\n\n\nA = io.mmread('nos4.mtx') # clustered eigenvalues\n#B = io.mmread('bcsstm02.mtx.gz')\n#A = io.mmread('bcsstk06.mtx.gz') # clustered eigenvalues\n#B = io.mmread('bcsstm06.mtx.gz')\nn = A.shape[0]\nB = speye(n,n)\nrandom.seed(1)\nv_0=random.rand(n)\n\nprint(\"try fmin_bfgs\")\nfull_output = 1\ndata=[]\nv,fopt, gopt, Hopt, func_calls, grad_calls, warnflag, allvecs = \\\n optimize.fmin_bfgs(R,v_0,fprime=Rp,full_output=full_output,retall=1)\nif warnflag == 0:\n plt.semilogy(np.arange(0,len(data)),data)\n print('Rayleigh quotient BFGS',R(v))\n\n\nprint(\"fmin_bfgs OK\")\n\nprint(\"try fmin_ncg\")\n\n#\n# WARNING: the program may hangs if fmin_ncg is used\n#\ndata=[]\nv,fopt, fcalls, gcalls, hcalls, warnflag, allvecs = \\\n optimize.fmin_ncg(R,v_0,fprime=Rp,fhess=Rpp,full_output=full_output,retall=1)\nif warnflag==0:\n plt.figure()\n plt.semilogy(np.arange(0,len(data)),data)\n print('Rayleigh quotient NCG',R(v))\n", "\"\"\"\nGeneralized linear models currently supports estimation using the one-parameter\nexponential families\n\nReferences\n----------\nGill, Jeff. 2000. Generalized Linear Models: A Unified Approach.\n SAGE QASS Series.\n\nGreen, PJ. 1984. \"Iteratively reweighted least squares for maximum\n likelihood estimation, and some robust and resistant alternatives.\"\n Journal of the Royal Statistical Society, Series B, 46, 149-192.\n\nHardin, J.W. and Hilbe, J.M. 2007. \"Generalized Linear Models and\n Extensions.\" 2nd ed. Stata Press, College Station, TX.\n\nMcCullagh, P. and Nelder, J.A. 1989. \"Generalized Linear Models.\" 2nd ed.\n Chapman & Hall, Boca Rotan.\n\"\"\"\nimport numpy as np\n\nfrom . import families\n\nfrom statsmodels.tools.decorators import (cache_readonly,\n cached_data, cached_value)\nfrom statsmodels.tools.validation import float_like\nfrom statsmodels.compat.pandas import Appender\n\nimport statsmodels.base.model as base\nimport statsmodels.regression.linear_model as lm\nimport statsmodels.base.wrapper as wrap\nimport statsmodels.regression._tools as reg_tools\nimport warnings\n\nfrom statsmodels.graphics._regressionplots_doc import (\n _plot_added_variable_doc,\n _plot_partial_residuals_doc,\n _plot_ceres_residuals_doc)\n\n# need import in module instead of lazily to copy `__doc__`\nfrom . 
import _prediction as pred\nfrom statsmodels.genmod._prediction import PredictionResults\n\nfrom statsmodels.tools.sm_exceptions import (PerfectSeparationError,\n DomainWarning,\n HessianInversionWarning)\n\nfrom numpy.linalg.linalg import LinAlgError\n\n__all__ = ['GLM', 'PredictionResults']\n\n\ndef _check_convergence(criterion, iteration, atol, rtol):\n return np.allclose(criterion[iteration], criterion[iteration + 1],\n atol=atol, rtol=rtol)\n\n\n# Remove after 0.13 when bic changes to bic llf\nclass _ModuleVariable:\n _value = None\n\n @property\n def use_bic_llf(self):\n return self._value\n\n def set_use_bic_llf(self, val):\n if val not in (True, False, None):\n raise ValueError(\"Must be True, False or None\")\n self._value = bool(val) if val is not None else val\n\n\n_use_bic_helper = _ModuleVariable()\nSET_USE_BIC_LLF = _use_bic_helper.set_use_bic_llf\n\n\nclass GLM(base.LikelihoodModel):\n __doc__ = \"\"\"\n Generalized Linear Models\n\n GLM inherits from statsmodels.base.model.LikelihoodModel\n\n Parameters\n ----------\n endog : array_like\n 1d array of endogenous response variable. This array can be 1d or 2d.\n Binomial family models accept a 2d array with two columns. If\n supplied, each observation is expected to be [success, failure].\n exog : array_like\n A nobs x k array where `nobs` is the number of observations and `k`\n is the number of regressors. An intercept is not included by default\n and should be added by the user (models specified using a formula\n include an intercept by default). See `statsmodels.tools.add_constant`.\n family : family class instance\n The default is Gaussian. To specify the binomial distribution\n family = sm.family.Binomial()\n Each family can take a link instance as an argument. See\n statsmodels.family.family for more information.\n offset : array_like or None\n An offset to be included in the model. If provided, must be\n an array whose length is the number of rows in exog.\n exposure : array_like or None\n Log(exposure) will be added to the linear prediction in the model.\n Exposure is only valid if the log link is used. If provided, it must be\n an array with the same length as endog.\n freq_weights : array_like\n 1d array of frequency weights. The default is None. If None is selected\n or a blank value, then the algorithm will replace with an array of 1's\n with length equal to the endog.\n WARNING: Using weights is not verified yet for all possible options\n and results, see Notes.\n var_weights : array_like\n 1d array of variance (analytic) weights. The default is None. If None\n is selected or a blank value, then the algorithm will replace with an\n array of 1's with length equal to the endog.\n WARNING: Using weights is not verified yet for all possible options\n and results, see Notes.\n %(extra_params)s\n\n Attributes\n ----------\n df_model : float\n Model degrees of freedom is equal to p - 1, where p is the number\n of regressors. Note that the intercept is not reported as a\n degree of freedom.\n df_resid : float\n Residual degrees of freedom is equal to the number of observation n\n minus the number of regressors p.\n endog : ndarray\n See Notes. Note that `endog` is a reference to the data so that if\n data is already an array and it is changed, then `endog` changes\n as well.\n exposure : array_like\n Include ln(exposure) in model with coefficient constrained to 1. Can\n only be used if the link is the logarithm function.\n exog : ndarray\n See Notes. 
Note that `exog` is a reference to the data so that if\n data is already an array and it is changed, then `exog` changes\n as well.\n freq_weights : ndarray\n See Notes. Note that `freq_weights` is a reference to the data so that\n if data is already an array and it is changed, then `freq_weights`\n changes as well.\n var_weights : ndarray\n See Notes. Note that `var_weights` is a reference to the data so that\n if data is already an array and it is changed, then `var_weights`\n changes as well.\n iteration : int\n The number of iterations that fit has run. Initialized at 0.\n family : family class instance\n The distribution family of the model. Can be any family in\n statsmodels.families. Default is Gaussian.\n mu : ndarray\n The mean response of the transformed variable. `mu` is the value of\n the inverse of the link function at lin_pred, where lin_pred is the\n linear predicted value of the WLS fit of the transformed variable.\n `mu` is only available after fit is called. See\n statsmodels.families.family.fitted of the distribution family for more\n information.\n n_trials : ndarray\n See Notes. Note that `n_trials` is a reference to the data so that if\n data is already an array and it is changed, then `n_trials` changes\n as well. `n_trials` is the number of binomial trials and only available\n with that distribution. See statsmodels.families.Binomial for more\n information.\n normalized_cov_params : ndarray\n The p x p normalized covariance of the design / exogenous data.\n This is approximately equal to (X.T X)^(-1)\n offset : array_like\n Include offset in model with coefficient constrained to 1.\n scale : float\n The estimate of the scale / dispersion of the model fit. Only\n available after fit is called. See GLM.fit and GLM.estimate_scale\n for more information.\n scaletype : str\n The scaling used for fitting the model. This is only available after\n fit is called. The default is None. See GLM.fit for more information.\n weights : ndarray\n The value of the weights after the last iteration of fit. Only\n available after fit is called. See statsmodels.families.family for\n the specific distribution weighting functions.\n\n Examples\n --------\n >>> import statsmodels.api as sm\n >>> data = sm.datasets.scotland.load(as_pandas=False)\n >>> data.exog = sm.add_constant(data.exog)\n\n Instantiate a gamma family model with the default link function.\n\n >>> gamma_model = sm.GLM(data.endog, data.exog,\n ... 
family=sm.families.Gamma())\n\n >>> gamma_results = gamma_model.fit()\n >>> gamma_results.params\n array([-0.01776527, 0.00004962, 0.00203442, -0.00007181, 0.00011185,\n -0.00000015, -0.00051868, -0.00000243])\n >>> gamma_results.scale\n 0.0035842831734919055\n >>> gamma_results.deviance\n 0.087388516416999198\n >>> gamma_results.pearson_chi2\n 0.086022796163805704\n >>> gamma_results.llf\n -83.017202161073527\n\n See Also\n --------\n statsmodels.genmod.families.family.Family\n :ref:`families`\n :ref:`links`\n\n Notes\n -----\n Only the following combinations make sense for family and link:\n\n ============= ===== === ===== ====== ======= === ==== ====== ====== ====\n Family ident log logit probit cloglog pow opow nbinom loglog logc\n ============= ===== === ===== ====== ======= === ==== ====== ====== ====\n Gaussian x x x x x x x x x\n inv Gaussian x x x\n binomial x x x x x x x x x\n Poisson x x x\n neg binomial x x x x\n gamma x x x\n Tweedie x x x\n ============= ===== === ===== ====== ======= === ==== ====== ====== ====\n\n Not all of these link functions are currently available.\n\n Endog and exog are references so that if the data they refer to are already\n arrays and these arrays are changed, endog and exog will change.\n\n statsmodels supports two separate definitions of weights: frequency weights\n and variance weights.\n\n Frequency weights produce the same results as repeating observations by the\n frequencies (if those are integers). Frequency weights will keep the number\n of observations consistent, but the degrees of freedom will change to\n reflect the new weights.\n\n Variance weights (referred to in other packages as analytic weights) are\n used when ``endog`` represents an an average or mean. This relies on the\n assumption that that the inverse variance scales proportionally to the\n weight--an observation that is deemed more credible should have less\n variance and therefore have more weight. For the ``Poisson`` family--which\n assumes that occurrences scale proportionally with time--a natural practice\n would be to use the amount of time as the variance weight and set ``endog``\n to be a rate (occurrences per period of time). Similarly, using a\n compound Poisson family, namely ``Tweedie``, makes a similar assumption\n about the rate (or frequency) of occurrences having variance proportional to\n time.\n\n Both frequency and variance weights are verified for all basic results with\n nonrobust or heteroscedasticity robust ``cov_type``. 
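# --- Editor's note: illustrative sketch, not part of the original statsmodels
# --- source. The Notes above state that frequency weights reproduce the fit
# --- obtained by repeating observations by their (integer) frequencies; the
# --- data and weights below are hypothetical.
import numpy as np
import statsmodels.api as sm

y = np.array([1., 2., 3., 5.])
X = sm.add_constant(np.array([0., 1., 2., 3.]))
w = np.array([2, 1, 3, 1])  # hypothetical integer frequency weights

res_w = sm.GLM(y, X, freq_weights=w).fit()                        # weighted fit
res_r = sm.GLM(np.repeat(y, w), np.repeat(X, w, axis=0)).fit()    # rows repeated by w
assert np.allclose(res_w.params, res_r.params)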
Other robust\n covariance types have not yet been verified, and at least the small sample\n correction is currently not based on the correct total frequency count.\n\n Currently, all residuals are not weighted by frequency, although they may\n incorporate ``n_trials`` for ``Binomial`` and ``var_weights``\n\n +---------------+----------------------------------+\n | Residual Type | Applicable weights |\n +===============+==================================+\n | Anscombe | ``var_weights`` |\n +---------------+----------------------------------+\n | Deviance | ``var_weights`` |\n +---------------+----------------------------------+\n | Pearson | ``var_weights`` and ``n_trials`` |\n +---------------+----------------------------------+\n | Reponse | ``n_trials`` |\n +---------------+----------------------------------+\n | Working | ``n_trials`` |\n +---------------+----------------------------------+\n\n WARNING: Loglikelihood and deviance are not valid in models where\n scale is equal to 1 (i.e., ``Binomial``, ``NegativeBinomial``, and\n ``Poisson``). If variance weights are specified, then results such as\n ``loglike`` and ``deviance`` are based on a quasi-likelihood\n interpretation. The loglikelihood is not correctly specified in this case,\n and statistics based on it, such AIC or likelihood ratio tests, are not\n appropriate.\n \"\"\" % {'extra_params': base._missing_param_doc}\n # Maximum number of endogenous variables when using a formula\n _formula_max_endog = 2\n\n def __init__(self, endog, exog, family=None, offset=None,\n exposure=None, freq_weights=None, var_weights=None,\n missing='none', **kwargs):\n\n if (family is not None) and not isinstance(family.link,\n tuple(family.safe_links)):\n\n warnings.warn((f\"The {type(family.link).__name__} link function \"\n \"does not respect the domain of the \"\n f\"{type(family).__name__} family.\"),\n DomainWarning)\n\n if exposure is not None:\n exposure = np.log(exposure)\n if offset is not None: # this should probably be done upstream\n offset = np.asarray(offset)\n\n if freq_weights is not None:\n freq_weights = np.asarray(freq_weights)\n if var_weights is not None:\n var_weights = np.asarray(var_weights)\n\n self.freq_weights = freq_weights\n self.var_weights = var_weights\n\n super(GLM, self).__init__(endog, exog, missing=missing,\n offset=offset, exposure=exposure,\n freq_weights=freq_weights,\n var_weights=var_weights, **kwargs)\n self._check_inputs(family, self.offset, self.exposure, self.endog,\n self.freq_weights, self.var_weights)\n if offset is None:\n delattr(self, 'offset')\n if exposure is None:\n delattr(self, 'exposure')\n\n self.nobs = self.endog.shape[0]\n\n # things to remove_data\n self._data_attr.extend(['weights', 'mu', 'freq_weights',\n 'var_weights', 'iweights', '_offset_exposure',\n 'n_trials'])\n # register kwds for __init__, offset and exposure are added by super\n self._init_keys.append('family')\n\n self._setup_binomial()\n # internal usage for recreating a model\n if 'n_trials' in kwargs:\n self.n_trials = kwargs['n_trials']\n\n # Construct a combined offset/exposure term. 
Note that\n # exposure has already been logged if present.\n offset_exposure = 0.\n if hasattr(self, 'offset'):\n offset_exposure = self.offset\n if hasattr(self, 'exposure'):\n offset_exposure = offset_exposure + self.exposure\n self._offset_exposure = offset_exposure\n\n self.scaletype = None\n\n def initialize(self):\n \"\"\"\n Initialize a generalized linear model.\n \"\"\"\n self.df_model = np.linalg.matrix_rank(self.exog) - 1\n\n if (self.freq_weights is not None) and \\\n (self.freq_weights.shape[0] == self.endog.shape[0]):\n self.wnobs = self.freq_weights.sum()\n self.df_resid = self.wnobs - self.df_model - 1\n else:\n self.wnobs = self.exog.shape[0]\n self.df_resid = self.exog.shape[0] - self.df_model - 1\n\n def _check_inputs(self, family, offset, exposure, endog, freq_weights,\n var_weights):\n\n # Default family is Gaussian\n if family is None:\n family = families.Gaussian()\n self.family = family\n\n if exposure is not None:\n if not isinstance(self.family.link, families.links.Log):\n raise ValueError(\"exposure can only be used with the log \"\n \"link function\")\n elif exposure.shape[0] != endog.shape[0]:\n raise ValueError(\"exposure is not the same length as endog\")\n\n if offset is not None:\n if offset.shape[0] != endog.shape[0]:\n raise ValueError(\"offset is not the same length as endog\")\n\n if freq_weights is not None:\n if freq_weights.shape[0] != endog.shape[0]:\n raise ValueError(\"freq weights not the same length as endog\")\n if len(freq_weights.shape) > 1:\n raise ValueError(\"freq weights has too many dimensions\")\n\n # internal flag to store whether freq_weights were not None\n self._has_freq_weights = (self.freq_weights is not None)\n if self.freq_weights is None:\n self.freq_weights = np.ones((endog.shape[0]))\n # TODO: check do we want to keep None as sentinel for freq_weights\n\n if np.shape(self.freq_weights) == () and self.freq_weights > 1:\n self.freq_weights = (self.freq_weights *\n np.ones((endog.shape[0])))\n\n if var_weights is not None:\n if var_weights.shape[0] != endog.shape[0]:\n raise ValueError(\"var weights not the same length as endog\")\n if len(var_weights.shape) > 1:\n raise ValueError(\"var weights has too many dimensions\")\n\n # internal flag to store whether var_weights were not None\n self._has_var_weights = (var_weights is not None)\n if var_weights is None:\n self.var_weights = np.ones((endog.shape[0]))\n # TODO: check do we want to keep None as sentinel for var_weights\n self.iweights = np.asarray(self.freq_weights * self.var_weights)\n\n def _get_init_kwds(self):\n # this is a temporary fixup because exposure has been transformed\n # see #1609, copied from discrete_model.CountModel\n kwds = super(GLM, self)._get_init_kwds()\n if 'exposure' in kwds and kwds['exposure'] is not None:\n kwds['exposure'] = np.exp(kwds['exposure'])\n return kwds\n\n def loglike_mu(self, mu, scale=1.):\n \"\"\"\n Evaluate the log-likelihood for a generalized linear model.\n \"\"\"\n scale = float_like(scale, \"scale\")\n return self.family.loglike(self.endog, mu, self.var_weights,\n self.freq_weights, scale)\n\n def loglike(self, params, scale=None):\n \"\"\"\n Evaluate the log-likelihood for a generalized linear model.\n \"\"\"\n scale = float_like(scale, \"scale\", optional=True)\n lin_pred = np.dot(self.exog, params) + self._offset_exposure\n expval = self.family.link.inverse(lin_pred)\n if scale is None:\n scale = self.estimate_scale(expval)\n llf = self.family.loglike(self.endog, expval, self.var_weights,\n self.freq_weights, scale)\n return 
llf\n\n def score_obs(self, params, scale=None):\n \"\"\"score first derivative of the loglikelihood for each observation.\n\n Parameters\n ----------\n params : ndarray\n parameter at which score is evaluated\n scale : None or float\n If scale is None, then the default scale will be calculated.\n Default scale is defined by `self.scaletype` and set in fit.\n If scale is not None, then it is used as a fixed scale.\n\n Returns\n -------\n score_obs : ndarray, 2d\n The first derivative of the loglikelihood function evaluated at\n params for each observation.\n \"\"\"\n scale = float_like(scale, \"scale\", optional=True)\n score_factor = self.score_factor(params, scale=scale)\n return score_factor[:, None] * self.exog\n\n def score(self, params, scale=None):\n \"\"\"score, first derivative of the loglikelihood function\n\n Parameters\n ----------\n params : ndarray\n parameter at which score is evaluated\n scale : None or float\n If scale is None, then the default scale will be calculated.\n Default scale is defined by `self.scaletype` and set in fit.\n If scale is not None, then it is used as a fixed scale.\n\n Returns\n -------\n score : ndarray_1d\n The first derivative of the loglikelihood function calculated as\n the sum of `score_obs`\n \"\"\"\n scale = float_like(scale, \"scale\", optional=True)\n score_factor = self.score_factor(params, scale=scale)\n return np.dot(score_factor, self.exog)\n\n def score_factor(self, params, scale=None):\n \"\"\"weights for score for each observation\n\n This can be considered as score residuals.\n\n Parameters\n ----------\n params : ndarray\n parameter at which score is evaluated\n scale : None or float\n If scale is None, then the default scale will be calculated.\n Default scale is defined by `self.scaletype` and set in fit.\n If scale is not None, then it is used as a fixed scale.\n\n Returns\n -------\n score_factor : ndarray_1d\n A 1d weight vector used in the calculation of the score_obs.\n The score_obs are obtained by `score_factor[:, None] * exog`\n \"\"\"\n scale = float_like(scale, \"scale\", optional=True)\n mu = self.predict(params)\n if scale is None:\n scale = self.estimate_scale(mu)\n\n score_factor = (self.endog - mu) / self.family.link.deriv(mu)\n score_factor /= self.family.variance(mu)\n score_factor *= self.iweights * self.n_trials\n\n if not scale == 1:\n score_factor /= scale\n\n return score_factor\n\n def hessian_factor(self, params, scale=None, observed=True):\n \"\"\"Weights for calculating Hessian\n\n Parameters\n ----------\n params : ndarray\n parameter at which Hessian is evaluated\n scale : None or float\n If scale is None, then the default scale will be calculated.\n Default scale is defined by `self.scaletype` and set in fit.\n If scale is not None, then it is used as a fixed scale.\n observed : bool\n If True, then the observed Hessian is returned. 
If false then the\n expected information matrix is returned.\n\n Returns\n -------\n hessian_factor : ndarray, 1d\n A 1d weight vector used in the calculation of the Hessian.\n The hessian is obtained by `(exog.T * hessian_factor).dot(exog)`\n \"\"\"\n\n # calculating eim_factor\n mu = self.predict(params)\n if scale is None:\n scale = self.estimate_scale(mu)\n\n eim_factor = 1 / (self.family.link.deriv(mu)**2 *\n self.family.variance(mu))\n eim_factor *= self.iweights * self.n_trials\n\n if not observed:\n if not scale == 1:\n eim_factor /= scale\n return eim_factor\n\n # calculating oim_factor, eim_factor is with scale=1\n\n score_factor = self.score_factor(params, scale=1.)\n if eim_factor.ndim > 1 or score_factor.ndim > 1:\n raise RuntimeError('something wrong')\n\n tmp = self.family.variance(mu) * self.family.link.deriv2(mu)\n tmp += self.family.variance.deriv(mu) * self.family.link.deriv(mu)\n\n tmp = score_factor * tmp\n # correct for duplicatee iweights in oim_factor and score_factor\n tmp /= self.iweights * self.n_trials\n oim_factor = eim_factor * (1 + tmp)\n\n if tmp.ndim > 1:\n raise RuntimeError('something wrong')\n\n if not scale == 1:\n oim_factor /= scale\n\n return oim_factor\n\n def hessian(self, params, scale=None, observed=None):\n \"\"\"Hessian, second derivative of loglikelihood function\n\n Parameters\n ----------\n params : ndarray\n parameter at which Hessian is evaluated\n scale : None or float\n If scale is None, then the default scale will be calculated.\n Default scale is defined by `self.scaletype` and set in fit.\n If scale is not None, then it is used as a fixed scale.\n observed : bool\n If True, then the observed Hessian is returned (default).\n If false then the expected information matrix is returned.\n\n Returns\n -------\n hessian : ndarray\n Hessian, i.e. observed information, or expected information matrix.\n \"\"\"\n if observed is None:\n if getattr(self, '_optim_hessian', None) == 'eim':\n observed = False\n else:\n observed = True\n scale = float_like(scale, \"scale\", optional=True)\n tmp = getattr(self, '_tmp_like_exog', np.empty_like(self.exog, dtype=float))\n\n factor = self.hessian_factor(params, scale=scale, observed=observed)\n np.multiply(self.exog.T, factor, out=tmp.T)\n return -tmp.T.dot(self.exog)\n\n def information(self, params, scale=None):\n \"\"\"\n Fisher information matrix.\n \"\"\"\n scale = float_like(scale, \"scale\", optional=True)\n return self.hessian(params, scale=scale, observed=False)\n\n def _deriv_mean_dparams(self, params):\n \"\"\"\n Derivative of the expected endog with respect to the parameters.\n\n Parameters\n ----------\n params : ndarray\n parameter at which score is evaluated\n\n Returns\n -------\n The value of the derivative of the expected endog with respect\n to the parameter vector.\n \"\"\"\n lin_pred = self.predict(params, linear=True)\n idl = self.family.link.inverse_deriv(lin_pred)\n dmat = self.exog * idl[:, None]\n return dmat\n\n def _deriv_score_obs_dendog(self, params, scale=None):\n \"\"\"derivative of score_obs w.r.t. endog\n\n Parameters\n ----------\n params : ndarray\n parameter at which score is evaluated\n scale : None or float\n If scale is None, then the default scale will be calculated.\n Default scale is defined by `self.scaletype` and set in fit.\n If scale is not None, then it is used as a fixed scale.\n\n Returns\n -------\n derivative : ndarray_2d\n The derivative of the score_obs with respect to endog. 
This\n can is given by `score_factor0[:, None] * exog` where\n `score_factor0` is the score_factor without the residual.\n \"\"\"\n scale = float_like(scale, \"scale\", optional=True)\n mu = self.predict(params)\n if scale is None:\n scale = self.estimate_scale(mu)\n\n score_factor = 1 / self.family.link.deriv(mu)\n score_factor /= self.family.variance(mu)\n score_factor *= self.iweights * self.n_trials\n\n if not scale == 1:\n score_factor /= scale\n\n return score_factor[:, None] * self.exog\n\n def score_test(self, params_constrained, k_constraints=None,\n exog_extra=None, observed=True):\n \"\"\"score test for restrictions or for omitted variables\n\n The covariance matrix for the score is based on the Hessian, i.e.\n observed information matrix or optionally on the expected information\n matrix..\n\n Parameters\n ----------\n params_constrained : array_like\n estimated parameter of the restricted model. This can be the\n parameter estimate for the current when testing for omitted\n variables.\n k_constraints : int or None\n Number of constraints that were used in the estimation of params\n restricted relative to the number of exog in the model.\n This must be provided if no exog_extra are given. If exog_extra is\n not None, then k_constraints is assumed to be zero if it is None.\n exog_extra : None or array_like\n Explanatory variables that are jointly tested for inclusion in the\n model, i.e. omitted variables.\n observed : bool\n If True, then the observed Hessian is used in calculating the\n covariance matrix of the score. If false then the expected\n information matrix is used.\n\n Returns\n -------\n chi2_stat : float\n chisquare statistic for the score test\n p-value : float\n P-value of the score test based on the chisquare distribution.\n df : int\n Degrees of freedom used in the p-value calculation. This is equal\n to the number of constraints.\n\n Notes\n -----\n not yet verified for case with scale not equal to 1.\n \"\"\"\n\n if exog_extra is None:\n if k_constraints is None:\n raise ValueError('if exog_extra is None, then k_constraints'\n 'needs to be given')\n\n score = self.score(params_constrained)\n hessian = self.hessian(params_constrained, observed=observed)\n\n else:\n # exog_extra = np.asarray(exog_extra)\n if k_constraints is None:\n k_constraints = 0\n\n ex = np.column_stack((self.exog, exog_extra))\n k_constraints += ex.shape[1] - self.exog.shape[1]\n\n score_factor = self.score_factor(params_constrained)\n score = (score_factor[:, None] * ex).sum(0)\n hessian_factor = self.hessian_factor(params_constrained,\n observed=observed)\n hessian = -np.dot(ex.T * hessian_factor, ex)\n\n from scipy import stats\n # TODO check sign, why minus?\n chi2stat = -score.dot(np.linalg.solve(hessian, score[:, None]))\n pval = stats.chi2.sf(chi2stat, k_constraints)\n # return a stats results instance instead? 
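# --- Editor's note: illustrative sketch, not part of the original source,
# --- showing ``score_test`` used for omitted variables as described in its
# --- docstring above. The simulated data and the candidate regressor ``Z``
# --- are hypothetical.
import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
X = sm.add_constant(rng.normal(size=(200, 1)))      # restricted design
Z = rng.normal(size=(200, 1))                       # candidate omitted variable
y = rng.poisson(np.exp(0.5 + 0.3 * X[:, 1]))

res = sm.GLM(y, X, family=sm.families.Poisson()).fit()
chi2_stat, p_value, df = res.model.score_test(res.params, exog_extra=Z)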
Contrast?\n return chi2stat, pval, k_constraints\n\n def _update_history(self, tmp_result, mu, history):\n \"\"\"\n Helper method to update history during iterative fit.\n \"\"\"\n history['params'].append(tmp_result.params)\n history['deviance'].append(self.family.deviance(self.endog, mu,\n self.var_weights,\n self.freq_weights,\n self.scale))\n return history\n\n def estimate_scale(self, mu):\n \"\"\"\n Estimate the dispersion/scale.\n\n Type of scale can be chose in the fit method.\n\n Parameters\n ----------\n mu : ndarray\n mu is the mean response estimate\n\n Returns\n -------\n Estimate of scale\n\n Notes\n -----\n The default scale for Binomial, Poisson and Negative Binomial\n families is 1. The default for the other families is Pearson's\n Chi-Square estimate.\n\n See Also\n --------\n statsmodels.genmod.generalized_linear_model.GLM.fit\n \"\"\"\n if not self.scaletype:\n if isinstance(self.family, (families.Binomial, families.Poisson,\n families.NegativeBinomial)):\n return 1.\n else:\n return self._estimate_x2_scale(mu)\n\n if isinstance(self.scaletype, float):\n return np.array(self.scaletype)\n\n if isinstance(self.scaletype, str):\n if self.scaletype.lower() == 'x2':\n return self._estimate_x2_scale(mu)\n elif self.scaletype.lower() == 'dev':\n return (self.family.deviance(self.endog, mu, self.var_weights,\n self.freq_weights, 1.) /\n (self.df_resid))\n else:\n raise ValueError(\"Scale %s with type %s not understood\" %\n (self.scaletype, type(self.scaletype)))\n else:\n raise ValueError(\"Scale %s with type %s not understood\" %\n (self.scaletype, type(self.scaletype)))\n\n def _estimate_x2_scale(self, mu):\n resid = np.power(self.endog - mu, 2) * self.iweights\n return np.sum(resid / self.family.variance(mu)) / self.df_resid\n\n def estimate_tweedie_power(self, mu, method='brentq', low=1.01, high=5.):\n \"\"\"\n Tweedie specific function to estimate scale and the variance parameter.\n The variance parameter is also referred to as p, xi, or shape.\n\n Parameters\n ----------\n mu : array_like\n Fitted mean response variable\n method : str, defaults to 'brentq'\n Scipy optimizer used to solve the Pearson equation. Only brentq\n currently supported.\n low : float, optional\n Low end of the bracketing interval [a,b] to be used in the search\n for the power. Defaults to 1.01.\n high : float, optional\n High end of the bracketing interval [a,b] to be used in the search\n for the power. Defaults to 5.\n\n Returns\n -------\n power : float\n The estimated shape or power.\n \"\"\"\n if method == 'brentq':\n from scipy.optimize import brentq\n\n def psi_p(power, mu):\n scale = ((self.iweights * (self.endog - mu) ** 2 /\n (mu ** power)).sum() / self.df_resid)\n return (np.sum(self.iweights * ((self.endog - mu) ** 2 /\n (scale * (mu ** power)) - 1) *\n np.log(mu)) / self.freq_weights.sum())\n power = brentq(psi_p, low, high, args=(mu))\n else:\n raise NotImplementedError('Only brentq can currently be used')\n return power\n\n def predict(self, params, exog=None, exposure=None, offset=None,\n linear=False):\n \"\"\"\n Return predicted values for a design matrix\n\n Parameters\n ----------\n params : array_like\n Parameters / coefficients of a GLM.\n exog : array_like, optional\n Design / exogenous data. Is exog is None, model exog is used.\n exposure : array_like, optional\n Exposure time values, only can be used with the log link\n function. See notes for details.\n offset : array_like, optional\n Offset values. 
See notes for details.\n linear : bool\n If True, returns the linear predicted values. If False,\n returns the value of the inverse of the model's link function at\n the linear predicted values.\n\n Returns\n -------\n An array of fitted values\n\n Notes\n -----\n Any `exposure` and `offset` provided here take precedence over\n the `exposure` and `offset` used in the model fit. If `exog`\n is passed as an argument here, then any `exposure` and\n `offset` values in the fit will be ignored.\n\n Exposure values must be strictly positive.\n \"\"\"\n\n # Use fit offset if appropriate\n if offset is None and exog is None and hasattr(self, 'offset'):\n offset = self.offset\n elif offset is None:\n offset = 0.\n\n if exposure is not None and not isinstance(self.family.link,\n families.links.Log):\n raise ValueError(\"exposure can only be used with the log link \"\n \"function\")\n\n # Use fit exposure if appropriate\n if exposure is None and exog is None and hasattr(self, 'exposure'):\n # Already logged\n exposure = self.exposure\n elif exposure is None:\n exposure = 0.\n else:\n exposure = np.log(np.asarray(exposure))\n\n if exog is None:\n exog = self.exog\n\n linpred = np.dot(exog, params) + offset + exposure\n if linear:\n return linpred\n else:\n return self.family.fitted(linpred)\n\n def get_distribution(self, params, scale=1., exog=None, exposure=None,\n offset=None):\n \"\"\"\n Return a random number generator for the predictive distribution.\n\n Parameters\n ----------\n params : array_like\n The model parameters.\n scale : scalar\n The scale parameter.\n exog : array_like\n The predictor variable matrix.\n\n Returns\n -------\n gen\n Frozen random number generator object. Use the ``rvs`` method to\n generate random values.\n\n Notes\n -----\n Due to the behavior of ``scipy.stats.distributions objects``, the\n returned random number generator must be called with ``gen.rvs(n)``\n where ``n`` is the number of observations in the data set used\n to fit the model. 
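# --- Editor's note: illustrative sketch, not part of the original source,
# --- showing the frozen distribution returned by ``get_distribution`` being
# --- called with ``rvs(n)``, where ``n`` is the number of observations used to
# --- fit the model, as the note above requires. Data are hypothetical.
import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
X = sm.add_constant(rng.normal(size=(50, 2)))
y = rng.poisson(np.exp(X @ np.array([0.1, 0.2, -0.3])))

res = sm.GLM(y, X, family=sm.families.Poisson()).fit()
gen = res.model.get_distribution(res.params, scale=1.)
simulated = gen.rvs(res.nobs)   # one simulated response per original observation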
If any other value is used for ``n``, misleading\n results will be produced.\n \"\"\"\n scale = float_like(scale, \"scale\", optional=True)\n fit = self.predict(params, exog, exposure, offset, linear=False)\n\n import scipy.stats.distributions as dist\n\n if isinstance(self.family, families.Gaussian):\n return dist.norm(loc=fit, scale=np.sqrt(scale))\n\n elif isinstance(self.family, families.Binomial):\n return dist.binom(n=1, p=fit)\n\n elif isinstance(self.family, families.Poisson):\n return dist.poisson(mu=fit)\n\n elif isinstance(self.family, families.Gamma):\n alpha = fit / float(scale)\n return dist.gamma(alpha, scale=scale)\n\n else:\n raise ValueError(\"get_distribution not implemented for %s\" %\n self.family.name)\n\n def _setup_binomial(self):\n # this checks what kind of data is given for Binomial.\n # family will need a reference to endog if this is to be removed from\n # preprocessing\n self.n_trials = np.ones((self.endog.shape[0])) # For binomial\n if isinstance(self.family, families.Binomial):\n tmp = self.family.initialize(self.endog, self.freq_weights)\n self.endog = tmp[0]\n self.n_trials = tmp[1]\n self._init_keys.append('n_trials')\n\n def fit(self, start_params=None, maxiter=100, method='IRLS', tol=1e-8,\n scale=None, cov_type='nonrobust', cov_kwds=None, use_t=None,\n full_output=True, disp=False, max_start_irls=3, **kwargs):\n \"\"\"\n Fits a generalized linear model for a given family.\n\n Parameters\n ----------\n start_params : array_like, optional\n Initial guess of the solution for the loglikelihood maximization.\n The default is family-specific and is given by the\n ``family.starting_mu(endog)``. If start_params is given then the\n initial mean will be calculated as ``np.dot(exog, start_params)``.\n maxiter : int, optional\n Default is 100.\n method : str\n Default is 'IRLS' for iteratively reweighted least squares.\n Otherwise gradient optimization is used.\n tol : float\n Convergence tolerance. Default is 1e-8.\n scale : str or float, optional\n `scale` can be 'X2', 'dev', or a float\n The default value is None, which uses `X2` for Gamma, Gaussian,\n and Inverse Gaussian.\n `X2` is Pearson's chi-square divided by `df_resid`.\n The default is 1 for the Binomial and Poisson families.\n `dev` is the deviance divided by df_resid\n cov_type : str\n The type of parameter estimate covariance matrix to compute.\n cov_kwds : dict-like\n Extra arguments for calculating the covariance of the parameter\n estimates.\n use_t : bool\n If True, the Student t-distribution is used for inference.\n full_output : bool, optional\n Set to True to have all available output in the Results object's\n mle_retvals attribute. The output is dependent on the solver.\n See LikelihoodModelResults notes section for more information.\n Not used if methhod is IRLS.\n disp : bool, optional\n Set to True to print convergence messages. Not used if method is\n IRLS.\n max_start_irls : int\n The number of IRLS iterations used to obtain starting\n values for gradient optimization. Only relevant if\n `method` is set to something other than 'IRLS'.\n atol : float, optional\n (available with IRLS fits) The absolute tolerance criterion that\n must be satisfied. Defaults to ``tol``. Convergence is attained\n when: :math:`rtol * prior + atol > abs(current - prior)`\n rtol : float, optional\n (available with IRLS fits) The relative tolerance criterion that\n must be satisfied. 
Defaults to 0 which means ``rtol`` is not used.\n Convergence is attained when:\n :math:`rtol * prior + atol > abs(current - prior)`\n tol_criterion : str, optional\n (available with IRLS fits) Defaults to ``'deviance'``. Can\n optionally be ``'params'``.\n wls_method : str, optional\n (available with IRLS fits) options are 'lstsq', 'pinv' and 'qr'\n specifies which linear algebra function to use for the irls\n optimization. Default is `lstsq` which uses the same underlying\n svd based approach as 'pinv', but is faster during iterations.\n 'lstsq' and 'pinv' regularize the estimate in singular and\n near-singular cases by truncating small singular values based\n on `rcond` of the respective numpy.linalg function. 'qr' is\n only valid for cases that are not singular nor near-singular.\n optim_hessian : {'eim', 'oim'}, optional\n (available with scipy optimizer fits) When 'oim'--the default--the\n observed Hessian is used in fitting. 'eim' is the expected Hessian.\n This may provide more stable fits, but adds assumption that the\n Hessian is correctly specified.\n\n Notes\n -----\n If method is 'IRLS', then an additional keyword 'attach_wls' is\n available. This is currently for internal use only and might change\n in future versions. If attach_wls' is true, then the final WLS\n instance of the IRLS iteration is attached to the results instance\n as `results_wls` attribute.\n \"\"\"\n if isinstance(scale, str):\n scale = scale.lower()\n if scale not in (\"x2\", \"dev\"):\n raise ValueError(\n \"scale must be either X2 or dev when a string.\"\n )\n elif scale is not None:\n # GH-6627\n try:\n scale = float(scale)\n except Exception as exc:\n raise type(exc)(\n \"scale must be a float if given and no a string.\"\n )\n self.scaletype = scale\n\n if method.lower() == \"irls\":\n if cov_type.lower() == 'eim':\n cov_type = 'nonrobust'\n return self._fit_irls(start_params=start_params, maxiter=maxiter,\n tol=tol, scale=scale, cov_type=cov_type,\n cov_kwds=cov_kwds, use_t=use_t, **kwargs)\n else:\n self._optim_hessian = kwargs.get('optim_hessian')\n self._tmp_like_exog = np.empty_like(self.exog, dtype=float)\n fit_ = self._fit_gradient(start_params=start_params,\n method=method,\n maxiter=maxiter,\n tol=tol, scale=scale,\n full_output=full_output,\n disp=disp, cov_type=cov_type,\n cov_kwds=cov_kwds, use_t=use_t,\n max_start_irls=max_start_irls,\n **kwargs)\n del self._optim_hessian\n del self._tmp_like_exog\n return fit_\n\n def _fit_gradient(self, start_params=None, method=\"newton\",\n maxiter=100, tol=1e-8, full_output=True,\n disp=True, scale=None, cov_type='nonrobust',\n cov_kwds=None, use_t=None, max_start_irls=3,\n **kwargs):\n \"\"\"\n Fits a generalized linear model for a given family iteratively\n using the scipy gradient optimizers.\n \"\"\"\n\n # fix scale during optimization, see #4616\n scaletype = self.scaletype\n self.scaletype = 1.\n\n if (max_start_irls > 0) and (start_params is None):\n irls_rslt = self._fit_irls(start_params=start_params,\n maxiter=max_start_irls,\n tol=tol, scale=1., cov_type='nonrobust',\n cov_kwds=None, use_t=None,\n **kwargs)\n start_params = irls_rslt.params\n del irls_rslt\n\n rslt = super(GLM, self).fit(start_params=start_params, tol=tol,\n maxiter=maxiter, full_output=full_output,\n method=method, disp=disp, **kwargs)\n\n # reset scaletype to original\n self.scaletype = scaletype\n\n mu = self.predict(rslt.params)\n scale = self.estimate_scale(mu)\n\n if rslt.normalized_cov_params is None:\n cov_p = None\n else:\n cov_p = rslt.normalized_cov_params / 
scale\n\n if cov_type.lower() == 'eim':\n oim = False\n cov_type = 'nonrobust'\n else:\n oim = True\n\n try:\n cov_p = np.linalg.inv(-self.hessian(rslt.params, observed=oim)) / scale\n except LinAlgError:\n warnings.warn('Inverting hessian failed, no bse or cov_params '\n 'available', HessianInversionWarning)\n cov_p = None\n\n results_class = getattr(self, '_results_class', GLMResults)\n results_class_wrapper = getattr(self, '_results_class_wrapper', GLMResultsWrapper)\n glm_results = results_class(self, rslt.params,\n cov_p,\n scale,\n cov_type=cov_type, cov_kwds=cov_kwds,\n use_t=use_t)\n\n # TODO: iteration count is not always available\n history = {'iteration': 0}\n if full_output:\n glm_results.mle_retvals = rslt.mle_retvals\n if 'iterations' in rslt.mle_retvals:\n history['iteration'] = rslt.mle_retvals['iterations']\n glm_results.method = method\n glm_results.fit_history = history\n\n return results_class_wrapper(glm_results)\n\n def _fit_irls(self, start_params=None, maxiter=100, tol=1e-8,\n scale=None, cov_type='nonrobust', cov_kwds=None,\n use_t=None, **kwargs):\n \"\"\"\n Fits a generalized linear model for a given family using\n iteratively reweighted least squares (IRLS).\n \"\"\"\n attach_wls = kwargs.pop('attach_wls', False)\n atol = kwargs.get('atol')\n rtol = kwargs.get('rtol', 0.)\n tol_criterion = kwargs.get('tol_criterion', 'deviance')\n wls_method = kwargs.get('wls_method', 'lstsq')\n atol = tol if atol is None else atol\n\n endog = self.endog\n wlsexog = self.exog\n if start_params is None:\n start_params = np.zeros(self.exog.shape[1])\n mu = self.family.starting_mu(self.endog)\n lin_pred = self.family.predict(mu)\n else:\n lin_pred = np.dot(wlsexog, start_params) + self._offset_exposure\n mu = self.family.fitted(lin_pred)\n self.scale = self.estimate_scale(mu)\n dev = self.family.deviance(self.endog, mu, self.var_weights,\n self.freq_weights, self.scale)\n if np.isnan(dev):\n raise ValueError(\"The first guess on the deviance function \"\n \"returned a nan. 
This could be a boundary \"\n \" problem and should be reported.\")\n\n # first guess on the deviance is assumed to be scaled by 1.\n # params are none to start, so they line up with the deviance\n history = dict(params=[np.inf, start_params], deviance=[np.inf, dev])\n converged = False\n criterion = history[tol_criterion]\n # This special case is used to get the likelihood for a specific\n # params vector.\n if maxiter == 0:\n mu = self.family.fitted(lin_pred)\n self.scale = self.estimate_scale(mu)\n wls_results = lm.RegressionResults(self, start_params, None)\n iteration = 0\n for iteration in range(maxiter):\n self.weights = (self.iweights * self.n_trials *\n self.family.weights(mu))\n wlsendog = (lin_pred + self.family.link.deriv(mu) * (self.endog-mu)\n - self._offset_exposure)\n wls_mod = reg_tools._MinimalWLS(wlsendog, wlsexog,\n self.weights, check_endog=True,\n check_weights=True)\n wls_results = wls_mod.fit(method=wls_method)\n lin_pred = np.dot(self.exog, wls_results.params)\n lin_pred += self._offset_exposure\n mu = self.family.fitted(lin_pred)\n history = self._update_history(wls_results, mu, history)\n self.scale = self.estimate_scale(mu)\n if endog.squeeze().ndim == 1 and np.allclose(mu - endog, 0):\n msg = \"Perfect separation detected, results not available\"\n raise PerfectSeparationError(msg)\n converged = _check_convergence(criterion, iteration + 1, atol,\n rtol)\n if converged:\n break\n self.mu = mu\n\n if maxiter > 0: # Only if iterative used\n wls_method2 = 'pinv' if wls_method == 'lstsq' else wls_method\n wls_model = lm.WLS(wlsendog, wlsexog, self.weights)\n wls_results = wls_model.fit(method=wls_method2)\n\n glm_results = GLMResults(self, wls_results.params,\n wls_results.normalized_cov_params,\n self.scale,\n cov_type=cov_type, cov_kwds=cov_kwds,\n use_t=use_t)\n\n glm_results.method = \"IRLS\"\n glm_results.mle_settings = {}\n glm_results.mle_settings['wls_method'] = wls_method\n glm_results.mle_settings['optimizer'] = glm_results.method\n if (maxiter > 0) and (attach_wls is True):\n glm_results.results_wls = wls_results\n history['iteration'] = iteration + 1\n glm_results.fit_history = history\n glm_results.converged = converged\n return GLMResultsWrapper(glm_results)\n\n def fit_regularized(self, method=\"elastic_net\", alpha=0.,\n start_params=None, refit=False,\n opt_method=\"bfgs\", **kwargs):\n r\"\"\"\n Return a regularized fit to a linear regression model.\n\n Parameters\n ----------\n method : {'elastic_net'}\n Only the `elastic_net` approach is currently implemented.\n alpha : scalar or array_like\n The penalty weight. If a scalar, the same penalty weight\n applies to all variables in the model. If a vector, it\n must have the same length as `params`, and contains a\n penalty weight for each coefficient.\n start_params : array_like\n Starting values for `params`.\n refit : bool\n If True, the model is refit using only the variables that\n have non-zero coefficients in the regularized fit. The\n refitted model is not regularized.\n opt_method : string\n The method used for numerical optimization.\n **kwargs\n Additional keyword arguments used when fitting the model.\n\n Returns\n -------\n GLMResults\n An array or a GLMResults object, same type returned by `fit`.\n\n Notes\n -----\n The penalty is the ``elastic net`` penalty, which is a\n combination of L1 and L2 penalties.\n\n The function that is minimized is:\n\n .. 
math::\n\n -loglike/n + alpha*((1-L1\\_wt)*|params|_2^2/2 + L1\\_wt*|params|_1)\n\n where :math:`|*|_1` and :math:`|*|_2` are the L1 and L2 norms.\n\n Post-estimation results are based on the same data used to\n select variables, hence may be subject to overfitting biases.\n\n The elastic_net method uses the following keyword arguments:\n\n maxiter : int\n Maximum number of iterations\n L1_wt : float\n Must be in [0, 1]. The L1 penalty has weight L1_wt and the\n L2 penalty has weight 1 - L1_wt.\n cnvrg_tol : float\n Convergence threshold for maximum parameter change after\n one sweep through all coefficients.\n zero_tol : float\n Coefficients below this threshold are treated as zero.\n \"\"\"\n\n if kwargs.get(\"L1_wt\", 1) == 0:\n return self._fit_ridge(alpha, start_params, opt_method)\n\n from statsmodels.base.elastic_net import fit_elasticnet\n\n if method != \"elastic_net\":\n raise ValueError(\"method for fit_regularied must be elastic_net\")\n\n defaults = {\"maxiter\": 50, \"L1_wt\": 1, \"cnvrg_tol\": 1e-10,\n \"zero_tol\": 1e-10}\n defaults.update(kwargs)\n\n result = fit_elasticnet(self, method=method,\n alpha=alpha,\n start_params=start_params,\n refit=refit,\n **defaults)\n\n self.mu = self.predict(result.params)\n self.scale = self.estimate_scale(self.mu)\n\n if not result.converged:\n warnings.warn(\"Elastic net fitting did not converge\")\n\n return result\n\n def _fit_ridge(self, alpha, start_params, method):\n\n if start_params is None:\n start_params = np.zeros(self.exog.shape[1])\n\n def fun(x):\n return -(self.loglike(x) / self.nobs - np.sum(alpha * x**2) / 2)\n\n def grad(x):\n return -(self.score(x) / self.nobs - alpha * x)\n\n from scipy.optimize import minimize\n from statsmodels.base.elastic_net import (RegularizedResults,\n RegularizedResultsWrapper)\n\n mr = minimize(fun, start_params, jac=grad, method=method)\n params = mr.x\n\n if not mr.success:\n import warnings\n ngrad = np.sqrt(np.sum(mr.jac**2))\n msg = \"GLM ridge optimization may have failed, |grad|=%f\" % ngrad\n warnings.warn(msg)\n\n results = RegularizedResults(self, params)\n results = RegularizedResultsWrapper(results)\n\n return results\n\n def fit_constrained(self, constraints, start_params=None, **fit_kwds):\n \"\"\"fit the model subject to linear equality constraints\n\n The constraints are of the form `R params = q`\n where R is the constraint_matrix and q is the vector of\n constraint_values.\n\n The estimation creates a new model with transformed design matrix,\n exog, and converts the results back to the original parameterization.\n\n\n Parameters\n ----------\n constraints : formula expression or tuple\n If it is a tuple, then the constraint needs to be given by two\n arrays (constraint_matrix, constraint_value), i.e. (R, q).\n Otherwise, the constraints can be given as strings or list of\n strings.\n see t_test for details\n start_params : None or array_like\n starting values for the optimization. 
`start_params` needs to be\n given in the original parameter space and are internally\n transformed.\n **fit_kwds : keyword arguments\n fit_kwds are used in the optimization of the transformed model.\n\n Returns\n -------\n results : Results instance\n \"\"\"\n\n from patsy import DesignInfo\n from statsmodels.base._constraints import (fit_constrained,\n LinearConstraints)\n\n # same pattern as in base.LikelihoodModel.t_test\n lc = DesignInfo(self.exog_names).linear_constraint(constraints)\n R, q = lc.coefs, lc.constants\n\n # TODO: add start_params option, need access to tranformation\n # fit_constrained needs to do the transformation\n params, cov, res_constr = fit_constrained(self, R, q,\n start_params=start_params,\n fit_kwds=fit_kwds)\n # create dummy results Instance, TODO: wire up properly\n res = self.fit(start_params=params, maxiter=0) # we get a wrapper back\n res._results.params = params\n res._results.cov_params_default = cov\n cov_type = fit_kwds.get('cov_type', 'nonrobust')\n if cov_type != 'nonrobust':\n res._results.normalized_cov_params = cov / res_constr.scale\n else:\n res._results.normalized_cov_params = None\n res._results.scale = res_constr.scale\n k_constr = len(q)\n res._results.df_resid += k_constr\n res._results.df_model -= k_constr\n res._results.constraints = LinearConstraints.from_patsy(lc)\n res._results.k_constr = k_constr\n res._results.results_constrained = res_constr\n return res\n\n\nclass GLMResults(base.LikelihoodModelResults):\n \"\"\"\n Class to contain GLM results.\n\n GLMResults inherits from statsmodels.LikelihoodModelResults\n\n Attributes\n ----------\n df_model : float\n See GLM.df_model\n df_resid : float\n See GLM.df_resid\n fit_history : dict\n Contains information about the iterations. Its keys are `iterations`,\n `deviance` and `params`.\n model : class instance\n Pointer to GLM model instance that called fit.\n nobs : float\n The number of observations n.\n normalized_cov_params : ndarray\n See GLM docstring\n params : ndarray\n The coefficients of the fitted model. Note that interpretation\n of the coefficients often depends on the distribution family and the\n data.\n pvalues : ndarray\n The two-tailed p-values for the parameters.\n scale : float\n The estimate of the scale / dispersion for the model fit.\n See GLM.fit and GLM.estimate_scale for more information.\n stand_errors : ndarray\n The standard errors of the fitted GLM. 
#TODO still named bse\n\n See Also\n --------\n statsmodels.base.model.LikelihoodModelResults\n \"\"\"\n\n def __init__(self, model, params, normalized_cov_params, scale,\n cov_type='nonrobust', cov_kwds=None, use_t=None):\n super(GLMResults, self).__init__(\n model,\n params,\n normalized_cov_params=normalized_cov_params,\n scale=scale)\n self.family = model.family\n self._endog = model.endog\n self.nobs = model.endog.shape[0]\n self._freq_weights = model.freq_weights\n self._var_weights = model.var_weights\n self._iweights = model.iweights\n if isinstance(self.family, families.Binomial):\n self._n_trials = self.model.n_trials\n else:\n self._n_trials = 1\n self.df_resid = model.df_resid\n self.df_model = model.df_model\n self._cache = {}\n # are these intermediate results needed or can we just\n # call the model's attributes?\n\n # for remove data and pickle without large arrays\n self._data_attr.extend(['results_constrained', '_freq_weights',\n '_var_weights', '_iweights'])\n self._data_in_cache.extend(['null', 'mu'])\n self._data_attr_model = getattr(self, '_data_attr_model', [])\n self._data_attr_model.append('mu')\n\n # robust covariance\n from statsmodels.base.covtype import get_robustcov_results\n if use_t is None:\n self.use_t = False # TODO: class default\n else:\n self.use_t = use_t\n\n # temporary warning\n ct = (cov_type == 'nonrobust') or (cov_type.upper().startswith('HC'))\n if self.model._has_freq_weights and not ct:\n import warnings\n from statsmodels.tools.sm_exceptions import SpecificationWarning\n warnings.warn('cov_type not fully supported with freq_weights',\n SpecificationWarning)\n\n if self.model._has_var_weights and not ct:\n import warnings\n from statsmodels.tools.sm_exceptions import SpecificationWarning\n warnings.warn('cov_type not fully supported with var_weights',\n SpecificationWarning)\n\n if cov_type == 'nonrobust':\n self.cov_type = 'nonrobust'\n self.cov_kwds = {'description': 'Standard Errors assume that the' +\n ' covariance matrix of the errors is correctly ' +\n 'specified.'}\n\n else:\n if cov_kwds is None:\n cov_kwds = {}\n get_robustcov_results(self, cov_type=cov_type, use_self=True,\n use_t=use_t, **cov_kwds)\n\n @cached_data\n def resid_response(self):\n \"\"\"\n Response residuals. The response residuals are defined as\n `endog` - `fittedvalues`\n \"\"\"\n return self._n_trials * (self._endog-self.mu)\n\n @cached_data\n def resid_pearson(self):\n \"\"\"\n Pearson residuals. The Pearson residuals are defined as\n (`endog` - `mu`)/sqrt(VAR(`mu`)) where VAR is the distribution\n specific variance function. See statsmodels.families.family and\n statsmodels.families.varfuncs for more information.\n \"\"\"\n return (np.sqrt(self._n_trials) * (self._endog-self.mu) *\n np.sqrt(self._var_weights) /\n np.sqrt(self.family.variance(self.mu)))\n\n @cached_data\n def resid_working(self):\n \"\"\"\n Working residuals. The working residuals are defined as\n `resid_response`/link'(`mu`). See statsmodels.family.links for the\n derivatives of the link functions. They are defined analytically.\n \"\"\"\n # Isn't self.resid_response is already adjusted by _n_trials?\n val = (self.resid_response * self.family.link.deriv(self.mu))\n val *= self._n_trials\n return val\n\n @cached_data\n def resid_anscombe(self):\n \"\"\"\n Anscombe residuals. See statsmodels.families.family for distribution-\n specific Anscombe residuals. Currently, the unscaled residuals are\n provided. 
In a future version, the scaled residuals will be provided.\n \"\"\"\n import warnings\n warnings.warn('Anscombe residuals currently unscaled. After the 0.12 '\n 'release, they will be scaled.', category=FutureWarning)\n return self.family.resid_anscombe(self._endog, self.fittedvalues,\n var_weights=self._var_weights,\n scale=1.)\n\n @cached_data\n def resid_anscombe_scaled(self):\n \"\"\"\n Scaled Anscombe residuals. See statsmodels.families.family for\n distribution-specific Anscombe residuals.\n \"\"\"\n return self.family.resid_anscombe(self._endog, self.fittedvalues,\n var_weights=self._var_weights,\n scale=self.scale)\n\n @cached_data\n def resid_anscombe_unscaled(self):\n \"\"\"\n Unscaled Anscombe residuals. See statsmodels.families.family for\n distribution-specific Anscombe residuals.\n \"\"\"\n return self.family.resid_anscombe(self._endog, self.fittedvalues,\n var_weights=self._var_weights,\n scale=1.)\n\n @cached_data\n def resid_deviance(self):\n \"\"\"\n Deviance residuals. See statsmodels.families.family for distribution-\n specific deviance residuals.\n \"\"\"\n dev = self.family.resid_dev(self._endog, self.fittedvalues,\n var_weights=self._var_weights,\n scale=1.)\n return dev\n\n @cached_value\n def pearson_chi2(self):\n \"\"\"\n Pearson's Chi-Squared statistic is defined as the sum of the squares\n of the Pearson residuals.\n \"\"\"\n chisq = (self._endog - self.mu)**2 / self.family.variance(self.mu)\n chisq *= self._iweights * self._n_trials\n chisqsum = np.sum(chisq)\n return chisqsum\n\n @cached_data\n def fittedvalues(self):\n \"\"\"\n The estimated mean response.\n\n This is the value of the inverse of the link function at\n lin_pred, where lin_pred is the linear predicted value\n obtained by multiplying the design matrix by the coefficient\n vector.\n \"\"\"\n return self.mu\n\n @cached_data\n def mu(self):\n \"\"\"\n See GLM docstring.\n \"\"\"\n return self.model.predict(self.params)\n\n @cache_readonly\n def null(self):\n \"\"\"\n Fitted values of the null model\n \"\"\"\n endog = self._endog\n model = self.model\n exog = np.ones((len(endog), 1))\n\n kwargs = model._get_init_kwds()\n kwargs.pop('family')\n if hasattr(self.model, '_offset_exposure'):\n return GLM(endog, exog, family=self.family,\n **kwargs).fit().fittedvalues\n else:\n # correct if fitted is identical across observations\n wls_model = lm.WLS(endog, exog,\n weights=self._iweights * self._n_trials)\n return wls_model.fit().fittedvalues\n\n @cache_readonly\n def deviance(self):\n \"\"\"\n See statsmodels.families.family for the distribution-specific deviance\n functions.\n \"\"\"\n return self.family.deviance(self._endog, self.mu, self._var_weights,\n self._freq_weights)\n\n @cache_readonly\n def null_deviance(self):\n \"\"\"The value of the deviance function for the model fit with a constant\n as the only regressor.\"\"\"\n return self.family.deviance(self._endog, self.null, self._var_weights,\n self._freq_weights)\n\n @cache_readonly\n def llnull(self):\n \"\"\"\n Log-likelihood of the model fit with a constant as the only regressor\n \"\"\"\n return self.family.loglike(self._endog, self.null,\n var_weights=self._var_weights,\n freq_weights=self._freq_weights,\n scale=self.scale)\n\n @cached_value\n def llf(self):\n \"\"\"\n Value of the loglikelihood function evalued at params.\n See statsmodels.families.family for distribution-specific\n loglikelihoods.\n \"\"\"\n _modelfamily = self.family\n if (isinstance(self.family, families.Gaussian) and\n isinstance(self.family.link, families.links.Power) 
and\n (self.family.link.power == 1.)):\n scale = (np.power(self._endog - self.mu, 2) * self._iweights).sum()\n scale /= self.model.wnobs\n else:\n scale = self.scale\n val = _modelfamily.loglike(self._endog, self.mu,\n var_weights=self._var_weights,\n freq_weights=self._freq_weights,\n scale=scale)\n return val\n\n @cached_value\n def aic(self):\n \"\"\"\n Akaike Information Criterion\n -2 * `llf` + 2 * (`df_model` + 1)\n \"\"\"\n return -2 * self.llf + 2 * (self.df_model + 1)\n\n @property\n def bic(self):\n \"\"\"\n Bayes Information Criterion\n\n `deviance` - `df_resid` * log(`nobs`)\n\n .. warning::\n\n The current definition is base don the deviance rather than the\n log-likelihood. This is not consistent with the AIC definition,\n and after 0.13 both will make use of the log-likelihood definition.\n\n Notes\n -----\n The log-likelihood version is defined\n -2 * `llf` + (`df_model` + 1)*log(n)\n \"\"\"\n if _use_bic_helper.use_bic_llf not in (True, False):\n warnings.warn(\n \"The bic value is computed using the deviance formula. After \"\n \"0.13 this will change to the log-likelihood based formula. \"\n \"This change has no impact on the relative rank of models \"\n \"compared using BIC. You can directly access the \"\n \"log-likelihood version using the `bic_llf` attribute. You \"\n \"can suppress this message by calling \"\n \"statsmodels.genmod.generalized_linear_model.SET_USE_BIC_LLF \"\n \"with True to get the LLF-based version now or False to retain\"\n \"the deviance version.\",\n FutureWarning\n )\n if bool(_use_bic_helper.use_bic_llf):\n return self.bic_llf\n\n return self.bic_deviance\n\n @cached_value\n def bic_deviance(self):\n \"\"\"\n Bayes Information Criterion\n\n Based on the deviance,\n `deviance` - `df_resid` * log(`nobs`)\n \"\"\"\n return (self.deviance -\n (self.model.wnobs - self.df_model - 1) *\n np.log(self.model.wnobs))\n\n @cached_value\n def bic_llf(self):\n \"\"\"\n Bayes Information Criterion\n\n Based on the log-likelihood,\n -2 * `llf` + log(n) * (`df_model` + 1)\n \"\"\"\n return -2*self.llf + (self.df_model+1)*np.log(\n self.df_model+self.df_resid+1\n )\n\n @Appender(pred.get_prediction_glm.__doc__)\n def get_prediction(self, exog=None, exposure=None, offset=None,\n transform=True, linear=False,\n row_labels=None):\n\n import statsmodels.regression._prediction as linpred\n\n pred_kwds = {'exposure': exposure, 'offset': offset, 'linear': True}\n\n # two calls to a get_prediction duplicates exog generation if patsy\n res_linpred = linpred.get_prediction(self, exog=exog,\n transform=transform,\n row_labels=row_labels,\n pred_kwds=pred_kwds)\n\n pred_kwds['linear'] = False\n res = pred.get_prediction_glm(self, exog=exog, transform=transform,\n row_labels=row_labels,\n linpred=res_linpred,\n link=self.model.family.link,\n pred_kwds=pred_kwds)\n\n return res\n\n def get_hat_matrix_diag(self, observed=True):\n \"\"\"\n Compute the diagonal of the hat matrix\n\n Parameters\n ----------\n observed : bool\n If true, then observed hessian is used in the hat matrix\n computation. 
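# --- Editor's note: illustrative sketch, not part of the original source,
# --- showing the hat-matrix and influence helpers defined nearby being used on
# --- a fitted model. The data are hypothetical.
import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
X = sm.add_constant(rng.normal(size=(100, 2)))
y = rng.poisson(np.exp(X @ np.array([0.2, 0.1, -0.2])))

res = sm.GLM(y, X, family=sm.families.Poisson()).fit()
leverage = res.get_hat_matrix_diag(observed=True)   # diagonal of the hat matrix
infl = res.get_influence(observed=True)             # GLMInfluence instance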
If false, then the expected hessian is used.\n In the case of a canonical link function both are the same.\n\n Returns\n -------\n hat_matrix_diag : ndarray\n The diagonal of the hat matrix computed from the observed\n or expected hessian.\n \"\"\"\n weights = self.model.hessian_factor(self.params, observed=observed)\n wexog = np.sqrt(weights)[:, None] * self.model.exog\n\n hd = (wexog * np.linalg.pinv(wexog).T).sum(1)\n return hd\n\n def get_influence(self, observed=True):\n \"\"\"\n Get an instance of GLMInfluence with influence and outlier measures\n\n Parameters\n ----------\n observed : bool\n If true, then observed hessian is used in the hat matrix\n computation. If false, then the expected hessian is used.\n In the case of a canonical link function both are the same.\n\n Returns\n -------\n infl : GLMInfluence instance\n The instance has methods to calculate the main influence and\n outlier measures as attributes.\n\n See Also\n --------\n statsmodels.stats.outliers_influence.GLMInfluence\n \"\"\"\n from statsmodels.stats.outliers_influence import GLMInfluence\n\n weights = self.model.hessian_factor(self.params, observed=observed)\n weights_sqrt = np.sqrt(weights)\n wexog = weights_sqrt[:, None] * self.model.exog\n wendog = weights_sqrt * self.model.endog\n\n # using get_hat_matrix_diag has duplicated computation\n hat_matrix_diag = self.get_hat_matrix_diag(observed=observed)\n infl = GLMInfluence(self, endog=wendog, exog=wexog,\n resid=self.resid_pearson,\n hat_matrix_diag=hat_matrix_diag)\n return infl\n\n @Appender(base.LikelihoodModelResults.remove_data.__doc__)\n def remove_data(self):\n # GLM has alias/reference in result instance\n self._data_attr.extend([i for i in self.model._data_attr\n if '_data.' not in i])\n super(self.__class__, self).remove_data()\n\n # TODO: what are these in results?\n self._endog = None\n self._freq_weights = None\n self._var_weights = None\n self._iweights = None\n self._n_trials = None\n\n @Appender(_plot_added_variable_doc % {'extra_params_doc': ''})\n def plot_added_variable(self, focus_exog, resid_type=None,\n use_glm_weights=True, fit_kwargs=None,\n ax=None):\n\n from statsmodels.graphics.regressionplots import plot_added_variable\n\n fig = plot_added_variable(self, focus_exog,\n resid_type=resid_type,\n use_glm_weights=use_glm_weights,\n fit_kwargs=fit_kwargs, ax=ax)\n\n return fig\n\n @Appender(_plot_partial_residuals_doc % {'extra_params_doc': ''})\n def plot_partial_residuals(self, focus_exog, ax=None):\n\n from statsmodels.graphics.regressionplots import plot_partial_residuals\n\n return plot_partial_residuals(self, focus_exog, ax=ax)\n\n @Appender(_plot_ceres_residuals_doc % {'extra_params_doc': ''})\n def plot_ceres_residuals(self, focus_exog, frac=0.66, cond_means=None,\n ax=None):\n\n from statsmodels.graphics.regressionplots import plot_ceres_residuals\n\n return plot_ceres_residuals(self, focus_exog, frac,\n cond_means=cond_means, ax=ax)\n\n def summary(self, yname=None, xname=None, title=None, alpha=.05):\n \"\"\"\n Summarize the Regression Results\n\n Parameters\n ----------\n yname : str, optional\n Default is `y`\n xname : list[str], optional\n Names for the exogenous variables, default is `var_#` for ## in\n the number of regressors. Must match the number of parameters in\n the model\n title : str, optional\n Title for the top table. 
If not None, then this replaces the\n default title\n alpha : float\n significance level for the confidence intervals\n\n Returns\n -------\n smry : Summary instance\n this holds the summary tables and text, which can be printed or\n converted to various output formats.\n\n See Also\n --------\n statsmodels.iolib.summary.Summary : class to hold summary results\n \"\"\"\n\n top_left = [('Dep. Variable:', None),\n ('Model:', None),\n ('Model Family:', [self.family.__class__.__name__]),\n ('Link Function:', [self.family.link.__class__.__name__]),\n ('Method:', [self.method]),\n ('Date:', None),\n ('Time:', None),\n ('No. Iterations:',\n [\"%d\" % self.fit_history['iteration']]),\n ]\n\n top_right = [('No. Observations:', None),\n ('Df Residuals:', None),\n ('Df Model:', None),\n ('Scale:', [\"%#8.5g\" % self.scale]),\n ('Log-Likelihood:', None),\n ('Deviance:', [\"%#8.5g\" % self.deviance]),\n ('Pearson chi2:', [\"%#6.3g\" % self.pearson_chi2])\n ]\n\n if hasattr(self, 'cov_type'):\n top_left.append(('Covariance Type:', [self.cov_type]))\n\n if title is None:\n title = \"Generalized Linear Model Regression Results\"\n\n # create summary tables\n from statsmodels.iolib.summary import Summary\n smry = Summary()\n smry.add_table_2cols(self, gleft=top_left, gright=top_right,\n yname=yname, xname=xname, title=title)\n smry.add_table_params(self, yname=yname, xname=xname, alpha=alpha,\n use_t=self.use_t)\n\n if hasattr(self, 'constraints'):\n smry.add_extra_txt(['Model has been estimated subject to linear '\n 'equality constraints.'])\n return smry\n\n def summary2(self, yname=None, xname=None, title=None, alpha=.05,\n float_format=\"%.4f\"):\n \"\"\"Experimental summary for regression Results\n\n Parameters\n ----------\n yname : str\n Name of the dependent variable (optional)\n xname : list[str], optional\n Names for the exogenous variables, default is `var_#` for ## in\n the number of regressors. Must match the number of parameters in\n the model\n title : str, optional\n Title for the top table. 
If not None, then this replaces the\n default title\n alpha : float\n significance level for the confidence intervals\n float_format : str\n print format for floats in parameters summary\n\n Returns\n -------\n smry : Summary instance\n this holds the summary tables and text, which can be printed or\n converted to various output formats.\n\n See Also\n --------\n statsmodels.iolib.summary2.Summary : class to hold summary results\n \"\"\"\n self.method = 'IRLS'\n from statsmodels.iolib import summary2\n smry = summary2.Summary()\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", FutureWarning)\n smry.add_base(results=self, alpha=alpha, float_format=float_format,\n xname=xname, yname=yname, title=title)\n if hasattr(self, 'constraints'):\n smry.add_text('Model has been estimated subject to linear '\n 'equality constraints.')\n\n return smry\n\n\nclass GLMResultsWrapper(lm.RegressionResultsWrapper):\n _attrs = {\n 'resid_anscombe': 'rows',\n 'resid_deviance': 'rows',\n 'resid_pearson': 'rows',\n 'resid_response': 'rows',\n 'resid_working': 'rows'\n }\n _wrap_attrs = wrap.union_dicts(lm.RegressionResultsWrapper._wrap_attrs,\n _attrs)\n\n\nwrap.populate_wrapper(GLMResultsWrapper, GLMResults)\n\nif __name__ == \"__main__\":\n import statsmodels.api as sm\n data = sm.datasets.longley.load(as_pandas=False)\n # data.exog = add_constant(data.exog)\n GLMmod = GLM(data.endog, data.exog).fit()\n GLMT = GLMmod.summary(returns='tables')\n # GLMT[0].extend_right(GLMT[1])\n # print(GLMT[0])\n # print(GLMT[2])\n GLMTp = GLMmod.summary(title='Test GLM')\n \"\"\"\nFrom Stata\n. webuse beetle\n. glm r i.beetle ldose, family(binomial n) link(cloglog)\n\nIteration 0: log likelihood = -79.012269\nIteration 1: log likelihood = -76.94951\nIteration 2: log likelihood = -76.945645\nIteration 3: log likelihood = -76.945645\n\nGeneralized linear models No. of obs = 24\nOptimization : ML Residual df = 20\n Scale parameter = 1\nDeviance = 73.76505595 (1/df) Deviance = 3.688253\nPearson = 71.8901173 (1/df) Pearson = 3.594506\n\nVariance function: V(u) = u*(1-u/n) [Binomial]\nLink function : g(u) = ln(-ln(1-u/n)) [Complementary log-log]\n\n AIC = 6.74547\nLog likelihood = -76.94564525 BIC = 10.20398\n\n------------------------------------------------------------------------------\n | OIM\n r | Coef. Std. Err. z P>|z| [95% Conf. 
Interval]\n-------------+----------------------------------------------------------------\n beetle |\n 2 | -.0910396 .1076132 -0.85 0.398 -.3019576 .1198783\n 3 | -1.836058 .1307125 -14.05 0.000 -2.09225 -1.579867\n |\n ldose | 19.41558 .9954265 19.50 0.000 17.46458 21.36658\n _cons | -34.84602 1.79333 -19.43 0.000 -38.36089 -31.33116\n------------------------------------------------------------------------------\n\"\"\"\n", "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 30 16:22:29 2014\n\nAuthor: Josef Perktold\nLicense: BSD-3\n\n\"\"\"\nfrom io import StringIO\n\nimport numpy as np\nfrom numpy.testing import assert_allclose, assert_equal, assert_\n\nimport pandas as pd\nimport patsy\nimport pytest\n\nfrom statsmodels.discrete.discrete_model import Poisson\nfrom statsmodels.genmod.generalized_linear_model import GLM\nfrom statsmodels.genmod import families\nfrom statsmodels.base._constraints import fit_constrained\n\nfrom statsmodels.tools.tools import add_constant\nfrom statsmodels import datasets\n\nfrom .results import results_poisson_constrained as results\nfrom .results import results_glm_logit_constrained as reslogit\n\n\nspector_data = datasets.spector.load(as_pandas=False)\nspector_data.exog = add_constant(spector_data.exog, prepend=False)\n\n\nDEBUG = False\n\nss = '''\\\nagecat\tsmokes\tdeaths\tpyears\n1\t1\t32\t52407\n2\t1\t104\t43248\n3\t1\t206\t28612\n4\t1\t186\t12663\n5\t1\t102\t5317\n1\t0\t2\t18790\n2\t0\t12\t10673\n3\t0\t28\t5710\n4\t0\t28\t2585\n5\t0\t31\t1462'''\n\ndata = pd.read_csv(StringIO(ss), delimiter='\\t')\ndata = data.astype(int)\ndata['logpyears'] = np.log(data['pyears'])\n\n\nclass CheckPoissonConstrainedMixin(object):\n\n def test_basic(self):\n res1 = self.res1\n res2 = self.res2\n assert_allclose(res1[0], res2.params[self.idx], rtol=1e-6)\n # see below Stata has nan, we have zero\n bse1 = np.sqrt(np.diag(res1[1]))\n mask = (bse1 == 0) & np.isnan(res2.bse[self.idx])\n assert_allclose(bse1[~mask], res2.bse[self.idx][~mask], rtol=1e-6)\n\n def test_basic_method(self):\n if hasattr(self, 'res1m'):\n res1 = (self.res1m if not hasattr(self.res1m, '_results')\n else self.res1m._results)\n res2 = self.res2\n assert_allclose(res1.params, res2.params[self.idx], rtol=1e-6)\n\n # when a parameter is fixed, the Stata has bse=nan, we have bse=0\n mask = (res1.bse == 0) & np.isnan(res2.bse[self.idx])\n assert_allclose(res1.bse[~mask], res2.bse[self.idx][~mask],\n rtol=1e-6)\n\n tvalues = res2.params_table[self.idx, 2]\n # when a parameter is fixed, the Stata has tvalue=nan,\n # we have tvalue=inf\n mask = np.isinf(res1.tvalues) & np.isnan(tvalues)\n assert_allclose(res1.tvalues[~mask], tvalues[~mask], rtol=1e-6)\n pvalues = res2.params_table[self.idx, 3]\n # note most pvalues are very small\n # examples so far agree at 8 or more decimal, but rtol is stricter\n # see above\n mask = (res1.pvalues == 0) & np.isnan(pvalues)\n assert_allclose(res1.pvalues[~mask], pvalues[~mask], rtol=5e-5)\n\n ci_low = res2.params_table[self.idx, 4]\n ci_upp = res2.params_table[self.idx, 5]\n ci = np.column_stack((ci_low, ci_upp))\n # note most pvalues are very small\n # examples so far agree at 8 or more decimal, but rtol is stricter\n # see above: nan versus value\n assert_allclose(res1.conf_int()[~np.isnan(ci)], ci[~np.isnan(ci)],\n rtol=5e-5)\n\n # other\n assert_allclose(res1.llf, res2.ll, rtol=1e-6)\n assert_equal(res1.df_model, res2.df_m)\n # Stata does not have df_resid\n df_r = res2.N - res2.df_m - 1\n assert_equal(res1.df_resid, df_r)\n else:\n pytest.skip(\"not available 
yet\")\n\n def test_other(self):\n # some results may not be valid or available for all models\n if hasattr(self, 'res1m'):\n res1 = self.res1m\n res2 = self.res2\n\n if hasattr(res2, 'll_0'):\n assert_allclose(res1.llnull, res2.ll_0, rtol=1e-6)\n else:\n if DEBUG:\n import warnings\n message = ('test: ll_0 not available, llnull=%6.4F'\n % res1.llnull)\n warnings.warn(message)\n\n else:\n pytest.skip(\"not available yet\")\n\n\nclass TestPoissonConstrained1a(CheckPoissonConstrainedMixin):\n\n @classmethod\n def setup_class(cls):\n\n cls.res2 = results.results_noexposure_constraint\n # 2 is dropped baseline for categorical\n cls.idx = [7, 3, 4, 5, 6, 0, 1]\n\n # example without offset\n formula = 'deaths ~ logpyears + smokes + C(agecat)'\n mod = Poisson.from_formula(formula, data=data)\n # get start_params, example fails to converge on one py TravisCI\n k_vars = len(mod.exog_names)\n start_params = np.zeros(k_vars)\n start_params[0] = np.log(mod.endog.mean())\n # if we need it, this is desired params\n # p = np.array([-3.93478643, 1.37276214, 2.33077032, 2.71338891,\n # 2.71338891, 0.57966535, 0.97254074])\n\n constr = 'C(agecat)[T.4] = C(agecat)[T.5]'\n lc = patsy.DesignInfo(mod.exog_names).linear_constraint(constr)\n cls.res1 = fit_constrained(mod, lc.coefs, lc.constants,\n start_params=start_params,\n fit_kwds={'method': 'bfgs', 'disp': 0})\n # TODO: Newton fails\n\n # test method of Poisson, not monkey patched\n cls.res1m = mod.fit_constrained(constr, start_params=start_params,\n method='bfgs', disp=0)\n\n @pytest.mark.smoke\n def test_summary(self):\n # trailing text in summary, assumes it's the first extra string\n # NOTE: see comment about convergence in llnull for self.res1m\n summ = self.res1m.summary()\n assert_('linear equality constraints' in summ.extra_txt)\n\n @pytest.mark.smoke\n def test_summary2(self):\n # trailing text in summary, assumes it's the first extra string\n # NOTE: see comment about convergence in llnull for self.res1m\n summ = self.res1m.summary2()\n assert_('linear equality constraints' in summ.extra_txt[0])\n\n\nclass TestPoissonConstrained1b(CheckPoissonConstrainedMixin):\n\n @classmethod\n def setup_class(cls):\n\n cls.res2 = results.results_exposure_constraint\n cls.idx = [6, 2, 3, 4, 5, 0] # 2 is dropped baseline for categorical\n\n # example without offset\n formula = 'deaths ~ smokes + C(agecat)'\n mod = Poisson.from_formula(formula, data=data,\n exposure=data['pyears'].values)\n constr = 'C(agecat)[T.4] = C(agecat)[T.5]'\n lc = patsy.DesignInfo(mod.exog_names).linear_constraint(constr)\n cls.res1 = fit_constrained(mod, lc.coefs, lc.constants,\n fit_kwds={'method': 'newton',\n 'disp': 0})\n cls.constraints = lc\n # TODO: bfgs fails\n # test method of Poisson, not monkey patched\n cls.res1m = mod.fit_constrained(constr, method='newton',\n disp=0)\n\n\nclass TestPoissonConstrained1c(CheckPoissonConstrainedMixin):\n\n @classmethod\n def setup_class(cls):\n\n cls.res2 = results.results_exposure_constraint\n cls.idx = [6, 2, 3, 4, 5, 0] # 2 is dropped baseline for categorical\n\n # example without offset\n formula = 'deaths ~ smokes + C(agecat)'\n mod = Poisson.from_formula(formula, data=data,\n offset=np.log(data['pyears'].values))\n constr = 'C(agecat)[T.4] = C(agecat)[T.5]'\n lc = patsy.DesignInfo(mod.exog_names).linear_constraint(constr)\n cls.res1 = fit_constrained(mod, lc.coefs, lc.constants,\n fit_kwds={'method': 'newton',\n 'disp': 0})\n cls.constraints = lc\n # TODO: bfgs fails\n\n # test method of Poisson, not monkey patched\n cls.res1m = 
mod.fit_constrained(constr, method='newton', disp=0)\n\n\nclass TestPoissonNoConstrained(CheckPoissonConstrainedMixin):\n\n @classmethod\n def setup_class(cls):\n\n cls.res2 = results.results_exposure_noconstraint\n cls.idx = [6, 2, 3, 4, 5, 0] # 1 is dropped baseline for categorical\n\n # example without offset\n formula = 'deaths ~ smokes + C(agecat)'\n mod = Poisson.from_formula(formula, data=data,\n offset=np.log(data['pyears'].values))\n res1 = mod.fit(disp=0)._results\n # res1 is duplicate check, so we can follow the same pattern\n cls.res1 = (res1.params, res1.cov_params())\n cls.res1m = res1\n\n\nclass TestPoissonConstrained2a(CheckPoissonConstrainedMixin):\n\n @classmethod\n def setup_class(cls):\n\n cls.res2 = results.results_noexposure_constraint2\n # 2 is dropped baseline for categorical\n cls.idx = [7, 3, 4, 5, 6, 0, 1]\n\n # example without offset\n formula = 'deaths ~ logpyears + smokes + C(agecat)'\n mod = Poisson.from_formula(formula, data=data)\n\n # get start_params, example fails to converge on one py TravisCI\n k_vars = len(mod.exog_names)\n start_params = np.zeros(k_vars)\n start_params[0] = np.log(mod.endog.mean())\n # if we need it, this is desired params\n # p = np.array([-9.43762015, 1.52762442, 2.74155711, 3.58730007,\n # 4.08730007, 1.15987869, 0.12111539])\n\n constr = 'C(agecat)[T.5] - C(agecat)[T.4] = 0.5'\n lc = patsy.DesignInfo(mod.exog_names).linear_constraint(constr)\n cls.res1 = fit_constrained(mod, lc.coefs, lc.constants,\n start_params=start_params,\n fit_kwds={'method': 'bfgs', 'disp': 0})\n # TODO: Newton fails\n\n # test method of Poisson, not monkey patched\n cls.res1m = mod.fit_constrained(constr, start_params=start_params,\n method='bfgs', disp=0)\n\n\nclass TestPoissonConstrained2b(CheckPoissonConstrainedMixin):\n\n @classmethod\n def setup_class(cls):\n\n cls.res2 = results.results_exposure_constraint2\n cls.idx = [6, 2, 3, 4, 5, 0] # 2 is dropped baseline for categorical\n\n # example without offset\n formula = 'deaths ~ smokes + C(agecat)'\n mod = Poisson.from_formula(formula, data=data,\n exposure=data['pyears'].values)\n constr = 'C(agecat)[T.5] - C(agecat)[T.4] = 0.5'\n lc = patsy.DesignInfo(mod.exog_names).linear_constraint(constr)\n cls.res1 = fit_constrained(mod, lc.coefs, lc.constants,\n fit_kwds={'method': 'newton',\n 'disp': 0})\n cls.constraints = lc\n # TODO: bfgs fails to converge. 
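# --- Illustrative sketch (not one of the tests) ------------------------------
# Distilled form of the pattern the surrounding test classes exercise: fit a
# Poisson model subject to a linear equality constraint given as a string.
# ``_example_constrained_fit`` is a hypothetical helper added only for
# illustration; it reuses the module-level ``data``, ``Poisson`` and ``np``
# defined at the top of this file.
def _example_constrained_fit():
    mod = Poisson.from_formula('deaths ~ smokes + C(agecat)', data=data,
                               offset=np.log(data['pyears'].values))
    # same constraint string used by the test classes above
    res = mod.fit_constrained('C(agecat)[T.4] = C(agecat)[T.5]',
                              method='newton', disp=0)
    return res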
overflow somewhere?\n\n # test method of Poisson, not monkey patched\n cls.res1m = mod.fit_constrained(constr, method='bfgs', disp=0,\n start_params=cls.res1[0])\n\n\nclass TestPoissonConstrained2c(CheckPoissonConstrainedMixin):\n\n @classmethod\n def setup_class(cls):\n\n cls.res2 = results.results_exposure_constraint2\n cls.idx = [6, 2, 3, 4, 5, 0] # 2 is dropped baseline for categorical\n\n # example without offset\n formula = 'deaths ~ smokes + C(agecat)'\n mod = Poisson.from_formula(formula, data=data,\n offset=np.log(data['pyears'].values))\n\n constr = 'C(agecat)[T.5] - C(agecat)[T.4] = 0.5'\n lc = patsy.DesignInfo(mod.exog_names).linear_constraint(constr)\n cls.res1 = fit_constrained(mod, lc.coefs, lc.constants,\n fit_kwds={'method': 'newton', 'disp': 0})\n cls.constraints = lc\n # TODO: bfgs fails\n\n # test method of Poisson, not monkey patched\n cls.res1m = mod.fit_constrained(constr,\n method='bfgs', disp=0,\n start_params=cls.res1[0])\n\n\nclass TestGLMPoissonConstrained1a(CheckPoissonConstrainedMixin):\n\n @classmethod\n def setup_class(cls):\n from statsmodels.base._constraints import fit_constrained\n\n cls.res2 = results.results_noexposure_constraint\n # 2 is dropped baseline for categorical\n cls.idx = [7, 3, 4, 5, 6, 0, 1]\n\n # example without offset\n formula = 'deaths ~ logpyears + smokes + C(agecat)'\n mod = GLM.from_formula(formula, data=data,\n family=families.Poisson())\n\n constr = 'C(agecat)[T.4] = C(agecat)[T.5]'\n lc = patsy.DesignInfo(mod.exog_names).linear_constraint(constr)\n cls.res1 = fit_constrained(mod, lc.coefs, lc.constants,\n fit_kwds={'atol': 1e-10})\n cls.constraints = lc\n cls.res1m = mod.fit_constrained(constr, atol=1e-10)\n\n\nclass TestGLMPoissonConstrained1b(CheckPoissonConstrainedMixin):\n\n @classmethod\n def setup_class(cls):\n from statsmodels.genmod.generalized_linear_model import GLM\n from statsmodels.genmod import families\n from statsmodels.base._constraints import fit_constrained\n\n cls.res2 = results.results_exposure_constraint\n cls.idx = [6, 2, 3, 4, 5, 0] # 2 is dropped baseline for categorical\n\n # example with offset\n formula = 'deaths ~ smokes + C(agecat)'\n mod = GLM.from_formula(formula, data=data,\n family=families.Poisson(),\n offset=np.log(data['pyears'].values))\n\n constr = 'C(agecat)[T.4] = C(agecat)[T.5]'\n lc = patsy.DesignInfo(mod.exog_names).linear_constraint(constr)\n\n cls.res1 = fit_constrained(mod, lc.coefs, lc.constants,\n fit_kwds={'atol': 1e-10})\n cls.constraints = lc\n cls.res1m = mod.fit_constrained(constr, atol=1e-10)._results\n\n def test_compare_glm_poisson(self):\n res1 = self.res1m\n res2 = self.res2\n\n formula = 'deaths ~ smokes + C(agecat)'\n mod = Poisson.from_formula(formula, data=data,\n exposure=data['pyears'].values)\n\n constr = 'C(agecat)[T.4] = C(agecat)[T.5]'\n res2 = mod.fit_constrained(constr, start_params=self.res1m.params,\n method='newton', warn_convergence=False,\n disp=0)\n\n # we get high precision because we use the params as start_params\n\n # basic, just as check that we have the same model\n assert_allclose(res1.params, res2.params, rtol=1e-12)\n assert_allclose(res1.bse, res2.bse, rtol=1e-11)\n\n # check predict, fitted, ...\n\n predicted = res1.predict()\n assert_allclose(predicted, res2.predict(), rtol=1e-10)\n assert_allclose(res1.mu, predicted, rtol=1e-10)\n assert_allclose(res1.fittedvalues, predicted, rtol=1e-10)\n assert_allclose(res2.predict(linear=True), res2.predict(linear=True),\n rtol=1e-10)\n\n\nclass CheckGLMConstrainedMixin(CheckPoissonConstrainedMixin):\n # 
add tests for some GLM specific attributes\n\n def test_glm(self):\n res2 = self.res2 # reference results\n res1 = self.res1m\n\n # FIXME: dont leave commented-out\n # assert_allclose(res1.aic, res2.aic, rtol=1e-10) # far away\n # Stata aic in ereturn and in estat ic are very different\n # we have the same as estat ic\n # see issue GH#1733\n assert_allclose(res1.aic, res2.infocrit[4], rtol=1e-10)\n\n import warnings\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", FutureWarning)\n # FutureWarning for BIC changes\n assert_allclose(res1.bic, res2.bic, rtol=1e-10)\n # bic is deviance based\n # FIXME: dont leave commented-out\n # assert_allclose(res1.bic, res2.infocrit[5], rtol=1e-10)\n assert_allclose(res1.deviance, res2.deviance, rtol=1e-10)\n # FIXME: dont leave commented-out\n # TODO: which chi2 are these\n # assert_allclose(res1.pearson_chi2, res2.chi2, rtol=1e-10)\n\n\nclass TestGLMLogitConstrained1(CheckGLMConstrainedMixin):\n\n @classmethod\n def setup_class(cls):\n cls.idx = slice(None)\n # params sequence same as Stata, but Stata reports param = nan\n # and we have param = value = 0\n\n cls.res2 = reslogit.results_constraint1\n\n mod1 = GLM(spector_data.endog, spector_data.exog,\n family=families.Binomial())\n\n constr = 'x1 = 2.8'\n cls.res1m = mod1.fit_constrained(constr)\n\n R, q = cls.res1m.constraints\n cls.res1 = fit_constrained(mod1, R, q)\n\n\nclass TestGLMLogitConstrained2(CheckGLMConstrainedMixin):\n\n @classmethod\n def setup_class(cls):\n cls.idx = slice(None) # params sequence same as Stata\n cls.res2 = reslogit.results_constraint2\n\n mod1 = GLM(spector_data.endog, spector_data.exog,\n family=families.Binomial())\n\n constr = 'x1 - x3 = 0'\n cls.res1m = mod1.fit_constrained(constr, atol=1e-10)\n\n # patsy compatible constraints\n R, q = cls.res1m.constraints.coefs, cls.res1m.constraints.constants\n cls.res1 = fit_constrained(mod1, R, q, fit_kwds={'atol': 1e-10})\n cls.constraints_rq = (R, q)\n\n def test_predict(self):\n # results only available for this case\n res2 = self.res2 # reference results\n res1 = self.res1m\n\n predicted = res1.predict()\n assert_allclose(predicted, res2.predict_mu, atol=1e-7)\n assert_allclose(res1.mu, predicted, rtol=1e-10)\n assert_allclose(res1.fittedvalues, predicted, rtol=1e-10)\n\n @pytest.mark.smoke\n def test_summary(self):\n # trailing text in summary, assumes it's the first extra string\n summ = self.res1m.summary()\n assert_('linear equality constraints' in summ.extra_txt)\n\n lc_string = str(self.res1m.constraints)\n assert lc_string == \"x1 - x3 = 0.0\"\n\n @pytest.mark.smoke\n def test_summary2(self):\n # trailing text in summary, assumes it's the first extra string\n import warnings\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", FutureWarning)\n # FutureWarning for BIC changes\n summ = self.res1m.summary2()\n\n assert_('linear equality constraints' in summ.extra_txt[0])\n\n def test_fit_constrained_wrap(self):\n # minimal test\n res2 = self.res2 # reference results\n\n from statsmodels.base._constraints import fit_constrained_wrap\n res_wrap = fit_constrained_wrap(self.res1m.model, self.constraints_rq)\n assert_allclose(res_wrap.params, res2.params, rtol=1e-6)\n assert_allclose(res_wrap.params, res2.params, rtol=1e-6)\n\n\nclass TestGLMLogitConstrained2HC(CheckGLMConstrainedMixin):\n\n @classmethod\n def setup_class(cls):\n cls.idx = slice(None) # params sequence same as Stata\n cls.res2 = reslogit.results_constraint2_robust\n\n mod1 = GLM(spector_data.endog, spector_data.exog,\n 
family=families.Binomial())\n\n # not used to match Stata for HC\n # nobs, k_params = mod1.exog.shape\n # k_params -= 1 # one constraint\n cov_type = 'HC0'\n cov_kwds = {'scaling_factor': 32/31}\n # looks like nobs / (nobs - 1) and not (nobs - 1.) / (nobs - k_params)}\n constr = 'x1 - x3 = 0'\n cls.res1m = mod1.fit_constrained(constr, cov_type=cov_type,\n cov_kwds=cov_kwds, atol=1e-10)\n\n R, q = cls.res1m.constraints\n cls.res1 = fit_constrained(mod1, R, q, fit_kwds={'atol': 1e-10,\n 'cov_type': cov_type,\n 'cov_kwds': cov_kwds})\n cls.constraints_rq = (R, q)\n\n\ndef junk(): # FIXME: make this into a test, or move/remove\n # Singular Matrix in mod1a.fit()\n\n # same as Stata default\n formula2 = 'deaths ~ C(agecat) + C(smokes) : C(agecat)'\n\n mod = Poisson.from_formula(formula2, data=data,\n exposure=data['pyears'].values)\n\n mod.fit()\n\n constraints = 'C(smokes)[T.1]:C(agecat)[3] = C(smokes)[T.1]:C(agec`at)[4]'\n\n import patsy\n lc = patsy.DesignInfo(mod.exog_names).linear_constraint(constraints)\n R, q = lc.coefs, lc.constants\n\n mod.fit_constrained(R, q, fit_kwds={'method': 'bfgs'})\n\n # example without offset\n formula1a = 'deaths ~ logpyears + smokes + C(agecat)'\n mod1a = Poisson.from_formula(formula1a, data=data)\n\n mod1a.fit()\n lc_1a = patsy.DesignInfo(mod1a.exog_names).linear_constraint(\n 'C(agecat)[T.4] = C(agecat)[T.5]')\n mod1a.fit_constrained(lc_1a.coefs, lc_1a.constants,\n fit_kwds={'method': 'newton'})\n", "\"\"\"\n===================================================================\nHermiteE Series, \"Probabilists\" (:mod:`numpy.polynomial.hermite_e`)\n===================================================================\n\nThis module provides a number of objects (mostly functions) useful for\ndealing with Hermite_e series, including a `HermiteE` class that\nencapsulates the usual arithmetic operations. (General information\non how this module represents and works with such polynomials is in the\ndocstring for its \"parent\" sub-package, `numpy.polynomial`).\n\nClasses\n-------\n.. autosummary::\n :toctree: generated/\n\n HermiteE\n\nConstants\n---------\n.. autosummary::\n :toctree: generated/\n\n hermedomain\n hermezero\n hermeone\n hermex\n\nArithmetic\n----------\n.. autosummary::\n :toctree: generated/\n\n hermeadd\n hermesub\n hermemulx\n hermemul\n hermediv\n hermepow\n hermeval\n hermeval2d\n hermeval3d\n hermegrid2d\n hermegrid3d\n\nCalculus\n--------\n.. autosummary::\n :toctree: generated/\n\n hermeder\n hermeint\n\nMisc Functions\n--------------\n.. autosummary::\n :toctree: generated/\n\n hermefromroots\n hermeroots\n hermevander\n hermevander2d\n hermevander3d\n hermegauss\n hermeweight\n hermecompanion\n hermefit\n hermetrim\n hermeline\n herme2poly\n poly2herme\n\nSee also\n--------\n`numpy.polynomial`\n\n\"\"\"\nimport numpy as np\nimport numpy.linalg as la\nfrom numpy.core.multiarray import normalize_axis_index\n\nfrom . 
import polyutils as pu\nfrom ._polybase import ABCPolyBase\n\n__all__ = [\n 'hermezero', 'hermeone', 'hermex', 'hermedomain', 'hermeline',\n 'hermeadd', 'hermesub', 'hermemulx', 'hermemul', 'hermediv',\n 'hermepow', 'hermeval', 'hermeder', 'hermeint', 'herme2poly',\n 'poly2herme', 'hermefromroots', 'hermevander', 'hermefit', 'hermetrim',\n 'hermeroots', 'HermiteE', 'hermeval2d', 'hermeval3d', 'hermegrid2d',\n 'hermegrid3d', 'hermevander2d', 'hermevander3d', 'hermecompanion',\n 'hermegauss', 'hermeweight']\n\nhermetrim = pu.trimcoef\n\n\ndef poly2herme(pol):\n \"\"\"\n poly2herme(pol)\n\n Convert a polynomial to a Hermite series.\n\n Convert an array representing the coefficients of a polynomial (relative\n to the \"standard\" basis) ordered from lowest degree to highest, to an\n array of the coefficients of the equivalent Hermite series, ordered\n from lowest to highest degree.\n\n Parameters\n ----------\n pol : array_like\n 1-D array containing the polynomial coefficients\n\n Returns\n -------\n c : ndarray\n 1-D array containing the coefficients of the equivalent Hermite\n series.\n\n See Also\n --------\n herme2poly\n\n Notes\n -----\n The easy way to do conversions between polynomial basis sets\n is to use the convert method of a class instance.\n\n Examples\n --------\n >>> from numpy.polynomial.hermite_e import poly2herme\n >>> poly2herme(np.arange(4))\n array([ 2., 10., 2., 3.])\n\n \"\"\"\n [pol] = pu.as_series([pol])\n deg = len(pol) - 1\n res = 0\n for i in range(deg, -1, -1):\n res = hermeadd(hermemulx(res), pol[i])\n return res\n\n\ndef herme2poly(c):\n \"\"\"\n Convert a Hermite series to a polynomial.\n\n Convert an array representing the coefficients of a Hermite series,\n ordered from lowest degree to highest, to an array of the coefficients\n of the equivalent polynomial (relative to the \"standard\" basis) ordered\n from lowest to highest degree.\n\n Parameters\n ----------\n c : array_like\n 1-D array containing the Hermite series coefficients, ordered\n from lowest order term to highest.\n\n Returns\n -------\n pol : ndarray\n 1-D array containing the coefficients of the equivalent polynomial\n (relative to the \"standard\" basis) ordered from lowest order term\n to highest.\n\n See Also\n --------\n poly2herme\n\n Notes\n -----\n The easy way to do conversions between polynomial basis sets\n is to use the convert method of a class instance.\n\n Examples\n --------\n >>> from numpy.polynomial.hermite_e import herme2poly\n >>> herme2poly([ 2., 10., 2., 3.])\n array([0., 1., 2., 3.])\n\n \"\"\"\n from .polynomial import polyadd, polysub, polymulx\n\n [c] = pu.as_series([c])\n n = len(c)\n if n == 1:\n return c\n if n == 2:\n return c\n else:\n c0 = c[-2]\n c1 = c[-1]\n # i is the current degree of c1\n for i in range(n - 1, 1, -1):\n tmp = c0\n c0 = polysub(c[i - 2], c1*(i - 1))\n c1 = polyadd(tmp, polymulx(c1))\n return polyadd(c0, polymulx(c1))\n\n#\n# These are constant arrays are of integer type so as to be compatible\n# with the widest range of other types, such as Decimal.\n#\n\n# Hermite\nhermedomain = np.array([-1, 1])\n\n# Hermite coefficients representing zero.\nhermezero = np.array([0])\n\n# Hermite coefficients representing one.\nhermeone = np.array([1])\n\n# Hermite coefficients representing the identity x.\nhermex = np.array([0, 1])\n\n\ndef hermeline(off, scl):\n \"\"\"\n Hermite series whose graph is a straight line.\n\n Parameters\n ----------\n off, scl : scalars\n The specified line is given by ``off + scl*x``.\n\n Returns\n -------\n y : ndarray\n 
This module's representation of the Hermite series for\n ``off + scl*x``.\n\n See Also\n --------\n numpy.polynomial.polynomial.polyline\n numpy.polynomial.chebyshev.chebline\n numpy.polynomial.legendre.legline\n numpy.polynomial.laguerre.lagline\n numpy.polynomial.hermite.hermline\n\n Examples\n --------\n >>> from numpy.polynomial.hermite_e import hermeline\n >>> from numpy.polynomial.hermite_e import hermeline, hermeval\n >>> hermeval(0,hermeline(3, 2))\n 3.0\n >>> hermeval(1,hermeline(3, 2))\n 5.0\n\n \"\"\"\n if scl != 0:\n return np.array([off, scl])\n else:\n return np.array([off])\n\n\ndef hermefromroots(roots):\n \"\"\"\n Generate a HermiteE series with given roots.\n\n The function returns the coefficients of the polynomial\n\n .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),\n\n in HermiteE form, where the `r_n` are the roots specified in `roots`.\n If a zero has multiplicity n, then it must appear in `roots` n times.\n For instance, if 2 is a root of multiplicity three and 3 is a root of\n multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The\n roots can appear in any order.\n\n If the returned coefficients are `c`, then\n\n .. math:: p(x) = c_0 + c_1 * He_1(x) + ... + c_n * He_n(x)\n\n The coefficient of the last term is not generally 1 for monic\n polynomials in HermiteE form.\n\n Parameters\n ----------\n roots : array_like\n Sequence containing the roots.\n\n Returns\n -------\n out : ndarray\n 1-D array of coefficients. If all roots are real then `out` is a\n real array, if some of the roots are complex, then `out` is complex\n even if all the coefficients in the result are real (see Examples\n below).\n\n See Also\n --------\n numpy.polynomial.polynomial.polyfromroots\n numpy.polynomial.legendre.legfromroots\n numpy.polynomial.laguerre.lagfromroots\n numpy.polynomial.hermite.hermfromroots\n numpy.polynomial.chebyshev.chebfromroots\n\n Examples\n --------\n >>> from numpy.polynomial.hermite_e import hermefromroots, hermeval\n >>> coef = hermefromroots((-1, 0, 1))\n >>> hermeval((-1, 0, 1), coef)\n array([0., 0., 0.])\n >>> coef = hermefromroots((-1j, 1j))\n >>> hermeval((-1j, 1j), coef)\n array([0.+0.j, 0.+0.j])\n\n \"\"\"\n return pu._fromroots(hermeline, hermemul, roots)\n\n\ndef hermeadd(c1, c2):\n \"\"\"\n Add one Hermite series to another.\n\n Returns the sum of two Hermite series `c1` + `c2`. The arguments\n are sequences of coefficients ordered from lowest order term to\n highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.\n\n Parameters\n ----------\n c1, c2 : array_like\n 1-D arrays of Hermite series coefficients ordered from low to\n high.\n\n Returns\n -------\n out : ndarray\n Array representing the Hermite series of their sum.\n\n See Also\n --------\n hermesub, hermemulx, hermemul, hermediv, hermepow\n\n Notes\n -----\n Unlike multiplication, division, etc., the sum of two Hermite series\n is a Hermite series (without having to \"reproject\" the result onto\n the basis set) so addition, just like that of \"standard\" polynomials,\n is simply \"component-wise.\"\n\n Examples\n --------\n >>> from numpy.polynomial.hermite_e import hermeadd\n >>> hermeadd([1, 2, 3], [1, 2, 3, 4])\n array([2., 4., 6., 4.])\n\n \"\"\"\n return pu._add(c1, c2)\n\n\ndef hermesub(c1, c2):\n \"\"\"\n Subtract one Hermite series from another.\n\n Returns the difference of two Hermite series `c1` - `c2`. 
The\n sequences of coefficients are from lowest order term to highest, i.e.,\n [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.\n\n Parameters\n ----------\n c1, c2 : array_like\n 1-D arrays of Hermite series coefficients ordered from low to\n high.\n\n Returns\n -------\n out : ndarray\n Of Hermite series coefficients representing their difference.\n\n See Also\n --------\n hermeadd, hermemulx, hermemul, hermediv, hermepow\n\n Notes\n -----\n Unlike multiplication, division, etc., the difference of two Hermite\n series is a Hermite series (without having to \"reproject\" the result\n onto the basis set) so subtraction, just like that of \"standard\"\n polynomials, is simply \"component-wise.\"\n\n Examples\n --------\n >>> from numpy.polynomial.hermite_e import hermesub\n >>> hermesub([1, 2, 3, 4], [1, 2, 3])\n array([0., 0., 0., 4.])\n\n \"\"\"\n return pu._sub(c1, c2)\n\n\ndef hermemulx(c):\n \"\"\"Multiply a Hermite series by x.\n\n Multiply the Hermite series `c` by x, where x is the independent\n variable.\n\n\n Parameters\n ----------\n c : array_like\n 1-D array of Hermite series coefficients ordered from low to\n high.\n\n Returns\n -------\n out : ndarray\n Array representing the result of the multiplication.\n\n Notes\n -----\n The multiplication uses the recursion relationship for Hermite\n polynomials in the form\n\n .. math::\n\n xP_i(x) = (P_{i + 1}(x) + iP_{i - 1}(x)))\n\n Examples\n --------\n >>> from numpy.polynomial.hermite_e import hermemulx\n >>> hermemulx([1, 2, 3])\n array([2., 7., 2., 3.])\n\n \"\"\"\n # c is a trimmed copy\n [c] = pu.as_series([c])\n # The zero series needs special treatment\n if len(c) == 1 and c[0] == 0:\n return c\n\n prd = np.empty(len(c) + 1, dtype=c.dtype)\n prd[0] = c[0]*0\n prd[1] = c[0]\n for i in range(1, len(c)):\n prd[i + 1] = c[i]\n prd[i - 1] += c[i]*i\n return prd\n\n\ndef hermemul(c1, c2):\n \"\"\"\n Multiply one Hermite series by another.\n\n Returns the product of two Hermite series `c1` * `c2`. The arguments\n are sequences of coefficients, from lowest order \"term\" to highest,\n e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.\n\n Parameters\n ----------\n c1, c2 : array_like\n 1-D arrays of Hermite series coefficients ordered from low to\n high.\n\n Returns\n -------\n out : ndarray\n Of Hermite series coefficients representing their product.\n\n See Also\n --------\n hermeadd, hermesub, hermemulx, hermediv, hermepow\n\n Notes\n -----\n In general, the (polynomial) product of two C-series results in terms\n that are not in the Hermite polynomial basis set. 
Thus, to express\n the product as a Hermite series, it is necessary to \"reproject\" the\n product onto said basis set, which may produce \"unintuitive\" (but\n correct) results; see Examples section below.\n\n Examples\n --------\n >>> from numpy.polynomial.hermite_e import hermemul\n >>> hermemul([1, 2, 3], [0, 1, 2])\n array([14., 15., 28., 7., 6.])\n\n \"\"\"\n # s1, s2 are trimmed copies\n [c1, c2] = pu.as_series([c1, c2])\n\n if len(c1) > len(c2):\n c = c2\n xs = c1\n else:\n c = c1\n xs = c2\n\n if len(c) == 1:\n c0 = c[0]*xs\n c1 = 0\n elif len(c) == 2:\n c0 = c[0]*xs\n c1 = c[1]*xs\n else:\n nd = len(c)\n c0 = c[-2]*xs\n c1 = c[-1]*xs\n for i in range(3, len(c) + 1):\n tmp = c0\n nd = nd - 1\n c0 = hermesub(c[-i]*xs, c1*(nd - 1))\n c1 = hermeadd(tmp, hermemulx(c1))\n return hermeadd(c0, hermemulx(c1))\n\n\ndef hermediv(c1, c2):\n \"\"\"\n Divide one Hermite series by another.\n\n Returns the quotient-with-remainder of two Hermite series\n `c1` / `c2`. The arguments are sequences of coefficients from lowest\n order \"term\" to highest, e.g., [1,2,3] represents the series\n ``P_0 + 2*P_1 + 3*P_2``.\n\n Parameters\n ----------\n c1, c2 : array_like\n 1-D arrays of Hermite series coefficients ordered from low to\n high.\n\n Returns\n -------\n [quo, rem] : ndarrays\n Of Hermite series coefficients representing the quotient and\n remainder.\n\n See Also\n --------\n hermeadd, hermesub, hermemulx, hermemul, hermepow\n\n Notes\n -----\n In general, the (polynomial) division of one Hermite series by another\n results in quotient and remainder terms that are not in the Hermite\n polynomial basis set. Thus, to express these results as a Hermite\n series, it is necessary to \"reproject\" the results onto the Hermite\n basis set, which may produce \"unintuitive\" (but correct) results; see\n Examples section below.\n\n Examples\n --------\n >>> from numpy.polynomial.hermite_e import hermediv\n >>> hermediv([ 14., 15., 28., 7., 6.], [0, 1, 2])\n (array([1., 2., 3.]), array([0.]))\n >>> hermediv([ 15., 17., 28., 7., 6.], [0, 1, 2])\n (array([1., 2., 3.]), array([1., 2.]))\n\n \"\"\"\n return pu._div(hermemul, c1, c2)\n\n\ndef hermepow(c, pow, maxpower=16):\n \"\"\"Raise a Hermite series to a power.\n\n Returns the Hermite series `c` raised to the power `pow`. The\n argument `c` is a sequence of coefficients ordered from low to high.\n i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.``\n\n Parameters\n ----------\n c : array_like\n 1-D array of Hermite series coefficients ordered from low to\n high.\n pow : integer\n Power to which the series will be raised\n maxpower : integer, optional\n Maximum power allowed. This is mainly to limit growth of the series\n to unmanageable size. Default is 16\n\n Returns\n -------\n coef : ndarray\n Hermite series of power.\n\n See Also\n --------\n hermeadd, hermesub, hermemulx, hermemul, hermediv\n\n Examples\n --------\n >>> from numpy.polynomial.hermite_e import hermepow\n >>> hermepow([1, 2, 3], 2)\n array([23., 28., 46., 12., 9.])\n\n \"\"\"\n return pu._pow(hermemul, c, pow, maxpower)\n\n\ndef hermeder(c, m=1, scl=1, axis=0):\n \"\"\"\n Differentiate a Hermite_e series.\n\n Returns the series coefficients `c` differentiated `m` times along\n `axis`. At each iteration the result is multiplied by `scl` (the\n scaling factor is for use in a linear change of variable). 
The argument\n `c` is an array of coefficients from low to high degree along each\n axis, e.g., [1,2,3] represents the series ``1*He_0 + 2*He_1 + 3*He_2``\n while [[1,2],[1,2]] represents ``1*He_0(x)*He_0(y) + 1*He_1(x)*He_0(y)\n + 2*He_0(x)*He_1(y) + 2*He_1(x)*He_1(y)`` if axis=0 is ``x`` and axis=1\n is ``y``.\n\n Parameters\n ----------\n c : array_like\n Array of Hermite_e series coefficients. If `c` is multidimensional\n the different axis correspond to different variables with the\n degree in each axis given by the corresponding index.\n m : int, optional\n Number of derivatives taken, must be non-negative. (Default: 1)\n scl : scalar, optional\n Each differentiation is multiplied by `scl`. The end result is\n multiplication by ``scl**m``. This is for use in a linear change of\n variable. (Default: 1)\n axis : int, optional\n Axis over which the derivative is taken. (Default: 0).\n\n .. versionadded:: 1.7.0\n\n Returns\n -------\n der : ndarray\n Hermite series of the derivative.\n\n See Also\n --------\n hermeint\n\n Notes\n -----\n In general, the result of differentiating a Hermite series does not\n resemble the same operation on a power series. Thus the result of this\n function may be \"unintuitive,\" albeit correct; see Examples section\n below.\n\n Examples\n --------\n >>> from numpy.polynomial.hermite_e import hermeder\n >>> hermeder([ 1., 1., 1., 1.])\n array([1., 2., 3.])\n >>> hermeder([-0.25, 1., 1./2., 1./3., 1./4 ], m=2)\n array([1., 2., 3.])\n\n \"\"\"\n c = np.array(c, ndmin=1, copy=True)\n if c.dtype.char in '?bBhHiIlLqQpP':\n c = c.astype(np.double)\n cnt = pu._deprecate_as_int(m, \"the order of derivation\")\n iaxis = pu._deprecate_as_int(axis, \"the axis\")\n if cnt < 0:\n raise ValueError(\"The order of derivation must be non-negative\")\n iaxis = normalize_axis_index(iaxis, c.ndim)\n\n if cnt == 0:\n return c\n\n c = np.moveaxis(c, iaxis, 0)\n n = len(c)\n if cnt >= n:\n return c[:1]*0\n else:\n for i in range(cnt):\n n = n - 1\n c *= scl\n der = np.empty((n,) + c.shape[1:], dtype=c.dtype)\n for j in range(n, 0, -1):\n der[j - 1] = j*c[j]\n c = der\n c = np.moveaxis(c, 0, iaxis)\n return c\n\n\ndef hermeint(c, m=1, k=[], lbnd=0, scl=1, axis=0):\n \"\"\"\n Integrate a Hermite_e series.\n\n Returns the Hermite_e series coefficients `c` integrated `m` times from\n `lbnd` along `axis`. At each iteration the resulting series is\n **multiplied** by `scl` and an integration constant, `k`, is added.\n The scaling factor is for use in a linear change of variable. (\"Buyer\n beware\": note that, depending on what one is doing, one may want `scl`\n to be the reciprocal of what one might expect; for more information,\n see the Notes section below.) The argument `c` is an array of\n coefficients from low to high degree along each axis, e.g., [1,2,3]\n represents the series ``H_0 + 2*H_1 + 3*H_2`` while [[1,2],[1,2]]\n represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) + 2*H_0(x)*H_1(y) +\n 2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``.\n\n Parameters\n ----------\n c : array_like\n Array of Hermite_e series coefficients. If c is multidimensional\n the different axis correspond to different variables with the\n degree in each axis given by the corresponding index.\n m : int, optional\n Order of integration, must be positive. (Default: 1)\n k : {[], list, scalar}, optional\n Integration constant(s). The value of the first integral at\n ``lbnd`` is the first value in the list, the value of the second\n integral at ``lbnd`` is the second value, etc. 
If ``k == []`` (the\n default), all constants are set to zero. If ``m == 1``, a single\n scalar can be given instead of a list.\n lbnd : scalar, optional\n The lower bound of the integral. (Default: 0)\n scl : scalar, optional\n Following each integration the result is *multiplied* by `scl`\n before the integration constant is added. (Default: 1)\n axis : int, optional\n Axis over which the integral is taken. (Default: 0).\n\n .. versionadded:: 1.7.0\n\n Returns\n -------\n S : ndarray\n Hermite_e series coefficients of the integral.\n\n Raises\n ------\n ValueError\n If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or\n ``np.ndim(scl) != 0``.\n\n See Also\n --------\n hermeder\n\n Notes\n -----\n Note that the result of each integration is *multiplied* by `scl`.\n Why is this important to note? Say one is making a linear change of\n variable :math:`u = ax + b` in an integral relative to `x`. Then\n :math:`dx = du/a`, so one will need to set `scl` equal to\n :math:`1/a` - perhaps not what one would have first thought.\n\n Also note that, in general, the result of integrating a C-series needs\n to be \"reprojected\" onto the C-series basis set. Thus, typically,\n the result of this function is \"unintuitive,\" albeit correct; see\n Examples section below.\n\n Examples\n --------\n >>> from numpy.polynomial.hermite_e import hermeint\n >>> hermeint([1, 2, 3]) # integrate once, value 0 at 0.\n array([1., 1., 1., 1.])\n >>> hermeint([1, 2, 3], m=2) # integrate twice, value & deriv 0 at 0\n array([-0.25 , 1. , 0.5 , 0.33333333, 0.25 ]) # may vary\n >>> hermeint([1, 2, 3], k=1) # integrate once, value 1 at 0.\n array([2., 1., 1., 1.])\n >>> hermeint([1, 2, 3], lbnd=-1) # integrate once, value 0 at -1\n array([-1., 1., 1., 1.])\n >>> hermeint([1, 2, 3], m=2, k=[1, 2], lbnd=-1)\n array([ 1.83333333, 0. , 0.5 , 0.33333333, 0.25 ]) # may vary\n\n \"\"\"\n c = np.array(c, ndmin=1, copy=True)\n if c.dtype.char in '?bBhHiIlLqQpP':\n c = c.astype(np.double)\n if not np.iterable(k):\n k = [k]\n cnt = pu._deprecate_as_int(m, \"the order of integration\")\n iaxis = pu._deprecate_as_int(axis, \"the axis\")\n if cnt < 0:\n raise ValueError(\"The order of integration must be non-negative\")\n if len(k) > cnt:\n raise ValueError(\"Too many integration constants\")\n if np.ndim(lbnd) != 0:\n raise ValueError(\"lbnd must be a scalar.\")\n if np.ndim(scl) != 0:\n raise ValueError(\"scl must be a scalar.\")\n iaxis = normalize_axis_index(iaxis, c.ndim)\n\n if cnt == 0:\n return c\n\n c = np.moveaxis(c, iaxis, 0)\n k = list(k) + [0]*(cnt - len(k))\n for i in range(cnt):\n n = len(c)\n c *= scl\n if n == 1 and np.all(c[0] == 0):\n c[0] += k[i]\n else:\n tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)\n tmp[0] = c[0]*0\n tmp[1] = c[0]\n for j in range(1, n):\n tmp[j + 1] = c[j]/(j + 1)\n tmp[0] += k[i] - hermeval(lbnd, tmp)\n c = tmp\n c = np.moveaxis(c, 0, iaxis)\n return c\n\n\ndef hermeval(x, c, tensor=True):\n \"\"\"\n Evaluate an HermiteE series at points x.\n\n If `c` is of length `n + 1`, this function returns the value:\n\n .. math:: p(x) = c_0 * He_0(x) + c_1 * He_1(x) + ... + c_n * He_n(x)\n\n The parameter `x` is converted to an array only if it is a tuple or a\n list, otherwise it is treated as a scalar. In either case, either `x`\n or its elements must support multiplication and addition both with\n themselves and with the elements of `c`.\n\n If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. 
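# --- Illustrative sketch -----------------------------------------------------
# Quick check of the relationship between hermeder and hermeint defined above:
# differentiating the m-fold integral of a series recovers the original
# coefficients, since the integration constants default to zero. This is an
# assumed example, not part of the module.
import numpy as np
from numpy.polynomial.hermite_e import hermeder, hermeint

c = np.array([1.0, 2.0, 3.0])
assert np.allclose(hermeder(hermeint(c, m=2), m=2), c)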
If\n `c` is multidimensional, then the shape of the result depends on the\n value of `tensor`. If `tensor` is true the shape will be c.shape[1:] +\n x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that\n scalars have shape (,).\n\n Trailing zeros in the coefficients will be used in the evaluation, so\n they should be avoided if efficiency is a concern.\n\n Parameters\n ----------\n x : array_like, compatible object\n If `x` is a list or tuple, it is converted to an ndarray, otherwise\n it is left unchanged and treated as a scalar. In either case, `x`\n or its elements must support addition and multiplication with\n with themselves and with the elements of `c`.\n c : array_like\n Array of coefficients ordered so that the coefficients for terms of\n degree n are contained in c[n]. If `c` is multidimensional the\n remaining indices enumerate multiple polynomials. In the two\n dimensional case the coefficients may be thought of as stored in\n the columns of `c`.\n tensor : boolean, optional\n If True, the shape of the coefficient array is extended with ones\n on the right, one for each dimension of `x`. Scalars have dimension 0\n for this action. The result is that every column of coefficients in\n `c` is evaluated for every element of `x`. If False, `x` is broadcast\n over the columns of `c` for the evaluation. This keyword is useful\n when `c` is multidimensional. The default value is True.\n\n .. versionadded:: 1.7.0\n\n Returns\n -------\n values : ndarray, algebra_like\n The shape of the return value is described above.\n\n See Also\n --------\n hermeval2d, hermegrid2d, hermeval3d, hermegrid3d\n\n Notes\n -----\n The evaluation uses Clenshaw recursion, aka synthetic division.\n\n Examples\n --------\n >>> from numpy.polynomial.hermite_e import hermeval\n >>> coef = [1,2,3]\n >>> hermeval(1, coef)\n 3.0\n >>> hermeval([[1,2],[3,4]], coef)\n array([[ 3., 14.],\n [31., 54.]])\n\n \"\"\"\n c = np.array(c, ndmin=1, copy=False)\n if c.dtype.char in '?bBhHiIlLqQpP':\n c = c.astype(np.double)\n if isinstance(x, (tuple, list)):\n x = np.asarray(x)\n if isinstance(x, np.ndarray) and tensor:\n c = c.reshape(c.shape + (1,)*x.ndim)\n\n if len(c) == 1:\n c0 = c[0]\n c1 = 0\n elif len(c) == 2:\n c0 = c[0]\n c1 = c[1]\n else:\n nd = len(c)\n c0 = c[-2]\n c1 = c[-1]\n for i in range(3, len(c) + 1):\n tmp = c0\n nd = nd - 1\n c0 = c[-i] - c1*(nd - 1)\n c1 = tmp + c1*x\n return c0 + c1*x\n\n\ndef hermeval2d(x, y, c):\n \"\"\"\n Evaluate a 2-D HermiteE series at points (x, y).\n\n This function returns the values:\n\n .. math:: p(x,y) = \\\\sum_{i,j} c_{i,j} * He_i(x) * He_j(y)\n\n The parameters `x` and `y` are converted to arrays only if they are\n tuples or a lists, otherwise they are treated as a scalars and they\n must have the same shape after conversion. In either case, either `x`\n and `y` or their elements must support multiplication and addition both\n with themselves and with the elements of `c`.\n\n If `c` is a 1-D array a one is implicitly appended to its shape to make\n it 2-D. The shape of the result will be c.shape[2:] + x.shape.\n\n Parameters\n ----------\n x, y : array_like, compatible objects\n The two dimensional series is evaluated at the points `(x, y)`,\n where `x` and `y` must have the same shape. 
If `x` or `y` is a list\n or tuple, it is first converted to an ndarray, otherwise it is left\n unchanged and if it isn't an ndarray it is treated as a scalar.\n c : array_like\n Array of coefficients ordered so that the coefficient of the term\n of multi-degree i,j is contained in ``c[i,j]``. If `c` has\n dimension greater than two the remaining indices enumerate multiple\n sets of coefficients.\n\n Returns\n -------\n values : ndarray, compatible object\n The values of the two dimensional polynomial at points formed with\n pairs of corresponding values from `x` and `y`.\n\n See Also\n --------\n hermeval, hermegrid2d, hermeval3d, hermegrid3d\n\n Notes\n -----\n\n .. versionadded:: 1.7.0\n\n \"\"\"\n return pu._valnd(hermeval, c, x, y)\n\n\ndef hermegrid2d(x, y, c):\n \"\"\"\n Evaluate a 2-D HermiteE series on the Cartesian product of x and y.\n\n This function returns the values:\n\n .. math:: p(a,b) = \\\\sum_{i,j} c_{i,j} * H_i(a) * H_j(b)\n\n where the points `(a, b)` consist of all pairs formed by taking\n `a` from `x` and `b` from `y`. The resulting points form a grid with\n `x` in the first dimension and `y` in the second.\n\n The parameters `x` and `y` are converted to arrays only if they are\n tuples or a lists, otherwise they are treated as a scalars. In either\n case, either `x` and `y` or their elements must support multiplication\n and addition both with themselves and with the elements of `c`.\n\n If `c` has fewer than two dimensions, ones are implicitly appended to\n its shape to make it 2-D. The shape of the result will be c.shape[2:] +\n x.shape.\n\n Parameters\n ----------\n x, y : array_like, compatible objects\n The two dimensional series is evaluated at the points in the\n Cartesian product of `x` and `y`. If `x` or `y` is a list or\n tuple, it is first converted to an ndarray, otherwise it is left\n unchanged and, if it isn't an ndarray, it is treated as a scalar.\n c : array_like\n Array of coefficients ordered so that the coefficients for terms of\n degree i,j are contained in ``c[i,j]``. If `c` has dimension\n greater than two the remaining indices enumerate multiple sets of\n coefficients.\n\n Returns\n -------\n values : ndarray, compatible object\n The values of the two dimensional polynomial at points in the Cartesian\n product of `x` and `y`.\n\n See Also\n --------\n hermeval, hermeval2d, hermeval3d, hermegrid3d\n\n Notes\n -----\n\n .. versionadded:: 1.7.0\n\n \"\"\"\n return pu._gridnd(hermeval, c, x, y)\n\n\ndef hermeval3d(x, y, z, c):\n \"\"\"\n Evaluate a 3-D Hermite_e series at points (x, y, z).\n\n This function returns the values:\n\n .. math:: p(x,y,z) = \\\\sum_{i,j,k} c_{i,j,k} * He_i(x) * He_j(y) * He_k(z)\n\n The parameters `x`, `y`, and `z` are converted to arrays only if\n they are tuples or a lists, otherwise they are treated as a scalars and\n they must have the same shape after conversion. In either case, either\n `x`, `y`, and `z` or their elements must support multiplication and\n addition both with themselves and with the elements of `c`.\n\n If `c` has fewer than 3 dimensions, ones are implicitly appended to its\n shape to make it 3-D. The shape of the result will be c.shape[3:] +\n x.shape.\n\n Parameters\n ----------\n x, y, z : array_like, compatible object\n The three dimensional series is evaluated at the points\n `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. 
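# --- Illustrative sketch -----------------------------------------------------
# hermeval2d and hermegrid2d above carry no Examples section; this assumed
# example shows the difference between pointwise evaluation at the pairs
# (x[i], y[i]) and evaluation on the full Cartesian product of x and y.
import numpy as np
from numpy.polynomial.hermite_e import hermeval2d, hermegrid2d

c = np.array([[1.0, 2.0],
              [3.0, 4.0]])       # c[i, j] multiplies He_i(x) * He_j(y)
x = np.array([0.0, 1.0])
y = np.array([0.0, 2.0])

pointwise = hermeval2d(x, y, c)  # shape (2,): p(0, 0) and p(1, 2)
grid = hermegrid2d(x, y, c)      # shape (2, 2): all pairs from x and y
assert np.allclose(pointwise, grid.diagonal())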
If\n any of `x`, `y`, or `z` is a list or tuple, it is first converted\n to an ndarray, otherwise it is left unchanged and if it isn't an\n ndarray it is treated as a scalar.\n c : array_like\n Array of coefficients ordered so that the coefficient of the term of\n multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension\n greater than 3 the remaining indices enumerate multiple sets of\n coefficients.\n\n Returns\n -------\n values : ndarray, compatible object\n The values of the multidimensional polynomial on points formed with\n triples of corresponding values from `x`, `y`, and `z`.\n\n See Also\n --------\n hermeval, hermeval2d, hermegrid2d, hermegrid3d\n\n Notes\n -----\n\n .. versionadded:: 1.7.0\n\n \"\"\"\n return pu._valnd(hermeval, c, x, y, z)\n\n\ndef hermegrid3d(x, y, z, c):\n \"\"\"\n Evaluate a 3-D HermiteE series on the Cartesian product of x, y, and z.\n\n This function returns the values:\n\n .. math:: p(a,b,c) = \\\\sum_{i,j,k} c_{i,j,k} * He_i(a) * He_j(b) * He_k(c)\n\n where the points `(a, b, c)` consist of all triples formed by taking\n `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form\n a grid with `x` in the first dimension, `y` in the second, and `z` in\n the third.\n\n The parameters `x`, `y`, and `z` are converted to arrays only if they\n are tuples or a lists, otherwise they are treated as a scalars. In\n either case, either `x`, `y`, and `z` or their elements must support\n multiplication and addition both with themselves and with the elements\n of `c`.\n\n If `c` has fewer than three dimensions, ones are implicitly appended to\n its shape to make it 3-D. The shape of the result will be c.shape[3:] +\n x.shape + y.shape + z.shape.\n\n Parameters\n ----------\n x, y, z : array_like, compatible objects\n The three dimensional series is evaluated at the points in the\n Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a\n list or tuple, it is first converted to an ndarray, otherwise it is\n left unchanged and, if it isn't an ndarray, it is treated as a\n scalar.\n c : array_like\n Array of coefficients ordered so that the coefficients for terms of\n degree i,j are contained in ``c[i,j]``. If `c` has dimension\n greater than two the remaining indices enumerate multiple sets of\n coefficients.\n\n Returns\n -------\n values : ndarray, compatible object\n The values of the two dimensional polynomial at points in the Cartesian\n product of `x` and `y`.\n\n See Also\n --------\n hermeval, hermeval2d, hermegrid2d, hermeval3d\n\n Notes\n -----\n\n .. versionadded:: 1.7.0\n\n \"\"\"\n return pu._gridnd(hermeval, c, x, y, z)\n\n\ndef hermevander(x, deg):\n \"\"\"Pseudo-Vandermonde matrix of given degree.\n\n Returns the pseudo-Vandermonde matrix of degree `deg` and sample points\n `x`. The pseudo-Vandermonde matrix is defined by\n\n .. math:: V[..., i] = He_i(x),\n\n where `0 <= i <= deg`. The leading indices of `V` index the elements of\n `x` and the last index is the degree of the HermiteE polynomial.\n\n If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the\n array ``V = hermevander(x, n)``, then ``np.dot(V, c)`` and\n ``hermeval(x, c)`` are the same up to roundoff. This equivalence is\n useful both for least squares fitting and for the evaluation of a large\n number of HermiteE series of the same degree and sample points.\n\n Parameters\n ----------\n x : array_like\n Array of points. The dtype is converted to float64 or complex128\n depending on whether any of the elements are complex. 
If `x` is\n scalar it is converted to a 1-D array.\n deg : int\n Degree of the resulting matrix.\n\n Returns\n -------\n vander : ndarray\n The pseudo-Vandermonde matrix. The shape of the returned matrix is\n ``x.shape + (deg + 1,)``, where The last index is the degree of the\n corresponding HermiteE polynomial. The dtype will be the same as\n the converted `x`.\n\n Examples\n --------\n >>> from numpy.polynomial.hermite_e import hermevander\n >>> x = np.array([-1, 0, 1])\n >>> hermevander(x, 3)\n array([[ 1., -1., 0., 2.],\n [ 1., 0., -1., -0.],\n [ 1., 1., 0., -2.]])\n\n \"\"\"\n ideg = pu._deprecate_as_int(deg, \"deg\")\n if ideg < 0:\n raise ValueError(\"deg must be non-negative\")\n\n x = np.array(x, copy=False, ndmin=1) + 0.0\n dims = (ideg + 1,) + x.shape\n dtyp = x.dtype\n v = np.empty(dims, dtype=dtyp)\n v[0] = x*0 + 1\n if ideg > 0:\n v[1] = x\n for i in range(2, ideg + 1):\n v[i] = (v[i-1]*x - v[i-2]*(i - 1))\n return np.moveaxis(v, 0, -1)\n\n\ndef hermevander2d(x, y, deg):\n \"\"\"Pseudo-Vandermonde matrix of given degrees.\n\n Returns the pseudo-Vandermonde matrix of degrees `deg` and sample\n points `(x, y)`. The pseudo-Vandermonde matrix is defined by\n\n .. math:: V[..., (deg[1] + 1)*i + j] = He_i(x) * He_j(y),\n\n where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of\n `V` index the points `(x, y)` and the last index encodes the degrees of\n the HermiteE polynomials.\n\n If ``V = hermevander2d(x, y, [xdeg, ydeg])``, then the columns of `V`\n correspond to the elements of a 2-D coefficient array `c` of shape\n (xdeg + 1, ydeg + 1) in the order\n\n .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...\n\n and ``np.dot(V, c.flat)`` and ``hermeval2d(x, y, c)`` will be the same\n up to roundoff. This equivalence is useful both for least squares\n fitting and for the evaluation of a large number of 2-D HermiteE\n series of the same degrees and sample points.\n\n Parameters\n ----------\n x, y : array_like\n Arrays of point coordinates, all of the same shape. The dtypes\n will be converted to either float64 or complex128 depending on\n whether any of the elements are complex. Scalars are converted to\n 1-D arrays.\n deg : list of ints\n List of maximum degrees of the form [x_deg, y_deg].\n\n Returns\n -------\n vander2d : ndarray\n The shape of the returned matrix is ``x.shape + (order,)``, where\n :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same\n as the converted `x` and `y`.\n\n See Also\n --------\n hermevander, hermevander3d, hermeval2d, hermeval3d\n\n Notes\n -----\n\n .. versionadded:: 1.7.0\n\n \"\"\"\n return pu._vander_nd_flat((hermevander, hermevander), (x, y), deg)\n\n\ndef hermevander3d(x, y, z, deg):\n \"\"\"Pseudo-Vandermonde matrix of given degrees.\n\n Returns the pseudo-Vandermonde matrix of degrees `deg` and sample\n points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,\n then Hehe pseudo-Vandermonde matrix is defined by\n\n .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = He_i(x)*He_j(y)*He_k(z),\n\n where `0 <= i <= l`, `0 <= j <= m`, and `0 <= j <= n`. The leading\n indices of `V` index the points `(x, y, z)` and the last index encodes\n the degrees of the HermiteE polynomials.\n\n If ``V = hermevander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns\n of `V` correspond to the elements of a 3-D coefficient array `c` of\n shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order\n\n .. math:: c_{000}, c_{001}, c_{002},... 
, c_{010}, c_{011}, c_{012},...\n\n and ``np.dot(V, c.flat)`` and ``hermeval3d(x, y, z, c)`` will be the\n same up to roundoff. This equivalence is useful both for least squares\n fitting and for the evaluation of a large number of 3-D HermiteE\n series of the same degrees and sample points.\n\n Parameters\n ----------\n x, y, z : array_like\n Arrays of point coordinates, all of the same shape. The dtypes will\n be converted to either float64 or complex128 depending on whether\n any of the elements are complex. Scalars are converted to 1-D\n arrays.\n deg : list of ints\n List of maximum degrees of the form [x_deg, y_deg, z_deg].\n\n Returns\n -------\n vander3d : ndarray\n The shape of the returned matrix is ``x.shape + (order,)``, where\n :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will\n be the same as the converted `x`, `y`, and `z`.\n\n See Also\n --------\n hermevander, hermevander3d, hermeval2d, hermeval3d\n\n Notes\n -----\n\n .. versionadded:: 1.7.0\n\n \"\"\"\n return pu._vander_nd_flat((hermevander, hermevander, hermevander), (x, y, z), deg)\n\n\ndef hermefit(x, y, deg, rcond=None, full=False, w=None):\n \"\"\"\n Least squares fit of Hermite series to data.\n\n Return the coefficients of a HermiteE series of degree `deg` that is\n the least squares fit to the data values `y` given at points `x`. If\n `y` is 1-D the returned coefficients will also be 1-D. If `y` is 2-D\n multiple fits are done, one for each column of `y`, and the resulting\n coefficients are stored in the corresponding columns of a 2-D return.\n The fitted polynomial(s) are in the form\n\n .. math:: p(x) = c_0 + c_1 * He_1(x) + ... + c_n * He_n(x),\n\n where `n` is `deg`.\n\n Parameters\n ----------\n x : array_like, shape (M,)\n x-coordinates of the M sample points ``(x[i], y[i])``.\n y : array_like, shape (M,) or (M, K)\n y-coordinates of the sample points. Several data sets of sample\n points sharing the same x-coordinates can be fitted at once by\n passing in a 2D-array that contains one dataset per column.\n deg : int or 1-D array_like\n Degree(s) of the fitting polynomials. If `deg` is a single integer\n all terms up to and including the `deg`'th term are included in the\n fit. For NumPy versions >= 1.11.0 a list of integers specifying the\n degrees of the terms to include may be used instead.\n rcond : float, optional\n Relative condition number of the fit. Singular values smaller than\n this relative to the largest singular value will be ignored. The\n default value is len(x)*eps, where eps is the relative precision of\n the float type, about 2e-16 in most cases.\n full : bool, optional\n Switch determining nature of return value. When it is False (the\n default) just the coefficients are returned, when True diagnostic\n information from the singular value decomposition is also returned.\n w : array_like, shape (`M`,), optional\n Weights. If not None, the contribution of each point\n ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the\n weights are chosen so that the errors of the products ``w[i]*y[i]``\n all have the same variance. The default value is None.\n\n Returns\n -------\n coef : ndarray, shape (M,) or (M, K)\n Hermite coefficients ordered from low to high. 
If `y` was 2-D,\n the coefficients for the data in column k of `y` are in column\n `k`.\n\n [residuals, rank, singular_values, rcond] : list\n These values are only returned if `full` = True\n\n resid -- sum of squared residuals of the least squares fit\n rank -- the numerical rank of the scaled Vandermonde matrix\n sv -- singular values of the scaled Vandermonde matrix\n rcond -- value of `rcond`.\n\n For more details, see `numpy.linalg.lstsq`.\n\n Warns\n -----\n RankWarning\n The rank of the coefficient matrix in the least-squares fit is\n deficient. The warning is only raised if `full` = False. The\n warnings can be turned off by\n\n >>> import warnings\n >>> warnings.simplefilter('ignore', np.RankWarning)\n\n See Also\n --------\n numpy.polynomial.chebyshev.chebfit\n numpy.polynomial.legendre.legfit\n numpy.polynomial.polynomial.polyfit\n numpy.polynomial.hermite.hermfit\n numpy.polynomial.laguerre.lagfit\n hermeval : Evaluates a Hermite series.\n hermevander : pseudo Vandermonde matrix of Hermite series.\n hermeweight : HermiteE weight function.\n numpy.linalg.lstsq : Computes a least-squares fit from the matrix.\n scipy.interpolate.UnivariateSpline : Computes spline fits.\n\n Notes\n -----\n The solution is the coefficients of the HermiteE series `p` that\n minimizes the sum of the weighted squared errors\n\n .. math:: E = \\\\sum_j w_j^2 * |y_j - p(x_j)|^2,\n\n where the :math:`w_j` are the weights. This problem is solved by\n setting up the (typically) overdetermined matrix equation\n\n .. math:: V(x) * c = w * y,\n\n where `V` is the pseudo Vandermonde matrix of `x`, the elements of `c`\n are the coefficients to be solved for, and the elements of `y` are the\n observed values. This equation is then solved using the singular value\n decomposition of `V`.\n\n If some of the singular values of `V` are so small that they are\n neglected, then a `RankWarning` will be issued. This means that the\n coefficient values may be poorly determined. Using a lower order fit\n will usually get rid of the warning. The `rcond` parameter can also be\n set to a value smaller than its default, but the resulting fit may be\n spurious and have large contributions from roundoff error.\n\n Fits using HermiteE series are probably most useful when the data can\n be approximated by ``sqrt(w(x)) * p(x)``, where `w(x)` is the HermiteE\n weight. In that case the weight ``sqrt(w(x[i]))`` should be used\n together with data values ``y[i]/sqrt(w(x[i]))``. The weight function is\n available as `hermeweight`.\n\n References\n ----------\n .. [1] Wikipedia, \"Curve fitting\",\n https://en.wikipedia.org/wiki/Curve_fitting\n\n Examples\n --------\n >>> from numpy.polynomial.hermite_e import hermefit, hermeval\n >>> x = np.linspace(-10, 10)\n >>> np.random.seed(123)\n >>> err = np.random.randn(len(x))/10\n >>> y = hermeval(x, [1, 2, 3]) + err\n >>> hermefit(x, y, 2)\n array([ 1.01690445, 1.99951418, 2.99948696]) # may vary\n\n \"\"\"\n return pu._fit(hermevander, x, y, deg, rcond, full, w)\n\n\ndef hermecompanion(c):\n \"\"\"\n Return the scaled companion matrix of c.\n\n The basis polynomials are scaled so that the companion matrix is\n symmetric when `c` is an HermiteE basis polynomial. 
This provides\n better eigenvalue estimates than the unscaled case and for basis\n polynomials the eigenvalues are guaranteed to be real if\n `numpy.linalg.eigvalsh` is used to obtain them.\n\n Parameters\n ----------\n c : array_like\n 1-D array of HermiteE series coefficients ordered from low to high\n degree.\n\n Returns\n -------\n mat : ndarray\n Scaled companion matrix of dimensions (deg, deg).\n\n Notes\n -----\n\n .. versionadded:: 1.7.0\n\n \"\"\"\n # c is a trimmed copy\n [c] = pu.as_series([c])\n if len(c) < 2:\n raise ValueError('Series must have maximum degree of at least 1.')\n if len(c) == 2:\n return np.array([[-c[0]/c[1]]])\n\n n = len(c) - 1\n mat = np.zeros((n, n), dtype=c.dtype)\n scl = np.hstack((1., 1./np.sqrt(np.arange(n - 1, 0, -1))))\n scl = np.multiply.accumulate(scl)[::-1]\n top = mat.reshape(-1)[1::n+1]\n bot = mat.reshape(-1)[n::n+1]\n top[...] = np.sqrt(np.arange(1, n))\n bot[...] = top\n mat[:, -1] -= scl*c[:-1]/c[-1]\n return mat\n\n\ndef hermeroots(c):\n \"\"\"\n Compute the roots of a HermiteE series.\n\n Return the roots (a.k.a. \"zeros\") of the polynomial\n\n .. math:: p(x) = \\\\sum_i c[i] * He_i(x).\n\n Parameters\n ----------\n c : 1-D array_like\n 1-D array of coefficients.\n\n Returns\n -------\n out : ndarray\n Array of the roots of the series. If all the roots are real,\n then `out` is also real, otherwise it is complex.\n\n See Also\n --------\n numpy.polynomial.polynomial.polyroots\n numpy.polynomial.legendre.legroots\n numpy.polynomial.laguerre.lagroots\n numpy.polynomial.hermite.hermroots\n numpy.polynomial.chebyshev.chebroots\n\n Notes\n -----\n The root estimates are obtained as the eigenvalues of the companion\n matrix, Roots far from the origin of the complex plane may have large\n errors due to the numerical instability of the series for such\n values. Roots with multiplicity greater than 1 will also show larger\n errors as the value of the series near such points is relatively\n insensitive to errors in the roots. Isolated roots near the origin can\n be improved by a few iterations of Newton's method.\n\n The HermiteE series basis polynomials aren't powers of `x` so the\n results of this function may seem unintuitive.\n\n Examples\n --------\n >>> from numpy.polynomial.hermite_e import hermeroots, hermefromroots\n >>> coef = hermefromroots([-1, 0, 1])\n >>> coef\n array([0., 2., 0., 1.])\n >>> hermeroots(coef)\n array([-1., 0., 1.]) # may vary\n\n \"\"\"\n # c is a trimmed copy\n [c] = pu.as_series([c])\n if len(c) <= 1:\n return np.array([], dtype=c.dtype)\n if len(c) == 2:\n return np.array([-c[0]/c[1]])\n\n # rotated companion matrix reduces error\n m = hermecompanion(c)[::-1,::-1]\n r = la.eigvals(m)\n r.sort()\n return r\n\n\ndef _normed_hermite_e_n(x, n):\n \"\"\"\n Evaluate a normalized HermiteE polynomial.\n\n Compute the value of the normalized HermiteE polynomial of degree ``n``\n at the points ``x``.\n\n\n Parameters\n ----------\n x : ndarray of double.\n Points at which to evaluate the function\n n : int\n Degree of the normalized HermiteE function to be evaluated.\n\n Returns\n -------\n values : ndarray\n The shape of the return value is described above.\n\n Notes\n -----\n .. versionadded:: 1.10.0\n\n This function is needed for finding the Gauss points and integration\n weights for high degrees. 
The values of the standard HermiteE functions\n overflow when n >= 207.\n\n \"\"\"\n if n == 0:\n return np.full(x.shape, 1/np.sqrt(np.sqrt(2*np.pi)))\n\n c0 = 0.\n c1 = 1./np.sqrt(np.sqrt(2*np.pi))\n nd = float(n)\n for i in range(n - 1):\n tmp = c0\n c0 = -c1*np.sqrt((nd - 1.)/nd)\n c1 = tmp + c1*x*np.sqrt(1./nd)\n nd = nd - 1.0\n return c0 + c1*x\n\n\ndef hermegauss(deg):\n \"\"\"\n Gauss-HermiteE quadrature.\n\n Computes the sample points and weights for Gauss-HermiteE quadrature.\n These sample points and weights will correctly integrate polynomials of\n degree :math:`2*deg - 1` or less over the interval :math:`[-\\\\inf, \\\\inf]`\n with the weight function :math:`f(x) = \\\\exp(-x^2/2)`.\n\n Parameters\n ----------\n deg : int\n Number of sample points and weights. It must be >= 1.\n\n Returns\n -------\n x : ndarray\n 1-D ndarray containing the sample points.\n y : ndarray\n 1-D ndarray containing the weights.\n\n Notes\n -----\n\n .. versionadded:: 1.7.0\n\n The results have only been tested up to degree 100, higher degrees may\n be problematic. The weights are determined by using the fact that\n\n .. math:: w_k = c / (He'_n(x_k) * He_{n-1}(x_k))\n\n where :math:`c` is a constant independent of :math:`k` and :math:`x_k`\n is the k'th root of :math:`He_n`, and then scaling the results to get\n the right value when integrating 1.\n\n \"\"\"\n ideg = pu._deprecate_as_int(deg, \"deg\")\n if ideg <= 0:\n raise ValueError(\"deg must be a positive integer\")\n\n # first approximation of roots. We use the fact that the companion\n # matrix is symmetric in this case in order to obtain better zeros.\n c = np.array([0]*deg + [1])\n m = hermecompanion(c)\n x = la.eigvalsh(m)\n\n # improve roots by one application of Newton\n dy = _normed_hermite_e_n(x, ideg)\n df = _normed_hermite_e_n(x, ideg - 1) * np.sqrt(ideg)\n x -= dy/df\n\n # compute the weights. We scale the factor to avoid possible numerical\n # overflow.\n fm = _normed_hermite_e_n(x, ideg - 1)\n fm /= np.abs(fm).max()\n w = 1/(fm * fm)\n\n # for Hermite_e we can also symmetrize\n w = (w + w[::-1])/2\n x = (x - x[::-1])/2\n\n # scale w to get the right value\n w *= np.sqrt(2*np.pi) / w.sum()\n\n return x, w\n\n\ndef hermeweight(x):\n \"\"\"Weight function of the Hermite_e polynomials.\n\n The weight function is :math:`\\\\exp(-x^2/2)` and the interval of\n integration is :math:`[-\\\\inf, \\\\inf]`. the HermiteE polynomials are\n orthogonal, but not normalized, with respect to this weight function.\n\n Parameters\n ----------\n x : array_like\n Values at which the weight function will be computed.\n\n Returns\n -------\n w : ndarray\n The weight function at `x`.\n\n Notes\n -----\n\n .. versionadded:: 1.7.0\n\n \"\"\"\n w = np.exp(-.5*x**2)\n return w\n\n\n#\n# HermiteE series class\n#\n\nclass HermiteE(ABCPolyBase):\n \"\"\"An HermiteE series class.\n\n The HermiteE class provides the standard Python numerical methods\n '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the\n attributes and methods listed in the `ABCPolyBase` documentation.\n\n Parameters\n ----------\n coef : array_like\n HermiteE coefficients in order of increasing degree, i.e,\n ``(1, 2, 3)`` gives ``1*He_0(x) + 2*He_1(X) + 3*He_2(x)``.\n domain : (2,) array_like, optional\n Domain to use. The interval ``[domain[0], domain[1]]`` is mapped\n to the interval ``[window[0], window[1]]`` by shifting and scaling.\n The default value is [-1, 1].\n window : (2,) array_like, optional\n Window, see `domain` for its use. The default value is [-1, 1].\n\n .. 
versionadded:: 1.6.0\n\n \"\"\"\n # Virtual Functions\n _add = staticmethod(hermeadd)\n _sub = staticmethod(hermesub)\n _mul = staticmethod(hermemul)\n _div = staticmethod(hermediv)\n _pow = staticmethod(hermepow)\n _val = staticmethod(hermeval)\n _int = staticmethod(hermeint)\n _der = staticmethod(hermeder)\n _fit = staticmethod(hermefit)\n _line = staticmethod(hermeline)\n _roots = staticmethod(hermeroots)\n _fromroots = staticmethod(hermefromroots)\n\n # Virtual properties\n domain = np.array(hermedomain)\n window = np.array(hermedomain)\n basis_name = 'He'\n", "\"\"\"\n\"Cholesky Factor Algorithm\" (CFA) simulation smoothing for state space models\n\nAuthor: Chad Fulton\nLicense: BSD-3\n\"\"\"\n\nimport numpy as np\n\nfrom . import tools\n\n\nclass CFASimulationSmoother(object):\n r\"\"\"\n \"Cholesky Factor Algorithm\" (CFA) simulation smoother\n\n Parameters\n ----------\n model : Representation\n The state space model.\n\n Notes\n -----\n This class allows simulation smoothing by the \"Cholesky Factor Algorithm\"\n (CFA) described in [1]_ and [2]_, which essentially takes advantage of the\n existence of an efficient sparse Cholesky factor algorithm for banded\n matrices that are held in a sparse matrix format.\n\n In particular, this simulation smoother computes the joint posterior mean\n and covariance matrix for the unobserved state vector all at once, rather\n than using the recursive computations of the Kalman filter and smoother. It\n then uses these posterior moments to sample directly from this joint\n posterior. For some models, it can be more computationally efficient than\n the simulation smoother based on the Kalman filter and smoother.\n\n **Important caveat**:\n\n However, this simulation smoother cannot be used with all state space\n models, including several of the most popular. In particular, the CFA\n algorithm cannot support degenerate distributions (i.e. positive\n semi-definite covariance matrices) for the initial state (which is the\n prior for the first state) or the observation or state innovations.\n\n One practical problem with this algorithm is that an autoregressive term\n with order higher than one is typically put into state space form by\n augmenting the states using identities. As identities, these augmenting\n terms will not be subject to random innovations, and so the state\n innovation will be degenerate. It is possible to take these higher order\n terms directly into account when constructing the posterior covariance\n matrix, but this has not yet been implemented.\n\n Similarly, some state space forms of SARIMA and VARMA models make\n the observation equation an identity, which is not compatible with the CFA\n simulation smoothing approach.\n\n This simulation smoother has so-far found most of its use with dynamic\n factor and stochastic volatility models, which satisfy the restrictions\n described above.\n\n **Not-yet-implemented**:\n\n There are several features that are not yet available with this simulation\n smoother:\n\n - It does not yet allow diffuse initialization of the state vector.\n - It produces simulated states only for exactly the observations in the\n model (i.e. it cannot produce simulations for a subset of the model\n observations or for observations outside the model).\n\n\n References\n ----------\n .. [1] McCausland, William J., Shirley Miller, and Denis Pelletier.\n \"Simulation smoothing for state-space models: A computational\n efficiency analysis.\"\n Computational Statistics & Data Analysis 55, no. 
1 (2011): 199-212.\n .. [2] Chan, Joshua CC, and Ivan Jeliazkov.\n \"Efficient simulation and integrated likelihood estimation in\n state space models.\"\n International Journal of Mathematical Modelling and Numerical\n Optimisation 1, no. 1-2 (2009): 101-120.\n \"\"\"\n\n def __init__(self, model, cfa_simulation_smoother_classes=None):\n self.model = model\n\n # Get the simulation smoother classes\n self.prefix_simulation_smoother_map = (\n cfa_simulation_smoother_classes\n if cfa_simulation_smoother_classes is not None\n else tools.prefix_cfa_simulation_smoother_map.copy())\n\n self._simulation_smoothers = {}\n\n self._posterior_mean = None\n self._posterior_cov_inv_chol = None\n self._posterior_cov = None\n self._simulated_state = None\n\n @property\n def _simulation_smoother(self):\n prefix = self.model.prefix\n if prefix in self._simulation_smoothers:\n return self._simulation_smoothers[prefix]\n return None\n\n @property\n def posterior_mean(self):\n r\"\"\"\n Posterior mean of the states conditional on the data\n\n Notes\n -----\n\n .. math::\n\n \\hat \\alpha_t = E[\\alpha_t \\mid Y^n ]\n\n This posterior mean is identical to the `smoothed_state` computed by\n the Kalman smoother.\n \"\"\"\n if self._posterior_mean is None:\n self._posterior_mean = np.array(\n self._simulation_smoother.posterior_mean, copy=True)\n return self._posterior_mean\n\n @property\n def posterior_cov_inv_chol_sparse(self):\n r\"\"\"\n Sparse Cholesky factor of inverse posterior covariance matrix\n\n Notes\n -----\n This attribute holds in sparse diagonal banded storage the Cholesky\n factor of the inverse of the posterior covariance matrix. If we denote\n :math:`P = Var[\\alpha \\mid Y^n ]`, then the this attribute holds the\n lower Cholesky factor :math:`L`, defined from :math:`L L' = P^{-1}`.\n This attribute uses the sparse diagonal banded storage described in the\n documentation of, for example, the SciPy function\n `scipy.linalg.solveh_banded`.\n \"\"\"\n if self._posterior_cov_inv_chol is None:\n self._posterior_cov_inv_chol = np.array(\n self._simulation_smoother.posterior_cov_inv_chol, copy=True)\n return self._posterior_cov_inv_chol\n\n @property\n def posterior_cov(self):\n r\"\"\"\n Posterior covariance of the states conditional on the data\n\n Notes\n -----\n **Warning**: the matrix computed when accessing this property can be\n extremely large: it is shaped `(nobs * k_states, nobs * k_states)`. In\n most cases, it is better to use the `posterior_cov_inv_chol_sparse`\n property if possible, which holds in sparse diagonal banded storage\n the Cholesky factor of the inverse of the posterior covariance matrix.\n\n .. math::\n\n Var[\\alpha \\mid Y^n ]\n\n This posterior covariance matrix is *not* identical to the\n `smoothed_state_cov` attribute produced by the Kalman smoother, because\n it additionally contains all cross-covariance terms. 
Instead,\n `smoothed_state_cov` contains the `(k_states, k_states)` block\n diagonal entries of this posterior covariance matrix.\n \"\"\"\n if self._posterior_cov is None:\n from scipy.linalg import cho_solve_banded\n inv_chol = self.posterior_cov_inv_chol_sparse\n self._posterior_cov = cho_solve_banded(\n (inv_chol, True), np.eye(inv_chol.shape[1]))\n return self._posterior_cov\n\n def simulate(self, variates=None, update_posterior=True):\n r\"\"\"\n Perform simulation smoothing (via Cholesky factor algorithm)\n\n Does not return anything, but populates the object's `simulated_state`\n attribute, and also makes available the attributes `posterior_mean`,\n `posterior_cov`, and `posterior_cov_inv_chol_sparse`.\n\n Parameters\n ----------\n variates : array_like, optional\n Random variates, distributed standard Normal. Usually only\n specified if results are to be replicated (e.g. to enforce a seed)\n or for testing. If not specified, random variates are drawn. Must\n be shaped (nobs, k_states).\n\n Notes\n -----\n The first step in simulating from the joint posterior of the state\n vector conditional on the data is to compute the two relevant moments\n of the joint posterior distribution:\n\n .. math::\n\n \\alpha \\mid Y_n \\sim N(\\hat \\alpha, Var(\\alpha \\mid Y_n))\n\n Let :math:`L L' = Var(\\alpha \\mid Y_n)^{-1}`. Then simulation proceeds\n according to the following steps:\n\n 1. Draw :math:`u \\sim N(0, I)`\n 2. Compute :math:`x = \\hat \\alpha + (L')^{-1} u`\n\n And then :math:`x` is a draw from the joint posterior of the states.\n The output of the function is as follows:\n\n - The simulated draw :math:`x` is held in the `simulated_state`\n attribute.\n - The posterior mean :math:`\\hat \\alpha` is held in the\n `posterior_mean` attribute.\n - The (lower triangular) Cholesky factor of the inverse posterior\n covariance matrix, :math:`L`, is held in sparse diagonal banded\n storage in the `posterior_cov_inv_chol` attribute.\n - The posterior covariance matrix :math:`Var(\\alpha \\mid Y_n)` can be\n computed on demand by accessing the `posterior_cov` property. Note\n that this matrix can be extremely large, so care must be taken when\n accessing this property. 
In most cases, it will be preferred to make\n use of the `posterior_cov_inv_chol` attribute rather than the\n `posterior_cov` attribute.\n\n \"\"\"\n # (Re) initialize the _statespace representation\n prefix, dtype, create = self.model._initialize_representation()\n\n # Validate variates and get in required datatype\n if variates is not None:\n tools.validate_matrix_shape('variates', variates.shape,\n self.model.k_states,\n self.model.nobs, 1)\n variates = np.ravel(variates, order='F').astype(dtype)\n\n # (Re) initialize the state\n self.model._initialize_state(prefix=prefix)\n\n # Construct the Cython simulation smoother instance, if necessary\n if create or prefix not in self._simulation_smoothers:\n cls = self.prefix_simulation_smoother_map[prefix]\n self._simulation_smoothers[prefix] = cls(\n self.model._statespaces[prefix])\n sim = self._simulation_smoothers[prefix]\n\n # Update posterior moments, if requested\n if update_posterior:\n sim.update_sparse_posterior_moments()\n self._posterior_mean = None\n self._posterior_cov_inv_chol = None\n self._posterior_cov = None\n\n # Perform simulation smoothing\n self.simulated_state = sim.simulate(variates=variates)\n", "# Authors: Nils Wagner, Ed Schofield, Pauli Virtanen, John Travers\n\"\"\"\nTests for numerical integration.\n\"\"\"\nimport numpy as np\nfrom numpy import (arange, zeros, array, dot, sqrt, cos, sin, eye, pi, exp,\n allclose)\n\nfrom numpy.testing import (\n assert_, assert_array_almost_equal,\n assert_allclose, assert_array_equal, assert_equal, assert_warns)\nfrom pytest import raises as assert_raises\nfrom scipy.integrate import odeint, ode, complex_ode\n\n#------------------------------------------------------------------------------\n# Test ODE integrators\n#------------------------------------------------------------------------------\n\n\nclass TestOdeint(object):\n # Check integrate.odeint\n\n def _do_problem(self, problem):\n t = arange(0.0, problem.stop_t, 0.05)\n\n # Basic case\n z, infodict = odeint(problem.f, problem.z0, t, full_output=True)\n assert_(problem.verify(z, t))\n\n # Use tfirst=True\n z, infodict = odeint(lambda t, y: problem.f(y, t), problem.z0, t,\n full_output=True, tfirst=True)\n assert_(problem.verify(z, t))\n\n if hasattr(problem, 'jac'):\n # Use Dfun\n z, infodict = odeint(problem.f, problem.z0, t, Dfun=problem.jac,\n full_output=True)\n assert_(problem.verify(z, t))\n\n # Use Dfun and tfirst=True\n z, infodict = odeint(lambda t, y: problem.f(y, t), problem.z0, t,\n Dfun=lambda t, y: problem.jac(y, t),\n full_output=True, tfirst=True)\n assert_(problem.verify(z, t))\n\n def test_odeint(self):\n for problem_cls in PROBLEMS:\n problem = problem_cls()\n if problem.cmplx:\n continue\n self._do_problem(problem)\n\n\nclass TestODEClass(object):\n\n ode_class = None # Set in subclass.\n\n def _do_problem(self, problem, integrator, method='adams'):\n\n # ode has callback arguments in different order than odeint\n f = lambda t, z: problem.f(z, t)\n jac = None\n if hasattr(problem, 'jac'):\n jac = lambda t, z: problem.jac(z, t)\n\n integrator_params = {}\n if problem.lband is not None or problem.uband is not None:\n integrator_params['uband'] = problem.uband\n integrator_params['lband'] = problem.lband\n\n ig = self.ode_class(f, jac)\n ig.set_integrator(integrator,\n atol=problem.atol/10,\n rtol=problem.rtol/10,\n method=method,\n **integrator_params)\n\n ig.set_initial_value(problem.z0, t=0.0)\n z = ig.integrate(problem.stop_t)\n\n assert_array_equal(z, ig.y)\n assert_(ig.successful(), (problem, 
method))\n assert_(ig.get_return_code() > 0, (problem, method))\n assert_(problem.verify(array([z]), problem.stop_t), (problem, method))\n\n\nclass TestOde(TestODEClass):\n\n ode_class = ode\n\n def test_vode(self):\n # Check the vode solver\n for problem_cls in PROBLEMS:\n problem = problem_cls()\n if problem.cmplx:\n continue\n if not problem.stiff:\n self._do_problem(problem, 'vode', 'adams')\n self._do_problem(problem, 'vode', 'bdf')\n\n def test_zvode(self):\n # Check the zvode solver\n for problem_cls in PROBLEMS:\n problem = problem_cls()\n if not problem.stiff:\n self._do_problem(problem, 'zvode', 'adams')\n self._do_problem(problem, 'zvode', 'bdf')\n\n def test_lsoda(self):\n # Check the lsoda solver\n for problem_cls in PROBLEMS:\n problem = problem_cls()\n if problem.cmplx:\n continue\n self._do_problem(problem, 'lsoda')\n\n def test_dopri5(self):\n # Check the dopri5 solver\n for problem_cls in PROBLEMS:\n problem = problem_cls()\n if problem.cmplx:\n continue\n if problem.stiff:\n continue\n if hasattr(problem, 'jac'):\n continue\n self._do_problem(problem, 'dopri5')\n\n def test_dop853(self):\n # Check the dop853 solver\n for problem_cls in PROBLEMS:\n problem = problem_cls()\n if problem.cmplx:\n continue\n if problem.stiff:\n continue\n if hasattr(problem, 'jac'):\n continue\n self._do_problem(problem, 'dop853')\n\n def test_concurrent_fail(self):\n for sol in ('vode', 'zvode', 'lsoda'):\n f = lambda t, y: 1.0\n\n r = ode(f).set_integrator(sol)\n r.set_initial_value(0, 0)\n\n r2 = ode(f).set_integrator(sol)\n r2.set_initial_value(0, 0)\n\n r.integrate(r.t + 0.1)\n r2.integrate(r2.t + 0.1)\n\n assert_raises(RuntimeError, r.integrate, r.t + 0.1)\n\n def test_concurrent_ok(self):\n f = lambda t, y: 1.0\n\n for k in range(3):\n for sol in ('vode', 'zvode', 'lsoda', 'dopri5', 'dop853'):\n r = ode(f).set_integrator(sol)\n r.set_initial_value(0, 0)\n\n r2 = ode(f).set_integrator(sol)\n r2.set_initial_value(0, 0)\n\n r.integrate(r.t + 0.1)\n r2.integrate(r2.t + 0.1)\n r2.integrate(r2.t + 0.1)\n\n assert_allclose(r.y, 0.1)\n assert_allclose(r2.y, 0.2)\n\n for sol in ('dopri5', 'dop853'):\n r = ode(f).set_integrator(sol)\n r.set_initial_value(0, 0)\n\n r2 = ode(f).set_integrator(sol)\n r2.set_initial_value(0, 0)\n\n r.integrate(r.t + 0.1)\n r.integrate(r.t + 0.1)\n r2.integrate(r2.t + 0.1)\n r.integrate(r.t + 0.1)\n r2.integrate(r2.t + 0.1)\n\n assert_allclose(r.y, 0.3)\n assert_allclose(r2.y, 0.2)\n\n\nclass TestComplexOde(TestODEClass):\n\n ode_class = complex_ode\n\n def test_vode(self):\n # Check the vode solver\n for problem_cls in PROBLEMS:\n problem = problem_cls()\n if not problem.stiff:\n self._do_problem(problem, 'vode', 'adams')\n else:\n self._do_problem(problem, 'vode', 'bdf')\n\n def test_lsoda(self):\n # Check the lsoda solver\n for problem_cls in PROBLEMS:\n problem = problem_cls()\n self._do_problem(problem, 'lsoda')\n\n def test_dopri5(self):\n # Check the dopri5 solver\n for problem_cls in PROBLEMS:\n problem = problem_cls()\n if problem.stiff:\n continue\n if hasattr(problem, 'jac'):\n continue\n self._do_problem(problem, 'dopri5')\n\n def test_dop853(self):\n # Check the dop853 solver\n for problem_cls in PROBLEMS:\n problem = problem_cls()\n if problem.stiff:\n continue\n if hasattr(problem, 'jac'):\n continue\n self._do_problem(problem, 'dop853')\n\n\nclass TestSolout(object):\n # Check integrate.ode correctly handles solout for dopri5 and dop853\n def _run_solout_test(self, integrator):\n # Check correct usage of solout\n ts = []\n ys = []\n t0 = 0.0\n tend = 
10.0\n y0 = [1.0, 2.0]\n\n def solout(t, y):\n ts.append(t)\n ys.append(y.copy())\n\n def rhs(t, y):\n return [y[0] + y[1], -y[1]**2]\n\n ig = ode(rhs).set_integrator(integrator)\n ig.set_solout(solout)\n ig.set_initial_value(y0, t0)\n ret = ig.integrate(tend)\n assert_array_equal(ys[0], y0)\n assert_array_equal(ys[-1], ret)\n assert_equal(ts[0], t0)\n assert_equal(ts[-1], tend)\n\n def test_solout(self):\n for integrator in ('dopri5', 'dop853'):\n self._run_solout_test(integrator)\n\n def _run_solout_after_initial_test(self, integrator):\n # Check if solout works even if it is set after the initial value.\n ts = []\n ys = []\n t0 = 0.0\n tend = 10.0\n y0 = [1.0, 2.0]\n\n def solout(t, y):\n ts.append(t)\n ys.append(y.copy())\n\n def rhs(t, y):\n return [y[0] + y[1], -y[1]**2]\n\n ig = ode(rhs).set_integrator(integrator)\n ig.set_initial_value(y0, t0)\n ig.set_solout(solout)\n ret = ig.integrate(tend)\n assert_array_equal(ys[0], y0)\n assert_array_equal(ys[-1], ret)\n assert_equal(ts[0], t0)\n assert_equal(ts[-1], tend)\n\n def test_solout_after_initial(self):\n for integrator in ('dopri5', 'dop853'):\n self._run_solout_after_initial_test(integrator)\n\n def _run_solout_break_test(self, integrator):\n # Check correct usage of stopping via solout\n ts = []\n ys = []\n t0 = 0.0\n tend = 10.0\n y0 = [1.0, 2.0]\n\n def solout(t, y):\n ts.append(t)\n ys.append(y.copy())\n if t > tend/2.0:\n return -1\n\n def rhs(t, y):\n return [y[0] + y[1], -y[1]**2]\n\n ig = ode(rhs).set_integrator(integrator)\n ig.set_solout(solout)\n ig.set_initial_value(y0, t0)\n ret = ig.integrate(tend)\n assert_array_equal(ys[0], y0)\n assert_array_equal(ys[-1], ret)\n assert_equal(ts[0], t0)\n assert_(ts[-1] > tend/2.0)\n assert_(ts[-1] < tend)\n\n def test_solout_break(self):\n for integrator in ('dopri5', 'dop853'):\n self._run_solout_break_test(integrator)\n\n\nclass TestComplexSolout(object):\n # Check integrate.ode correctly handles solout for dopri5 and dop853\n def _run_solout_test(self, integrator):\n # Check correct usage of solout\n ts = []\n ys = []\n t0 = 0.0\n tend = 20.0\n y0 = [0.0]\n\n def solout(t, y):\n ts.append(t)\n ys.append(y.copy())\n\n def rhs(t, y):\n return [1.0/(t - 10.0 - 1j)]\n\n ig = complex_ode(rhs).set_integrator(integrator)\n ig.set_solout(solout)\n ig.set_initial_value(y0, t0)\n ret = ig.integrate(tend)\n assert_array_equal(ys[0], y0)\n assert_array_equal(ys[-1], ret)\n assert_equal(ts[0], t0)\n assert_equal(ts[-1], tend)\n\n def test_solout(self):\n for integrator in ('dopri5', 'dop853'):\n self._run_solout_test(integrator)\n\n def _run_solout_break_test(self, integrator):\n # Check correct usage of stopping via solout\n ts = []\n ys = []\n t0 = 0.0\n tend = 20.0\n y0 = [0.0]\n\n def solout(t, y):\n ts.append(t)\n ys.append(y.copy())\n if t > tend/2.0:\n return -1\n\n def rhs(t, y):\n return [1.0/(t - 10.0 - 1j)]\n\n ig = complex_ode(rhs).set_integrator(integrator)\n ig.set_solout(solout)\n ig.set_initial_value(y0, t0)\n ret = ig.integrate(tend)\n assert_array_equal(ys[0], y0)\n assert_array_equal(ys[-1], ret)\n assert_equal(ts[0], t0)\n assert_(ts[-1] > tend/2.0)\n assert_(ts[-1] < tend)\n\n def test_solout_break(self):\n for integrator in ('dopri5', 'dop853'):\n self._run_solout_break_test(integrator)\n\n\n#------------------------------------------------------------------------------\n# Test problems\n#------------------------------------------------------------------------------\n\n\nclass ODE:\n \"\"\"\n ODE problem\n \"\"\"\n stiff = False\n cmplx = False\n stop_t = 1\n z0 = 
[]\n\n lband = None\n uband = None\n\n atol = 1e-6\n rtol = 1e-5\n\n\nclass SimpleOscillator(ODE):\n r\"\"\"\n Free vibration of a simple oscillator::\n m \\ddot{u} + k u = 0, u(0) = u_0 \\dot{u}(0) \\dot{u}_0\n Solution::\n u(t) = u_0*cos(sqrt(k/m)*t)+\\dot{u}_0*sin(sqrt(k/m)*t)/sqrt(k/m)\n \"\"\"\n stop_t = 1 + 0.09\n z0 = array([1.0, 0.1], float)\n\n k = 4.0\n m = 1.0\n\n def f(self, z, t):\n tmp = zeros((2, 2), float)\n tmp[0, 1] = 1.0\n tmp[1, 0] = -self.k / self.m\n return dot(tmp, z)\n\n def verify(self, zs, t):\n omega = sqrt(self.k / self.m)\n u = self.z0[0]*cos(omega*t) + self.z0[1]*sin(omega*t)/omega\n return allclose(u, zs[:, 0], atol=self.atol, rtol=self.rtol)\n\n\nclass ComplexExp(ODE):\n r\"\"\"The equation :lm:`\\dot u = i u`\"\"\"\n stop_t = 1.23*pi\n z0 = exp([1j, 2j, 3j, 4j, 5j])\n cmplx = True\n\n def f(self, z, t):\n return 1j*z\n\n def jac(self, z, t):\n return 1j*eye(5)\n\n def verify(self, zs, t):\n u = self.z0 * exp(1j*t)\n return allclose(u, zs, atol=self.atol, rtol=self.rtol)\n\n\nclass Pi(ODE):\n r\"\"\"Integrate 1/(t + 1j) from t=-10 to t=10\"\"\"\n stop_t = 20\n z0 = [0]\n cmplx = True\n\n def f(self, z, t):\n return array([1./(t - 10 + 1j)])\n\n def verify(self, zs, t):\n u = -2j * np.arctan(10)\n return allclose(u, zs[-1, :], atol=self.atol, rtol=self.rtol)\n\n\nclass CoupledDecay(ODE):\n r\"\"\"\n 3 coupled decays suited for banded treatment\n (banded mode makes it necessary when N>>3)\n \"\"\"\n\n stiff = True\n stop_t = 0.5\n z0 = [5.0, 7.0, 13.0]\n lband = 1\n uband = 0\n\n lmbd = [0.17, 0.23, 0.29] # fictitious decay constants\n\n def f(self, z, t):\n lmbd = self.lmbd\n return np.array([-lmbd[0]*z[0],\n -lmbd[1]*z[1] + lmbd[0]*z[0],\n -lmbd[2]*z[2] + lmbd[1]*z[1]])\n\n def jac(self, z, t):\n # The full Jacobian is\n #\n # [-lmbd[0] 0 0 ]\n # [ lmbd[0] -lmbd[1] 0 ]\n # [ 0 lmbd[1] -lmbd[2]]\n #\n # The lower and upper bandwidths are lband=1 and uband=0, resp.\n # The representation of this array in packed format is\n #\n # [-lmbd[0] -lmbd[1] -lmbd[2]]\n # [ lmbd[0] lmbd[1] 0 ]\n\n lmbd = self.lmbd\n j = np.zeros((self.lband + self.uband + 1, 3), order='F')\n\n def set_j(ri, ci, val):\n j[self.uband + ri - ci, ci] = val\n set_j(0, 0, -lmbd[0])\n set_j(1, 0, lmbd[0])\n set_j(1, 1, -lmbd[1])\n set_j(2, 1, lmbd[1])\n set_j(2, 2, -lmbd[2])\n return j\n\n def verify(self, zs, t):\n # Formulae derived by hand\n lmbd = np.array(self.lmbd)\n d10 = lmbd[1] - lmbd[0]\n d21 = lmbd[2] - lmbd[1]\n d20 = lmbd[2] - lmbd[0]\n e0 = np.exp(-lmbd[0] * t)\n e1 = np.exp(-lmbd[1] * t)\n e2 = np.exp(-lmbd[2] * t)\n u = np.vstack((\n self.z0[0] * e0,\n self.z0[1] * e1 + self.z0[0] * lmbd[0] / d10 * (e0 - e1),\n self.z0[2] * e2 + self.z0[1] * lmbd[1] / d21 * (e1 - e2) +\n lmbd[1] * lmbd[0] * self.z0[0] / d10 *\n (1 / d20 * (e0 - e2) - 1 / d21 * (e1 - e2)))).transpose()\n return allclose(u, zs, atol=self.atol, rtol=self.rtol)\n\n\nPROBLEMS = [SimpleOscillator, ComplexExp, Pi, CoupledDecay]\n\n#------------------------------------------------------------------------------\n\n\ndef f(t, x):\n dxdt = [x[1], -x[0]]\n return dxdt\n\n\ndef jac(t, x):\n j = array([[0.0, 1.0],\n [-1.0, 0.0]])\n return j\n\n\ndef f1(t, x, omega):\n dxdt = [omega*x[1], -omega*x[0]]\n return dxdt\n\n\ndef jac1(t, x, omega):\n j = array([[0.0, omega],\n [-omega, 0.0]])\n return j\n\n\ndef f2(t, x, omega1, omega2):\n dxdt = [omega1*x[1], -omega2*x[0]]\n return dxdt\n\n\ndef jac2(t, x, omega1, omega2):\n j = array([[0.0, omega1],\n [-omega2, 0.0]])\n return j\n\n\ndef fv(t, x, omega):\n dxdt = [omega[0]*x[1], 
-omega[1]*x[0]]\n return dxdt\n\n\ndef jacv(t, x, omega):\n j = array([[0.0, omega[0]],\n [-omega[1], 0.0]])\n return j\n\n\nclass ODECheckParameterUse(object):\n \"\"\"Call an ode-class solver with several cases of parameter use.\"\"\"\n\n # solver_name must be set before tests can be run with this class.\n\n # Set these in subclasses.\n solver_name = ''\n solver_uses_jac = False\n\n def _get_solver(self, f, jac):\n solver = ode(f, jac)\n if self.solver_uses_jac:\n solver.set_integrator(self.solver_name, atol=1e-9, rtol=1e-7,\n with_jacobian=self.solver_uses_jac)\n else:\n # XXX Shouldn't set_integrator *always* accept the keyword arg\n # 'with_jacobian', and perhaps raise an exception if it is set\n # to True if the solver can't actually use it?\n solver.set_integrator(self.solver_name, atol=1e-9, rtol=1e-7)\n return solver\n\n def _check_solver(self, solver):\n ic = [1.0, 0.0]\n solver.set_initial_value(ic, 0.0)\n solver.integrate(pi)\n assert_array_almost_equal(solver.y, [-1.0, 0.0])\n\n def test_no_params(self):\n solver = self._get_solver(f, jac)\n self._check_solver(solver)\n\n def test_one_scalar_param(self):\n solver = self._get_solver(f1, jac1)\n omega = 1.0\n solver.set_f_params(omega)\n if self.solver_uses_jac:\n solver.set_jac_params(omega)\n self._check_solver(solver)\n\n def test_two_scalar_params(self):\n solver = self._get_solver(f2, jac2)\n omega1 = 1.0\n omega2 = 1.0\n solver.set_f_params(omega1, omega2)\n if self.solver_uses_jac:\n solver.set_jac_params(omega1, omega2)\n self._check_solver(solver)\n\n def test_vector_param(self):\n solver = self._get_solver(fv, jacv)\n omega = [1.0, 1.0]\n solver.set_f_params(omega)\n if self.solver_uses_jac:\n solver.set_jac_params(omega)\n self._check_solver(solver)\n\n def test_warns_on_failure(self):\n # Set nsteps small to ensure failure\n solver = self._get_solver(f, jac)\n solver.set_integrator(self.solver_name, nsteps=1)\n ic = [1.0, 0.0]\n solver.set_initial_value(ic, 0.0)\n assert_warns(UserWarning, solver.integrate, pi)\n\n\nclass TestDOPRI5CheckParameterUse(ODECheckParameterUse):\n solver_name = 'dopri5'\n solver_uses_jac = False\n\n\nclass TestDOP853CheckParameterUse(ODECheckParameterUse):\n solver_name = 'dop853'\n solver_uses_jac = False\n\n\nclass TestVODECheckParameterUse(ODECheckParameterUse):\n solver_name = 'vode'\n solver_uses_jac = True\n\n\nclass TestZVODECheckParameterUse(ODECheckParameterUse):\n solver_name = 'zvode'\n solver_uses_jac = True\n\n\nclass TestLSODACheckParameterUse(ODECheckParameterUse):\n solver_name = 'lsoda'\n solver_uses_jac = True\n\n\ndef test_odeint_trivial_time():\n # Test that odeint succeeds when given a single time point\n # and full_output=True. 
This is a regression test for gh-4282.\n y0 = 1\n t = [0]\n y, info = odeint(lambda y, t: -y, y0, t, full_output=True)\n assert_array_equal(y, np.array([[y0]]))\n\n\ndef test_odeint_banded_jacobian():\n # Test the use of the `Dfun`, `ml` and `mu` options of odeint.\n\n def func(y, t, c):\n return c.dot(y)\n\n def jac(y, t, c):\n return c\n\n def jac_transpose(y, t, c):\n return c.T.copy(order='C')\n\n def bjac_rows(y, t, c):\n jac = np.row_stack((np.r_[0, np.diag(c, 1)],\n np.diag(c),\n np.r_[np.diag(c, -1), 0],\n np.r_[np.diag(c, -2), 0, 0]))\n return jac\n\n def bjac_cols(y, t, c):\n return bjac_rows(y, t, c).T.copy(order='C')\n\n c = array([[-205, 0.01, 0.00, 0.0],\n [0.1, -2.50, 0.02, 0.0],\n [1e-3, 0.01, -2.0, 0.01],\n [0.00, 0.00, 0.1, -1.0]])\n\n y0 = np.ones(4)\n t = np.array([0, 5, 10, 100])\n\n # Use the full Jacobian.\n sol1, info1 = odeint(func, y0, t, args=(c,), full_output=True,\n atol=1e-13, rtol=1e-11, mxstep=10000,\n Dfun=jac)\n\n # Use the transposed full Jacobian, with col_deriv=True.\n sol2, info2 = odeint(func, y0, t, args=(c,), full_output=True,\n atol=1e-13, rtol=1e-11, mxstep=10000,\n Dfun=jac_transpose, col_deriv=True)\n\n # Use the banded Jacobian.\n sol3, info3 = odeint(func, y0, t, args=(c,), full_output=True,\n atol=1e-13, rtol=1e-11, mxstep=10000,\n Dfun=bjac_rows, ml=2, mu=1)\n\n # Use the transposed banded Jacobian, with col_deriv=True.\n sol4, info4 = odeint(func, y0, t, args=(c,), full_output=True,\n atol=1e-13, rtol=1e-11, mxstep=10000,\n Dfun=bjac_cols, ml=2, mu=1, col_deriv=True)\n\n assert_allclose(sol1, sol2, err_msg=\"sol1 != sol2\")\n assert_allclose(sol1, sol3, atol=1e-12, err_msg=\"sol1 != sol3\")\n assert_allclose(sol3, sol4, err_msg=\"sol3 != sol4\")\n\n # Verify that the number of jacobian evaluations was the same for the\n # calls of odeint with a full jacobian and with a banded jacobian. This is\n # a regression test--there was a bug in the handling of banded jacobians\n # that resulted in an incorrect jacobian matrix being passed to the LSODA\n # code. That would cause errors or excessive jacobian evaluations.\n assert_array_equal(info1['nje'], info2['nje'])\n assert_array_equal(info3['nje'], info4['nje'])\n\n # Test the use of tfirst\n sol1ty, info1ty = odeint(lambda t, y, c: func(y, t, c), y0, t, args=(c,),\n full_output=True, atol=1e-13, rtol=1e-11,\n mxstep=10000,\n Dfun=lambda t, y, c: jac(y, t, c), tfirst=True)\n # The code should execute the exact same sequence of floating point\n # calculations, so these should be exactly equal. 
We'll be safe and use\n # a small tolerance.\n assert_allclose(sol1, sol1ty, rtol=1e-12, err_msg=\"sol1 != sol1ty\")\n\n\ndef test_odeint_errors():\n def sys1d(x, t):\n return -100*x\n\n def bad1(x, t):\n return 1.0/0\n\n def bad2(x, t):\n return \"foo\"\n\n def bad_jac1(x, t):\n return 1.0/0\n\n def bad_jac2(x, t):\n return [[\"foo\"]]\n\n def sys2d(x, t):\n return [-100*x[0], -0.1*x[1]]\n\n def sys2d_bad_jac(x, t):\n return [[1.0/0, 0], [0, -0.1]]\n\n assert_raises(ZeroDivisionError, odeint, bad1, 1.0, [0, 1])\n assert_raises(ValueError, odeint, bad2, 1.0, [0, 1])\n\n assert_raises(ZeroDivisionError, odeint, sys1d, 1.0, [0, 1], Dfun=bad_jac1)\n assert_raises(ValueError, odeint, sys1d, 1.0, [0, 1], Dfun=bad_jac2)\n\n assert_raises(ZeroDivisionError, odeint, sys2d, [1.0, 1.0], [0, 1],\n Dfun=sys2d_bad_jac)\n\n\ndef test_odeint_bad_shapes():\n # Tests of some errors that can occur with odeint.\n\n def badrhs(x, t):\n return [1, -1]\n\n def sys1(x, t):\n return -100*x\n\n def badjac(x, t):\n return [[0, 0, 0]]\n\n # y0 must be at most 1-d.\n bad_y0 = [[0, 0], [0, 0]]\n assert_raises(ValueError, odeint, sys1, bad_y0, [0, 1])\n\n # t must be at most 1-d.\n bad_t = [[0, 1], [2, 3]]\n assert_raises(ValueError, odeint, sys1, [10.0], bad_t)\n\n # y0 is 10, but badrhs(x, t) returns [1, -1].\n assert_raises(RuntimeError, odeint, badrhs, 10, [0, 1])\n\n # shape of array returned by badjac(x, t) is not correct.\n assert_raises(RuntimeError, odeint, sys1, [10, 10], [0, 1], Dfun=badjac)\n\n\ndef test_repeated_t_values():\n \"\"\"Regression test for gh-8217.\"\"\"\n\n def func(x, t):\n return -0.25*x\n\n t = np.zeros(10)\n sol = odeint(func, [1.], t)\n assert_array_equal(sol, np.ones((len(t), 1)))\n\n tau = 4*np.log(2)\n t = [0]*9 + [tau, 2*tau, 2*tau, 3*tau]\n sol = odeint(func, [1, 2], t, rtol=1e-12, atol=1e-12)\n expected_sol = np.array([[1.0, 2.0]]*9 +\n [[0.5, 1.0],\n [0.25, 0.5],\n [0.25, 0.5],\n [0.125, 0.25]])\n assert_allclose(sol, expected_sol)\n\n # Edge case: empty t sequence.\n sol = odeint(func, [1.], [])\n assert_array_equal(sol, np.array([], dtype=np.float64).reshape((0, 1)))\n\n # t values are not monotonic.\n assert_raises(ValueError, odeint, func, [1.], [0, 1, 0.5, 0])\n assert_raises(ValueError, odeint, func, [1, 2, 3], [0, -1, -2, 3])\n" ]
[ [ "pandas._testing.assert_almost_equal", "pandas.DataFrame", "pandas.tests.plotting.common.TestPlotBase.setup_method", "matplotlib.rcdefaults", "numpy.random.randn", "pandas._testing.RNGContext", "numpy.random.randint", "pandas._testing.assert_numpy_array_equal", "pandas._testing.makeTimeSeries", "pandas.Index", "matplotlib.pyplot.gcf", "matplotlib.pyplot.subplot", "pandas._testing.assert_produces_warning", "numpy.random.choice", "pandas._testing.close", "numpy.random.rand", "pandas.tests.plotting.common._check_plot_works", "pandas.plotting._matplotlib.hist._grouped_hist", "numpy.random.normal" ], [ "numpy.testing.assert_equal", "pandas.concat", "pandas.date_range", "numpy.testing.assert_almost_equal", "numpy.testing.assert_raises", "numpy.testing.assert_", "numpy.testing.assert_allclose", "numpy.array" ], [ "numpy.linspace" ], [ "numpy.testing.assert_equal", "numpy.allclose", "numpy.random.seed", "numpy.testing.suppress_warnings", "scipy.sparse.eye", "numpy.eye", "scipy.sparse.rand", "numpy.nextafter", "numpy.ones", "numpy.random.rand", "scipy.linalg.norm", "numpy.testing.assert_", "scipy.sparse.linalg.isolve.gcrotmk", "numpy.zeros", "numpy.array", "scipy.sparse.linalg.interface.LinearOperator", "numpy.testing.assert_allclose" ], [ "numpy.testing.assert_equal", "numpy.dot", "scipy.sparse.sputils.matrix", "numpy.asarray", "numpy.arange", "scipy.sparse.linalg.interface.LinearOperator", "scipy.sparse.linalg.interface.aslinearoperator", "scipy.sparse.csr_matrix", "numpy.ones", "numpy.dtype", "numpy.random.randn", "numpy.random.rand", "numpy.testing.assert_", "numpy.array", "numpy.zeros", "scipy.sparse.linalg.interface.IdentityOperator" ], [ "numpy.log", "numpy.testing.assert_allclose" ], [ "pandas.compat._optional.import_optional_dependency" ], [ "matplotlib.colors.LogNorm", "numpy.abs", "numpy.linspace", "numpy.cos", "numpy.sin", "matplotlib.testing.decorators.image_comparison", "numpy.arctan2", "matplotlib.ticker.MaxNLocator", "matplotlib.pyplot.figure" ], [ "numpy.log", "numpy.power", "numpy.asarray", "numpy.reshape", "scipy.optimize.minimize_scalar", "numpy.std", "numpy.mean", "numpy.any", "numpy.var", "numpy.exp", "numpy.isclose" ], [ "numpy.abs", "numpy.random.seed", "numpy.linspace", "scipy.stats.norm.pdf", "scipy.stats.gaussian_kde.__init__", "numpy.linalg.inv", "matplotlib.pylab.hist", "matplotlib.pylab.title", "numpy.linalg.det", "scipy.stats.gaussian_kde", "matplotlib.pylab.figure", "numpy.random.randn", "matplotlib.pylab.plot", "matplotlib.pylab.legend", "numpy.sum" ], [ "scipy.optimize._basinhopping.Metropolis", "numpy.mean", "numpy.var", "numpy.bool_", "scipy.optimize.basinhopping", "numpy.random.default_rng", "numpy.testing.assert_equal", "scipy.optimize.OptimizeResult", "scipy.optimize._basinhopping.AdaptiveStepsize", "numpy.sin", "scipy.optimize._basinhopping.RandomDisplacement", "numpy.testing.assert_almost_equal", "numpy.zeros", "scipy._lib._pep440.Version", "numpy.testing.assert_", "numpy.errstate", "numpy.array", "numpy.random.seed", "numpy.cos", "scipy.optimize._basinhopping.Storage", "numpy.shape" ], [ "matplotlib.pyplot.switch_backend", "matplotlib.testing.compare.compare_images", "matplotlib.rcdefaults", "matplotlib.get_backend", "matplotlib.pyplot.get_fignums", "matplotlib.pyplot.close" ], [ "numpy.dot", "numpy.linalg.cholesky", "numpy.sqrt", "numpy.abs", "numpy.unique", "numpy.linalg.inv", "numpy.asarray", "numpy.eye", "numpy.squeeze", "numpy.lib.recfunctions.append_fields", "pandas.DataFrame", "numpy.linalg.eigh", "numpy.column_stack", "numpy.repeat", 
"numpy.array", "numpy.where" ], [ "numpy.diag", "numpy.testing.assert_equal", "numpy.linspace", "numpy.eye", "numpy.ones", "numpy.atleast_1d", "numpy.column_stack", "numpy.testing.assert_allclose" ], [ "numpy.allclose", "numpy.linspace", "numpy.sqrt", "numpy.asarray", "matplotlib.patches.Rectangle", "numpy.cumsum", "numpy.any", "numpy.iterable", "numpy.ndindex", "numpy.array" ], [ "pandas.read_csv", "numpy.asarray", "numpy.column_stack", "numpy.array", "numpy.testing.assert_allclose" ], [ "numpy.array" ], [ "matplotlib.rcdefaults", "matplotlib.gridspec.SubplotSpec._from_subplot_args", "matplotlib.image.imread", "matplotlib.artist.setp", "matplotlib._api.make_keyword_only", "matplotlib.widgets.SubplotTool", "matplotlib._api.rename_parameter", "matplotlib.cm.get_cmap", "matplotlib._pylab_helpers.Gcf._set_new_active_manager", "matplotlib.figure.figaspect", "matplotlib.image.imsave", "matplotlib.artist.getp", "matplotlib._pylab_helpers.Gcf.has_fignum", "matplotlib.artist.get", "numpy.asanyarray", "matplotlib.patheffects.withStroke", "matplotlib.set_loglevel", "matplotlib.interactive", "matplotlib._pylab_helpers.Gcf.destroy", "matplotlib._pylab_helpers.Gcf.set_active", "matplotlib.gridspec.GridSpec._check_gridspec_exists", "matplotlib.docstring.copy", "matplotlib.rcParams.copy", "matplotlib._pylab_helpers.Gcf.destroy_fig", "matplotlib.is_interactive", "matplotlib._api.warn_external", "matplotlib._pylab_helpers.Gcf.get_all_fig_managers", "matplotlib.get_backend", "matplotlib.figure.Figure.legend.__doc__.replace", "matplotlib.rc", "matplotlib.rc_context", "matplotlib.cbook._get_running_interactive_framework", "matplotlib.cbook._backend_module_name", "matplotlib._pylab_helpers.Gcf.destroy_all", "matplotlib._pylab_helpers.Gcf.get_active", "matplotlib._pylab_helpers.Gcf.get_fig_manager" ], [ "numpy.min" ], [ "numpy.distutils.ccompiler.simple_version_match", "numpy.distutils.msvc9compiler.MSVCCompiler.initialize", "numpy.distutils.msvc9compiler.MSVCCompiler.__init__" ], [ "numpy.testing.assert_equal", "numpy.maximum", "numpy.linspace", "numpy.asarray", "numpy.arange", "numpy.isnan", "pandas.DataFrame", "numpy.ones", "numpy.testing.assert_array_less", "numpy.testing.assert_almost_equal", "numpy.testing.assert_raises", "numpy.testing.assert_allclose", "numpy.repeat", "numpy.array" ], [ "matplotlib._api.check_in_list", "numpy.asarray", "numpy.cumsum", "matplotlib.font_manager.FontProperties._from_any", "matplotlib.transforms.Bbox.from_bounds", "matplotlib.offsetbox.HPacker", "matplotlib.offsetbox.DrawingArea", "matplotlib._api.warn_external", "matplotlib.transforms.BboxTransformFrom", "matplotlib.font_manager.FontProperties", "matplotlib.cbook.silent_list", "matplotlib.transforms.BboxTransformTo", "matplotlib.patches.Shadow", "numpy.iterable", "numpy.array", "matplotlib.transforms.TransformedBbox", "numpy.tile", "matplotlib.colors.to_rgba_array", "matplotlib.patches.FancyBboxPatch", "matplotlib.docstring.interpd.update", "matplotlib.offsetbox.TextArea", "matplotlib.offsetbox.VPacker" ], [ "numpy.random.seed", "numpy.asarray", "numpy.arange", "pandas.DataFrame", "numpy.ones", "numpy.random.uniform", "numpy.random.normal", "numpy.testing.assert_allclose", "numpy.outer", "numpy.exp", "numpy.where", "numpy.sum" ], [ "matplotlib.pyplot.legend", "numpy.random.seed", "numpy.linspace", "matplotlib.pyplot.title", "numpy.arange", "numpy.sin", "matplotlib.pyplot.plot", "matplotlib.pyplot.subplot", "numpy.random.randn", "numpy.argsort", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ], [ "numpy.log", 
"numpy.absolute", "scipy.special.kolmogorov", "numpy.sqrt", "numpy.expand_dims", "scipy.stats.distributions.norm.cdf", "numpy.arange", "numpy.sort", "numpy.concatenate", "numpy.max", "scipy.stats.distributions.ksone.sf", "numpy.random.randn", "numpy.searchsorted", "numpy.exp", "scipy.stats.t.rvs", "numpy.array", "scipy.stats.distributions.norm.rvs" ], [ "numpy.complex128", "numpy.uint32", "numpy.int32", "numpy.datetime64", "numpy.timedelta64", "numpy.int64", "numpy.ones", "numpy.uint64", "numpy.float64", "numpy.float32", "numpy.bool_", "numpy.complex64" ], [ "numpy.testing.assert_equal", "scipy.stats.rankdata", "numpy.unique", "numpy.random.choice", "numpy.arange", "numpy.ones", "numpy.testing.assert_array_equal", "scipy.stats.tiecorrect", "numpy.array", "numpy.zeros" ], [ "pandas.Series", "pandas.MultiIndex.from_tuples", "pandas.DataFrame", "numpy.max", "pandas.testing.assert_frame_equal", "numpy.random.randn", "numpy.mean", "numpy.nanmean", "pandas.NamedAgg", "pandas._testing.assert_frame_equal", "pandas.Index", "numpy.std", "pandas.Int64Index", "pandas._testing.assert_series_equal", "pandas._testing.assert_index_equal", "pandas.concat", "pandas.core.dtypes.common.is_integer_dtype", "pandas._testing.assert_produces_warning", "pandas.interval_range", "pandas.MultiIndex", "numpy.min", "pandas.array", "pandas.MultiIndex.from_product", "pandas.date_range", "numpy.array", "pandas.CategoricalIndex", "pandas.period_range", "numpy.percentile", "pandas.MultiIndex.from_arrays", "pandas._testing.makeTimeDataFrame", "pandas.Timestamp" ], [ "pandas.DataFrame", "numpy.all", "numpy.append", "numpy.diff", "numpy.insert", "numpy.array" ], [ "numpy.array", "pandas._testing.assert_almost_equal", "pandas._testing.assert_numpy_array_equal", "pandas.Series", "numpy.abs", "numpy.asarray", "pandas.array", "pandas.MultiIndex.from_tuples", "pandas.DataFrame", "pandas._testing.assert_extension_array_equal", "numpy.exp", "numpy.add", "pandas._testing.assert_series_equal", "pandas._testing.assert_frame_equal" ], [ "numpy.diag", "numpy.linalg.solve", "numpy.sqrt", "numpy.abs", "numpy.linalg.matrix_rank", "numpy.linalg.inv", "numpy.eye", "numpy.ones", "numpy.linalg.pinv", "numpy.size", "numpy.column_stack", "numpy.array", "numpy.zeros", "scipy.stats.chi2.sf" ], [ "numpy.sort", "numpy.isfinite", "pandas.DataFrame", "numpy.linspace" ], [ "numpy.ma.isMaskedArray", "numpy.mintypecode", "numpy.issubdtype", "numpy.ndarray", "numpy.dtype", "numpy.all", "numpy.extract", "numpy.shape", "numpy.broadcast_arrays", "numpy.random.RandomState", "numpy.empty" ], [ "numpy.testing.assert_almost_equal", "numpy.log10" ], [ "numpy.dot", "numpy.split", "numpy.imag", "numpy.take", "numpy.asarray", "numpy.around", "numpy.issubdtype", "numpy.concatenate", "numpy.lib.stride_tricks.as_strided", "numpy.mean", "numpy.any", "scipy.fft.next_fast_len", "numpy.iscomplexobj", "numpy.moveaxis", "scipy.fft.fftfreq", "numpy.where", "scipy.spatial.cKDTree", "numpy.polyval", "numpy.hstack", "numpy.swapaxes", "scipy.fft.ifft", "scipy.fft.fft2", "numpy.pad", "numpy.unique", "numpy.reshape", "numpy.arange", "scipy.linalg.lstsq", "numpy.finfo", "numpy.atleast_1d", "numpy.roots", "numpy.size", "numpy.real", "numpy.isrealobj", "numpy.ravel", "numpy.zeros", "scipy.fft.ifft2", "numpy.log", "scipy.special.lambertw", "scipy.linalg.companion", "numpy.polymul", "scipy.fft.irfft", "numpy.atleast_2d", "numpy.polydiv", "scipy._lib._util.prod", "scipy.fft._helper._init_nd_shape_and_axes", "numpy.array", "numpy.sum", "numpy.polyadd", "numpy.correlate", "numpy.convolve", 
"numpy.linalg.solve", "numpy.abs", "scipy.fft.rfft", "numpy.empty", "numpy.ones", "numpy.result_type", "numpy.polysub", "numpy.prod", "numpy.angle", "scipy.fft.fft" ], [ "numpy.dot", "numpy.random.seed", "scipy.sparse.eye", "scipy.io.mmread", "scipy.optimize.fmin_bfgs", "scipy.optimize.fmin_ncg", "numpy.random.rand", "scipy.linalg.norm", "matplotlib.pyplot.figure" ], [ "numpy.dot", "numpy.sqrt", "numpy.linalg.matrix_rank", "numpy.asarray", "scipy.stats.distributions.poisson", "numpy.exp", "scipy.stats.distributions.binom", "scipy.stats.distributions.gamma", "numpy.allclose", "numpy.empty_like", "numpy.column_stack", "numpy.zeros", "numpy.log", "numpy.multiply", "numpy.power", "numpy.isnan", "scipy.optimize.minimize", "numpy.array", "numpy.sum", "numpy.linalg.solve", "numpy.ones", "numpy.linalg.pinv", "numpy.shape", "scipy.optimize.brentq", "scipy.stats.chi2.sf" ], [ "numpy.diag", "numpy.testing.assert_equal", "numpy.log", "numpy.isinf", "numpy.isnan", "numpy.testing.assert_", "numpy.testing.assert_allclose", "numpy.zeros", "numpy.column_stack" ], [ "numpy.linalg.eigvals", "numpy.iterable", "numpy.sqrt", "numpy.abs", "numpy.asarray", "numpy.arange", "numpy.multiply.accumulate", "numpy.ndim", "numpy.all", "numpy.core.multiarray.normalize_axis_index", "numpy.moveaxis", "numpy.exp", "numpy.linalg.eigvalsh", "numpy.array", "numpy.zeros", "numpy.empty" ], [ "numpy.ravel", "numpy.eye", "numpy.array" ], [ "numpy.diag", "numpy.dot", "numpy.sqrt", "numpy.arctan", "numpy.vstack", "scipy.integrate.odeint", "numpy.exp", "numpy.testing.assert_equal", "numpy.allclose", "numpy.arange", "numpy.eye", "numpy.sin", "scipy.integrate.complex_ode", "numpy.zeros", "numpy.testing.assert_array_almost_equal", "numpy.log", "numpy.testing.assert_", "numpy.testing.assert_allclose", "numpy.array", "numpy.testing.assert_warns", "numpy.cos", "numpy.ones", "numpy.testing.assert_array_equal", "scipy.integrate.ode" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "0.15", "1.4", "0.16", "1.0", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "0.10", "0.17", "1.3" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.6", "1.4", "1.5", "1.7", "1.3" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.6", "1.10", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.7", "1.0", "1.2", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], 
"numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.6", "1.10", "1.4", "1.9", "1.5", "1.7", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.12", "0.14", "0.15" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
RussellM2020/maml_gps
[ "631560dfd4e23dc2da9bfbbd2e3c5252aa9775c5", "631560dfd4e23dc2da9bfbbd2e3c5252aa9775c5", "631560dfd4e23dc2da9bfbbd2e3c5252aa9775c5" ]
[ "rllab/optimizers/conjugate_gradient_optimizer.py", "rllab/misc/logger.py", "rllab/envs/mujoco/hill/swimmer3d_hill_env.py" ]
[ "import numpy as np\nimport theano\nimport theano.tensor as TT\n\nfrom rllab.core.serializable import Serializable\nfrom rllab.misc import ext\nfrom rllab.misc import krylov\nfrom rllab.misc import logger\nfrom rllab.misc.ext import sliced_fun\n\n\nclass PerlmutterHvp(Serializable):\n\n def __init__(self, num_slices=1):\n Serializable.quick_init(self, locals())\n self.target = None\n self.reg_coeff = None\n self.opt_fun = None\n self._num_slices = num_slices\n\n def update_opt(self, f, target, inputs, reg_coeff):\n self.target = target\n self.reg_coeff = reg_coeff\n params = target.get_params(trainable=True)\n\n constraint_grads = theano.grad(\n f, wrt=params, disconnected_inputs='warn')\n xs = tuple([ext.new_tensor_like(\"%s x\" % p.name, p) for p in params])\n\n def Hx_plain():\n Hx_plain_splits = TT.grad(\n TT.sum([TT.sum(g * x)\n for g, x in zip(constraint_grads, xs)]),\n wrt=params,\n disconnected_inputs='warn'\n )\n return TT.concatenate([TT.flatten(s) for s in Hx_plain_splits])\n\n self.opt_fun = ext.lazydict(\n f_Hx_plain=lambda: ext.compile_function(\n inputs=inputs + xs,\n outputs=Hx_plain(),\n log_name=\"f_Hx_plain\",\n ),\n )\n\n def build_eval(self, inputs):\n def eval(x):\n xs = tuple(self.target.flat_to_params(x, trainable=True))\n ret = sliced_fun(self.opt_fun[\"f_Hx_plain\"], self._num_slices)(\n inputs, xs) + self.reg_coeff * x\n return ret\n\n return eval\n\n\nclass FiniteDifferenceHvp(Serializable):\n\n def __init__(self, base_eps=1e-8, symmetric=True, grad_clip=None, num_slices=1):\n Serializable.quick_init(self, locals())\n self.base_eps = base_eps\n self.symmetric = symmetric\n self.grad_clip = grad_clip\n self._num_slices = num_slices\n\n def update_opt(self, f, target, inputs, reg_coeff):\n self.target = target\n self.reg_coeff = reg_coeff\n\n params = target.get_params(trainable=True)\n\n constraint_grads = theano.grad(\n f, wrt=params, disconnected_inputs='warn')\n flat_grad = ext.flatten_tensor_variables(constraint_grads)\n\n def f_Hx_plain(*args):\n inputs_ = args[:len(inputs)]\n xs = args[len(inputs):]\n flat_xs = np.concatenate([np.reshape(x, (-1,)) for x in xs])\n param_val = self.target.get_param_values(trainable=True)\n eps = np.cast['float32'](\n self.base_eps / (np.linalg.norm(param_val) + 1e-8))\n self.target.set_param_values(\n param_val + eps * flat_xs, trainable=True)\n flat_grad_dvplus = self.opt_fun[\"f_grad\"](*inputs_)\n if self.symmetric:\n self.target.set_param_values(\n param_val - eps * flat_xs, trainable=True)\n flat_grad_dvminus = self.opt_fun[\"f_grad\"](*inputs_)\n hx = (flat_grad_dvplus - flat_grad_dvminus) / (2 * eps)\n self.target.set_param_values(param_val, trainable=True)\n else:\n self.target.set_param_values(param_val, trainable=True)\n flat_grad = self.opt_fun[\"f_grad\"](*inputs_)\n hx = (flat_grad_dvplus - flat_grad) / eps\n return hx\n\n self.opt_fun = ext.lazydict(\n f_grad=lambda: ext.compile_function(\n inputs=inputs,\n outputs=flat_grad,\n log_name=\"f_grad\",\n ),\n f_Hx_plain=lambda: f_Hx_plain,\n )\n\n def build_eval(self, inputs):\n def eval(x):\n xs = tuple(self.target.flat_to_params(x, trainable=True))\n ret = sliced_fun(self.opt_fun[\"f_Hx_plain\"], self._num_slices)(\n inputs, xs) + self.reg_coeff * x\n return ret\n\n return eval\n\n\nclass ConjugateGradientOptimizer(Serializable):\n \"\"\"\n Performs constrained optimization via line search. 
The search direction is computed using a conjugate gradient\n algorithm, which gives x = A^{-1}g, where A is a second order approximation of the constraint and g is the gradient\n of the loss function.\n \"\"\"\n\n def __init__(\n self,\n cg_iters=10,\n reg_coeff=1e-5,\n subsample_factor=1.,\n backtrack_ratio=0.8,\n max_backtracks=15,\n accept_violation=False,\n hvp_approach=None,\n num_slices=1):\n \"\"\"\n\n :param cg_iters: The number of CG iterations used to calculate A^-1 g\n :param reg_coeff: A small value so that A -> A + reg*I\n :param subsample_factor: Subsampling factor to reduce samples when using \"conjugate gradient. Since the\n computation time for the descent direction dominates, this can greatly reduce the overall computation time.\n :param accept_violation: whether to accept the descent step if it violates the line search condition after\n exhausting all backtracking budgets\n :return:\n \"\"\"\n Serializable.quick_init(self, locals())\n self._cg_iters = cg_iters\n self._reg_coeff = reg_coeff\n self._subsample_factor = subsample_factor\n self._backtrack_ratio = backtrack_ratio\n self._max_backtracks = max_backtracks\n self._num_slices = num_slices\n\n self._opt_fun = None\n self._target = None\n self._max_constraint_val = None\n self._constraint_name = None\n self._accept_violation = accept_violation\n if hvp_approach is None:\n hvp_approach = PerlmutterHvp(num_slices)\n self._hvp_approach = hvp_approach\n\n def update_opt(self, loss, target, leq_constraint, inputs, extra_inputs=None, constraint_name=\"constraint\", *args,\n **kwargs):\n \"\"\"\n :param loss: Symbolic expression for the loss function.\n :param target: A parameterized object to optimize over. It should implement methods of the\n :class:`rllab.core.paramerized.Parameterized` class.\n :param leq_constraint: A constraint provided as a tuple (f, epsilon), of the form f(*inputs) <= epsilon.\n :param inputs: A list of symbolic variables as inputs, which could be subsampled if needed. 
It is assumed\n that the first dimension of these inputs should correspond to the number of data points\n :param extra_inputs: A list of symbolic variables as extra inputs which should not be subsampled\n :return: No return value.\n \"\"\"\n\n inputs = tuple(inputs)\n if extra_inputs is None:\n extra_inputs = tuple()\n else:\n extra_inputs = tuple(extra_inputs)\n\n constraint_term, constraint_value = leq_constraint\n\n params = target.get_params(trainable=True)\n grads = theano.grad(loss, wrt=params, disconnected_inputs='warn')\n flat_grad = ext.flatten_tensor_variables(grads)\n\n self._hvp_approach.update_opt(f=constraint_term, target=target, inputs=inputs + extra_inputs,\n reg_coeff=self._reg_coeff)\n\n self._target = target\n self._max_constraint_val = constraint_value\n self._constraint_name = constraint_name\n\n self._opt_fun = ext.lazydict(\n f_loss=lambda: ext.compile_function(\n inputs=inputs + extra_inputs,\n outputs=loss,\n log_name=\"f_loss\",\n ),\n f_grad=lambda: ext.compile_function(\n inputs=inputs + extra_inputs,\n outputs=flat_grad,\n log_name=\"f_grad\",\n ),\n f_constraint=lambda: ext.compile_function(\n inputs=inputs + extra_inputs,\n outputs=constraint_term,\n log_name=\"constraint\",\n ),\n f_loss_constraint=lambda: ext.compile_function(\n inputs=inputs + extra_inputs,\n outputs=[loss, constraint_term],\n log_name=\"f_loss_constraint\",\n ),\n )\n\n def loss(self, inputs, extra_inputs=None):\n inputs = tuple(inputs)\n if extra_inputs is None:\n extra_inputs = tuple()\n return sliced_fun(self._opt_fun[\"f_loss\"], self._num_slices)(inputs, extra_inputs)\n\n def constraint_val(self, inputs, extra_inputs=None):\n inputs = tuple(inputs)\n if extra_inputs is None:\n extra_inputs = tuple()\n return sliced_fun(self._opt_fun[\"f_constraint\"], self._num_slices)(inputs, extra_inputs)\n\n def optimize(self, inputs, extra_inputs=None, subsample_grouped_inputs=None):\n\n inputs = tuple(inputs)\n if extra_inputs is None:\n extra_inputs = tuple()\n\n if self._subsample_factor < 1:\n if subsample_grouped_inputs is None:\n subsample_grouped_inputs = [inputs]\n subsample_inputs = tuple()\n for inputs_grouped in subsample_grouped_inputs:\n n_samples = len(inputs_grouped[0])\n inds = np.random.choice(\n n_samples, int(n_samples * self._subsample_factor), replace=False)\n subsample_inputs += tuple([x[inds] for x in inputs_grouped])\n else:\n subsample_inputs = inputs\n\n logger.log(\"computing loss before\")\n loss_before = sliced_fun(self._opt_fun[\"f_loss\"], self._num_slices)(\n inputs, extra_inputs)\n logger.log(\"performing update\")\n logger.log(\"computing descent direction\")\n\n flat_g = sliced_fun(self._opt_fun[\"f_grad\"], self._num_slices)(\n inputs, extra_inputs)\n\n Hx = self._hvp_approach.build_eval(subsample_inputs + extra_inputs)\n\n descent_direction = krylov.cg(Hx, flat_g, cg_iters=self._cg_iters)\n\n initial_step_size = np.sqrt(\n 2.0 * self._max_constraint_val *\n (1. 
/ (descent_direction.dot(Hx(descent_direction)) + 1e-8))\n )\n if np.isnan(initial_step_size):\n initial_step_size = 1.\n flat_descent_step = initial_step_size * descent_direction\n\n logger.log(\"descent direction computed\")\n\n prev_param = np.copy(self._target.get_param_values(trainable=True))\n n_iter = 0\n for n_iter, ratio in enumerate(self._backtrack_ratio ** np.arange(self._max_backtracks)):\n cur_step = ratio * flat_descent_step\n cur_param = prev_param - cur_step\n self._target.set_param_values(cur_param, trainable=True)\n loss, constraint_val = sliced_fun(\n self._opt_fun[\"f_loss_constraint\"], self._num_slices)(inputs, extra_inputs)\n if loss < loss_before and constraint_val <= self._max_constraint_val:\n break\n if (np.isnan(loss) or np.isnan(constraint_val) or loss >= loss_before or constraint_val >=\n self._max_constraint_val) and not self._accept_violation:\n logger.log(\"Line search condition violated. Rejecting the step!\")\n if np.isnan(loss):\n logger.log(\"Violated because loss is NaN\")\n if np.isnan(constraint_val):\n logger.log(\"Violated because constraint %s is NaN\" %\n self._constraint_name)\n if loss >= loss_before:\n logger.log(\"Violated because loss not improving\")\n if constraint_val >= self._max_constraint_val:\n logger.log(\n \"Violated because constraint %s is violated\" % self._constraint_name)\n self._target.set_param_values(prev_param, trainable=True)\n logger.log(\"backtrack iters: %d\" % n_iter)\n logger.log(\"computing loss after\")\n logger.log(\"optimization finished\")\n", "from enum import Enum\n\nfrom rllab.misc.tabulate import tabulate\nfrom rllab.misc.console import mkdir_p, colorize\nfrom rllab.misc.autoargs import get_all_parameters\nfrom contextlib import contextmanager\nimport numpy as np\nimport os\nimport os.path as osp\nimport sys\nimport datetime\nimport dateutil.tz\nimport csv\nimport joblib\nimport json\nimport pickle\nimport base64\nfrom collections import OrderedDict\n\n_prefixes = []\n_prefix_str = ''\n\n_tabular_prefixes = []\n_tabular_prefix_str = ''\n\n_tabular = []\n\n_text_outputs = []\n_tabular_outputs = []\n\n_text_fds = {}\n_tabular_fds = {}\n_tabular_header_written = set()\n\n_snapshot_dir = None\n_snapshot_mode = 'all'\n_snapshot_gap = 1\n\n_log_tabular_only = False\n_header_printed = False\n\n\ndef _add_output(file_name, arr, fds, mode='a'):\n if file_name not in arr:\n mkdir_p(os.path.dirname(file_name))\n arr.append(file_name)\n fds[file_name] = open(file_name, mode)\n\n\ndef _remove_output(file_name, arr, fds):\n if file_name in arr:\n fds[file_name].close()\n del fds[file_name]\n arr.remove(file_name)\n\n\ndef push_prefix(prefix):\n _prefixes.append(prefix)\n global _prefix_str\n _prefix_str = ''.join(_prefixes)\n\n\ndef add_text_output(file_name):\n _add_output(file_name, _text_outputs, _text_fds, mode='a')\n\n\ndef remove_text_output(file_name):\n _remove_output(file_name, _text_outputs, _text_fds)\n\n\ndef add_tabular_output(file_name):\n _add_output(file_name, _tabular_outputs, _tabular_fds, mode='w')\n\n\ndef remove_tabular_output(file_name):\n if _tabular_fds[file_name] in _tabular_header_written:\n _tabular_header_written.remove(_tabular_fds[file_name])\n _remove_output(file_name, _tabular_outputs, _tabular_fds)\n\n\ndef set_snapshot_dir(dir_name):\n global _snapshot_dir\n _snapshot_dir = dir_name\n\n\ndef get_snapshot_dir():\n return _snapshot_dir\n\n\ndef get_snapshot_mode():\n return _snapshot_mode\n\n\ndef set_snapshot_mode(mode):\n global _snapshot_mode\n _snapshot_mode = mode\n\ndef 
get_snapshot_gap():\n return _snapshot_gap\n\ndef set_snapshot_gap(gap):\n global _snapshot_gap\n _snapshot_gap = gap\n\ndef set_log_tabular_only(log_tabular_only):\n global _log_tabular_only\n _log_tabular_only = log_tabular_only\n\n\ndef get_log_tabular_only():\n return _log_tabular_only\n\n\ndef log(s, with_prefix=True, with_timestamp=True, color=None):\n out = s\n if with_prefix:\n out = _prefix_str + out\n if with_timestamp:\n now = datetime.datetime.now(dateutil.tz.tzlocal())\n timestamp = now.strftime('%Y-%m-%d %H:%M:%S.%f %Z')\n out = \"%s | %s\" % (timestamp, out)\n if color is not None:\n out = colorize(out, color)\n if not _log_tabular_only:\n # Also log to stdout\n print(out)\n for fd in list(_text_fds.values()):\n fd.write(out + '\\n')\n fd.flush()\n sys.stdout.flush()\n\n\ndef record_tabular(key, val, front=False):\n if not front:\n _tabular.append((_tabular_prefix_str + str(key), str(val)))\n elif front:\n _tabular.insert(0,(_tabular_prefix_str + str(key), str(val)))\n\n\ndef push_tabular_prefix(key):\n _tabular_prefixes.append(key)\n global _tabular_prefix_str\n _tabular_prefix_str = ''.join(_tabular_prefixes)\n\n\ndef pop_tabular_prefix():\n del _tabular_prefixes[-1]\n global _tabular_prefix_str\n _tabular_prefix_str = ''.join(_tabular_prefixes)\n\n\n@contextmanager\ndef prefix(key):\n push_prefix(key)\n try:\n yield\n finally:\n pop_prefix()\n\n\n@contextmanager\ndef tabular_prefix(key):\n push_tabular_prefix(key)\n yield\n pop_tabular_prefix()\n\n\nclass TerminalTablePrinter(object):\n def __init__(self):\n self.headers = None\n self.tabulars = []\n\n def print_tabular(self, new_tabular):\n if self.headers is None:\n self.headers = [x[0] for x in new_tabular]\n else:\n assert len(self.headers) == len(new_tabular)\n self.tabulars.append([x[1] for x in new_tabular])\n self.refresh()\n\n def refresh(self):\n import os\n rows, columns = os.popen('stty size', 'r').read().split()\n tabulars = self.tabulars[-(int(rows) - 3):]\n sys.stdout.write(\"\\x1b[2J\\x1b[H\")\n sys.stdout.write(tabulate(tabulars, self.headers))\n sys.stdout.write(\"\\n\")\n\n\ntable_printer = TerminalTablePrinter()\n\n\ndef dump_tabular(*args, **kwargs):\n wh = kwargs.pop(\"write_header\", None)\n if len(_tabular) > 0:\n if _log_tabular_only:\n table_printer.print_tabular(_tabular)\n else:\n for line in tabulate(_tabular).split('\\n'):\n log(line, *args, **kwargs)\n tabular_dict = OrderedDict(_tabular)\n # Also write to the csv files\n # This assumes that the keys in each iteration won't change!\n for tabular_fd in list(_tabular_fds.values()):\n writer = csv.DictWriter(tabular_fd, fieldnames=list(tabular_dict.keys()))\n if wh or (wh is None and tabular_fd not in _tabular_header_written):\n writer.writeheader()\n _tabular_header_written.add(tabular_fd)\n writer.writerow(tabular_dict)\n tabular_fd.flush()\n del _tabular[:]\n\n\ndef pop_prefix():\n del _prefixes[-1]\n global _prefix_str\n _prefix_str = ''.join(_prefixes)\n\n\ndef save_itr_params(itr, params):\n if _snapshot_dir:\n if _snapshot_mode == 'all':\n file_name = osp.join(_snapshot_dir, 'itr_%d.pkl' % itr)\n joblib.dump(params, file_name, compress=3)\n elif _snapshot_mode == 'last':\n # override previous params\n file_name = osp.join(_snapshot_dir, 'params.pkl')\n joblib.dump(params, file_name, compress=3)\n elif _snapshot_mode == \"gap\":\n if itr % _snapshot_gap == 0:\n file_name = osp.join(_snapshot_dir, 'itr_%d.pkl' % itr)\n joblib.dump(params, file_name, compress=3)\n elif _snapshot_mode == 'none':\n pass\n else:\n raise 
NotImplementedError\n\n\ndef log_parameters(log_file, args, classes):\n log_params = {}\n for param_name, param_value in args.__dict__.items():\n if any([param_name.startswith(x) for x in list(classes.keys())]):\n continue\n log_params[param_name] = param_value\n for name, cls in classes.items():\n if isinstance(cls, type):\n params = get_all_parameters(cls, args)\n params[\"_name\"] = getattr(args, name)\n log_params[name] = params\n else:\n log_params[name] = getattr(cls, \"__kwargs\", dict())\n log_params[name][\"_name\"] = cls.__module__ + \".\" + cls.__class__.__name__\n mkdir_p(os.path.dirname(log_file))\n with open(log_file, \"w\") as f:\n json.dump(log_params, f, indent=2, sort_keys=True)\n\n\ndef stub_to_json(stub_sth):\n from rllab.misc import instrument\n if isinstance(stub_sth, instrument.StubObject):\n assert len(stub_sth.args) == 0\n data = dict()\n for k, v in stub_sth.kwargs.items():\n data[k] = stub_to_json(v)\n data[\"_name\"] = stub_sth.proxy_class.__module__ + \".\" + stub_sth.proxy_class.__name__\n return data\n elif isinstance(stub_sth, instrument.StubAttr):\n return dict(\n obj=stub_to_json(stub_sth.obj),\n attr=stub_to_json(stub_sth.attr_name)\n )\n elif isinstance(stub_sth, instrument.StubMethodCall):\n return dict(\n obj=stub_to_json(stub_sth.obj),\n method_name=stub_to_json(stub_sth.method_name),\n args=stub_to_json(stub_sth.args),\n kwargs=stub_to_json(stub_sth.kwargs),\n )\n elif isinstance(stub_sth, instrument.BinaryOp):\n return \"binary_op\"\n elif isinstance(stub_sth, instrument.StubClass):\n return stub_sth.proxy_class.__module__ + \".\" + stub_sth.proxy_class.__name__\n elif isinstance(stub_sth, dict):\n return {stub_to_json(k): stub_to_json(v) for k, v in stub_sth.items()}\n elif isinstance(stub_sth, (list, tuple)):\n return list(map(stub_to_json, stub_sth))\n elif type(stub_sth) == type(lambda: None):\n if stub_sth.__module__ is not None:\n return stub_sth.__module__ + \".\" + stub_sth.__name__\n return stub_sth.__name__\n elif \"theano\" in str(type(stub_sth)):\n return repr(stub_sth)\n return stub_sth\n\n\nclass MyEncoder(json.JSONEncoder):\n def default(self, o):\n if isinstance(o, type):\n return {'$class': o.__module__ + \".\" + o.__name__}\n elif isinstance(o, Enum):\n return {'$enum': o.__module__ + \".\" + o.__class__.__name__ + '.' 
+ o.name}\n return json.JSONEncoder.default(self, o)\n\n\ndef log_parameters_lite(log_file, args):\n log_params = {}\n for param_name, param_value in args.__dict__.items():\n log_params[param_name] = param_value\n if args.args_data is not None:\n try:\n stub_method = pickle.loads(base64.b64decode(args.args_data))\n method_args = stub_method.kwargs\n log_params[\"json_args\"] = dict()\n for k, v in list(method_args.items()):\n log_params[\"json_args\"][k] = stub_to_json(v)\n kwargs = stub_method.obj.kwargs\n for k in [\"baseline\", \"env\", \"policy\"]:\n if k in kwargs:\n log_params[\"json_args\"][k] = stub_to_json(kwargs.pop(k))\n log_params[\"json_args\"][\"algo\"] = stub_to_json(stub_method.obj)\n except:\n pass\n mkdir_p(os.path.dirname(log_file))\n with open(log_file, \"w\") as f:\n json.dump(log_params, f, indent=2, sort_keys=True, cls=MyEncoder)\n\n\ndef log_variant(log_file, variant_data):\n mkdir_p(os.path.dirname(log_file))\n if hasattr(variant_data, \"dump\"):\n variant_data = variant_data.dump()\n variant_json = stub_to_json(variant_data)\n with open(log_file, \"w\") as f:\n json.dump(variant_json, f, indent=2, sort_keys=True, cls=MyEncoder)\n\n\ndef record_tabular_misc_stat(key, values):\n record_tabular(key + \"Average\", np.average(values))\n record_tabular(key + \"Std\", np.std(values))\n record_tabular(key + \"Median\", np.median(values))\n record_tabular(key + \"Min\", np.amin(values))\n record_tabular(key + \"Max\", np.amax(values))\n", "import numpy as np\n\nfrom rllab.envs.mujoco.hill.hill_env import HillEnv\nfrom rllab.envs.mujoco.swimmer3d_env import Swimmer3DEnv\nfrom rllab.misc.overrides import overrides\nimport rllab.envs.mujoco.hill.terrain as terrain\nfrom rllab.spaces import Box\n\nclass Swimmer3DHillEnv(HillEnv):\n\n MODEL_CLASS = Swimmer3DEnv\n \n @overrides\n def _mod_hfield(self, hfield):\n # clear a flat patch for the robot to start off from\n return terrain.clear_patch(hfield, Box(np.array([-3.0, -1.5]), np.array([0.0, -0.5])))" ]
[ [ "numpy.isnan", "numpy.arange", "numpy.linalg.norm", "numpy.reshape" ], [ "numpy.amax", "numpy.amin", "numpy.median", "numpy.std", "numpy.average" ], [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
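Each record above pairs a repository with parallel lists — among them the source file paths, the third-party calls extracted from each file, and a per-library list of candidate versions for that file. As a hedged illustration only (not part of the dataset itself), the short Python sketch below shows one way a record shaped like the RussellM2020/maml_gps entry directly above could be summarized once loaded into plain Python structures. The dictionary keys and the summarize helper are assumptions made for this example, and the literal values are abbreviated stand-ins copied from that record.

# Illustrative sketch, not a dataset row: summarize one record's API usage.
from collections import defaultdict

record = {
    "repo_name": "RussellM2020/maml_gps",
    "file_path": [
        "rllab/optimizers/conjugate_gradient_optimizer.py",
        "rllab/misc/logger.py",
        "rllab/envs/mujoco/hill/swimmer3d_hill_env.py",
    ],
    "apis": [
        ["numpy.isnan", "numpy.arange", "numpy.linalg.norm", "numpy.reshape"],
        ["numpy.amax", "numpy.amin", "numpy.median", "numpy.std", "numpy.average"],
        ["numpy.array"],
    ],
    "possible_versions": [
        {"matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": []},
        {"matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": []},
        {"matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": []},
    ],
}

def summarize(rec):
    """Count extracted calls per top-level library and note which tracked
    libraries carry version candidates, for each file in one record."""
    summary = {}
    for path, calls, versions in zip(rec["file_path"], rec["apis"], rec["possible_versions"]):
        per_lib = defaultdict(int)
        for qualified_name in calls:
            # e.g. "numpy.linalg.norm" is attributed to its top-level package "numpy"
            per_lib[qualified_name.split(".")[0]] += 1
        constrained = [lib for lib, vers in versions.items() if vers]
        summary[path] = {
            "calls_per_library": dict(per_lib),
            "version_constrained": constrained,
        }
    return summary

if __name__ == "__main__":
    for path, info in summarize(record).items():
        print(path, info)

For this particular record the version lists are all empty, so "version_constrained" comes back empty for every file; records such as the pandas- or scipy-constrained ones further down would instead list the libraries whose candidate versions are populated.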
ngwenbin/ExpenseTracker
[ "f50793c9a4c6efc4f58cc7d759b45f2e16b7832e" ]
[ "app.py" ]
[ "from flask import Flask, render_template, redirect, url_for, flash, request, abort\nfrom functions import UserLogin, UserRegistration, NewExpense\nfrom flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy import func\nfrom datetime import datetime, timedelta, date\nfrom flask_bcrypt import Bcrypt\nfrom flask_login import LoginManager, UserMixin, login_user, current_user, logout_user, login_required\nfrom matplotlib import pyplot as plt\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\nfrom matplotlib.figure import Figure\nfrom itertools import zip_longest\nimport os\nimport io\nimport base64\nimport numpy as np\n\napp = Flask(__name__)\nSECRET_KEY = os.urandom(16)\napp.config['SECRET_KEY'] = SECRET_KEY\napp.config['SQLALCHEMY_DATABASE_URI'] = ' '\ndb = SQLAlchemy(app)\nbcrypt = Bcrypt(app)\nlogin_manager = LoginManager(app)\nlogin_manager.login_view = 'login'\nlogin_manager.login_message_category = 'info'\n\n@login_manager.user_loader\ndef load_user(user_id):\n return User.query.get(int(user_id))\n\nclass User(db.Model, UserMixin):\n __tablename__ = 'user'\n id = db.Column(db.Integer, primary_key=True)\n email = db.Column(db.String(30), unique=True, nullable=False)\n username = db.Column(db.String(10), unique=True, nullable=False)\n password = db.Column(db.String(128), nullable=False)\n expense_id = db.relationship('UserExpense', backref='expensedate', lazy='dynamic')\n\n def __repr__(self):\n return f\"User('{self.username}', '{self.email}')\"\n\nclass UserExpense(db.Model):\n __tablename__ = 'user_expenses'\n id = db.Column(db.Integer, primary_key=True)\n userid = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)\n category = db.Column(db.String(30))\n description = db.Column(db.String(50))\n expense = db.Column(db.Numeric(scale=2, asdecimal=True))\n expense_date = db.Column(db.Date, default=date.today())\n\n def __repr__(self):\n return f\"UserExpense('{self.category}', '{self.description}', '{self.expense}', '{self.expense_date}')\"\n\[email protected]('/', methods=['GET', 'POST'])\ndef login():\n form = UserLogin()\n if current_user.is_authenticated:\n return redirect(url_for('overview'))\n if form.validate_on_submit():\n user = User.query.filter_by(username=form.username.data).first()\n if user and bcrypt.check_password_hash(user.password, form.password.data):\n login_user(user, remember=form.remember.data)\n return redirect(url_for('overview'))\n else:\n flash('Invalid login', 'danger')\n return render_template('login.html', form=form)\n\[email protected]('/register', methods=['GET', 'POST'])\ndef register():\n if current_user.is_authenticated:\n return redirect(url_for('overview'))\n form = UserRegistration()\n if form.validate_on_submit():\n password_hashed = bcrypt.generate_password_hash(form.password.data).decode('utf-8')\n user = User(username=form.username.data, email=form.email.data, password=password_hashed)\n db.session.add(user)\n db.session.commit()\n flash('Account created!', 'success')\n return redirect(url_for('login'))\n return render_template('register.html', title='Register', form=form)\n\[email protected]('/logout')\ndef logout():\n logout_user()\n flash('Logged out!', 'success')\n return redirect(url_for('login'))\n\[email protected]('/overview', methods=['GET','POST'])\n@login_required\ndef overview():\n form = NewExpense()\n userids = current_user.id\n name = current_user.username\n\n # Forms\n if form.validate_on_submit():\n expenses = UserExpense(category=form.category.data, description=form.description.data,\n 
expense=form.expense.data, expensedate=current_user)\n db.session.add(expenses)\n db.session.commit()\n\n # Queries\n filters = db.session.query(UserExpense.expense_date).filter(UserExpense.userid==userids).distinct()\n\n date_list=[] #List of distinct dates\n for u in filters:\n date_list.append(f'{u.expense_date}')\n\n date_expense_list=[] #List of expenses for that specific date\n for item in date_list:\n date_expense = db.session.query(func.sum(UserExpense.expense)).filter(UserExpense.userid==userids, UserExpense.expense_date==item).scalar()\n date_expense_list.append(f'{date_expense}')\n\n item = list(zip_longest(date_list,date_expense_list,date_list, fillvalue=\"\"))\n\n # Matplotlib\n fig, ax = plt.subplots(figsize=(11, 5))\n ax.plot(date_list, [float(g) for g in date_expense_list], label=\"Expenses\")\n ax.legend()\n fig.suptitle('Expense pattern')\n\n patternpngImage = io.BytesIO()\n FigureCanvas(fig).print_png(patternpngImage)\n\n patternpngImageString = \"data:image/png;base64,\"\n patternpngImageString += base64.b64encode(patternpngImage.getvalue()).decode('utf8')\n\n\n return render_template('overview.html', normal='normal', title='Expenses',image=patternpngImageString,\n form=form, name=name, item=item)\n\n\[email protected]('/expense/<string:wkex_id>', methods=['GET','POST'])\n@login_required\ndef userexpenses(wkex_id):\n form = NewExpense()\n userids = current_user.id\n name = current_user.username\n\n # Queries\n items = db.session.query(UserExpense).filter(UserExpense.userid==userids, UserExpense.expense_date==wkex_id)\n\n todays = str(date.today())\n state=\"not\"\n if (wkex_id == todays) is True:\n state=\"today\"\n if (wkex_id > todays) is True:\n abort(404)\n\n # Forms\n if form.validate_on_submit():\n expenses = UserExpense(category=form.category.data, description=form.description.data,\n expense=form.expense.data, expensedate=current_user)\n db.session.add(expenses)\n db.session.commit()\n flash('Expense added!', 'success')\n return redirect(url_for('userexpenses', wkex_id=wkex_id))\n\n return render_template('expenses.html', normal='normal', title='Expenses',\n form=form, items=items, name=name, ids=wkex_id, state=state)\n\[email protected]('/expense/<string:wkex_id>/<int:ex_id>/delete', methods=['GET','POST'])\n@login_required\ndef delete_expense(wkex_id, ex_id):\n expenses = db.session.query(UserExpense).get_or_404(ex_id) # Query for valid access\n if expenses.expensedate != current_user:\n abort(403)\n db.session.delete(expenses)\n db.session.commit()\n flash('Expense deleted', 'success')\n return redirect(url_for('overview'))\n\[email protected](\"/expense/<string:wkex_id>/<int:ex_id>/update\", methods=['GET', 'POST'])\n@login_required\ndef update_expense(wkex_id, ex_id):\n name = current_user.username\n expenses = db.session.query(UserExpense).get_or_404(ex_id) # Query for valid access\n if expenses.expensedate != current_user:\n abort(403)\n form = NewExpense()\n\n if form.validate_on_submit():\n expenses.category = form.category.data\n expenses.description = form.description.data\n expenses.expense = form.expense.data\n db.session.commit()\n flash('Expense updated', 'success')\n return redirect(url_for('overview'))\n\n elif request.method=='GET':\n form.category.data = expenses.category\n form.description.data =expenses.description\n form.expense.data = expenses.expense\n return render_template('expenses.html', title='Expenses',form=form, name=name, wkex_id=wkex_id, state='today')\n\[email protected](\"/expense/<string:day_id>/charts\", methods=['GET', 
'POST'])\n@login_required\ndef charts(day_id):\n userids = current_user.id\n name = current_user.username\n # Queries\n categories = db.session.query(UserExpense.category).filter(UserExpense.userid==userids,\n UserExpense.expense_date==day_id).distinct()\n cat_list=[]\n for u in categories:\n cat_list.append(f'{u.category}')\n\n counts_list=[]\n for item in cat_list:\n counts = db.session.query(UserExpense.category).filter(UserExpense.userid==userids,\n UserExpense.expense_date==day_id,\n UserExpense.category==item).count()\n counts_list.append(counts)\n\n sum_list=[]\n for item in cat_list:\n Sums = db.session.query(func.sum(UserExpense.expense)).filter(UserExpense.userid==userids,\n UserExpense.expense_date==day_id,\n UserExpense.category==item).scalar()\n sum_list.append(f'{Sums}')\n\n # Highest expenditure graph\n fig, axs = plt.subplots(figsize=(10, 5))\n axs.bar(cat_list, [float(g) for g in sum_list])\n fig.suptitle('Expenditure breakdown')\n\n # Frequency graph\n fig1, ax1 = plt.subplots(figsize=(10, 5), subplot_kw=dict(aspect=\"equal\"))\n\n wedges, texts = ax1.pie(counts_list, wedgeprops=dict(width=0.5), startangle=-40)\n\n bbox_props = dict(boxstyle=\"square,pad=0.3\", fc=\"w\", ec=\"k\", lw=0.72)\n kw = dict(arrowprops=dict(arrowstyle=\"-\"),\n bbox=bbox_props, zorder=0, va=\"top\")\n\n for i, p in enumerate(wedges):\n ang = (p.theta2 - p.theta1)/2. + p.theta1\n y = np.sin(np.deg2rad(ang))\n x = np.cos(np.deg2rad(ang))\n horizontalalignment = {-1: \"right\", 1: \"left\"}[int(np.sign(x))]\n connectionstyle = \"angle,angleA=0,angleB={}\".format(ang)\n kw[\"arrowprops\"].update({\"connectionstyle\": connectionstyle})\n ax1.annotate(cat_list[i], xy=(x, y), xytext=(1.35*np.sign(x), 1.4*y),\n horizontalalignment=horizontalalignment, **kw)\n\n ax1.set_title(\"Expenses category frequency\")\n\n # Convert plot to PNG image\n highpngImage = io.BytesIO()\n freqpngImage = io.BytesIO()\n FigureCanvas(fig).print_png(highpngImage)\n FigureCanvas(fig1).print_png(freqpngImage)\n\n # Encode PNG image to base64 string\n highpngImageString = \"data:image/png;base64,\"\n highpngImageString += base64.b64encode(highpngImage.getvalue()).decode('utf8')\n\n freqpngImageString = \"data:image/png;base64,\"\n freqpngImageString += base64.b64encode(freqpngImage.getvalue()).decode('utf8')\n\n return render_template('charts.html',title ='History', name=name,\n image1=highpngImageString, image2=freqpngImageString, day_id=day_id)\n\n if __name__ == '__main__':\n app.run()\n" ]
[ [ "matplotlib.backends.backend_agg.FigureCanvasAgg", "numpy.deg2rad", "matplotlib.pyplot.subplots", "numpy.sign" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
comp5331-Xtimeseries/mWDN
[ "3805f90230b93d04f86201079358ec1f6dd6bb2d" ]
[ "utils.py" ]
[ "import torch\nimport numpy as np;\nfrom torch.autograd import Variable\n\n\ndef normal_std(x):\n return x.std() * np.sqrt((len(x) - 1.)/(len(x)))\n\nclass Data_utility(object):\n # train and valid is the ratio of training set and validation set. test = 1 - train - valid\n def __init__(self, dSet, train, valid, cuda, horizon, window, normalize = 2):\n self.cuda = cuda;\n self.P = window;\n self.h = horizon\n self.rawdat = dSet\n self.dat = np.zeros(self.rawdat.shape);\n self.n, self.m = self.dat.shape;\n self.normalize = 2\n self.scale = np.ones(self.m);\n self._normalized(normalize);\n self._split(int(train * self.n), int((train+valid) * self.n), self.n);\n \n self.scale = torch.from_numpy(self.scale).float();\n tmp = self.test[1] * self.scale.expand(self.test[1].size(0), self.m);\n \n if self.cuda:\n self.scale = self.scale.cuda();\n self.scale = Variable(self.scale);\n \n self.rse = normal_std(tmp);\n self.rae = torch.mean(torch.abs(tmp - torch.mean(tmp)));\n \n def _normalized(self, normalize):\n #normalized by the maximum value of entire matrix.\n \n if (normalize == 0):\n self.dat = self.rawdat\n \n if (normalize == 1):\n self.dat = self.rawdat / np.max(self.rawdat);\n \n #normlized by the maximum value of each row(sensor).\n if (normalize == 2):\n for i in range(self.m):\n self.scale[i] = np.max(np.abs(self.rawdat[:,i]));\n self.dat[:,i] = self.rawdat[:,i] / np.max(np.abs(self.rawdat[:,i]));\n \n \n def _split(self, train, valid, test):\n \n train_set = range(self.P+self.h-1, train);\n valid_set = range(train, valid);\n test_set = range(valid, self.n);\n self.train = self._batchify(train_set, self.h);\n self.valid = self._batchify(valid_set, self.h);\n self.test = self._batchify(test_set, self.h);\n \n \n def _batchify(self, idx_set, horizon):\n \n n = len(idx_set);\n X = torch.zeros((n,self.P,self.m));\n Y = torch.zeros((n,self.m));\n \n for i in range(n):\n end = idx_set[i] - self.h + 1;\n start = end - self.P;\n X[i,:,:] = torch.from_numpy(self.dat[start:end, :]);\n Y[i,:] = torch.from_numpy(self.dat[idx_set[i], :]);\n\n return [X, Y];\n\n def get_batches(self, inputs, targets, batch_size, shuffle=True):\n length = len(inputs)\n if shuffle:\n index = torch.randperm(length)\n else:\n index = torch.LongTensor(range(length))\n start_idx = 0\n while (start_idx < length):\n end_idx = min(length, start_idx + batch_size)\n excerpt = index[start_idx:end_idx]\n X = inputs[excerpt]; Y = targets[excerpt];\n # if (self.cuda):\n # X = X.cuda();\n # Y = Y.cuda();\n yield Variable(X), Variable(Y);\n start_idx += batch_size\n" ]
[ [ "torch.mean", "numpy.abs", "torch.zeros", "torch.randperm", "torch.from_numpy", "numpy.ones", "numpy.max", "numpy.zeros", "torch.autograd.Variable" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
timmyzhao/ptstat
[ "0401203e5b6053df6d62b2af9ab4b831f1b41660" ]
[ "ptstat/dist/categorical.py" ]
[ "import torch\nfrom ptstat.core import RandomVariable, _to_v\n\n\nclass Categorical(RandomVariable):\n \"\"\"\n Categorical over 0,...,N-1 with arbitrary probabilities, 1-dimensional rv, long type.\n \"\"\"\n def __init__(self, p=None, p_min=1E-6, size=None, cuda=False):\n super(Categorical, self).__init__()\n if size:\n assert len(size) == 2, str(size)\n p = _to_v(1 / size[1], size, cuda)\n else:\n assert len(p.size()) == 2, str(p.size())\n assert torch.min(p.data) >= 0, str(torch.min(p.data))\n assert torch.max(torch.abs(torch.sum(p.data, 1) - 1)) <= 1E-5\n self._p = torch.clamp(p, p_min)\n\n def _size(self):\n return self._p.size()[0], 1 # Type is Long.\n\n def _log_pdf(self, x):\n return torch.log(self._p.gather(1, x)).squeeze()\n\n def _sample(self):\n return self._p.multinomial(1, True)\n\n def _entropy(self):\n return - torch.sum(self._p * torch.log(self._p), 1).squeeze()\n" ]
[ [ "torch.clamp", "torch.sum", "torch.min", "torch.log" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
SX-Aurora/orchespy
[ "6b85a78831c8e3e05df7143101ca3418817fcbbd" ]
[ "tests/device_tests/test_device_args_numpy_module.py" ]
[ "from orchespy import device\nfrom orchespy.devicetype import CUDAGPU, Host, VE\nimport sys\nimport pytest\n\nimport numpy as np\n\nif \"cupy\" in sys.modules:\n import cupy as cp\nif \"nlcpy\" in sys.modules:\n import nlcpy as vp\n\nno_nlcpy = pytest.mark.skipif(\n \"nlcpy\" not in sys.modules, reason=' test require nlcpy. ')\nno_cupy = pytest.mark.skipif(\n \"cupy\" not in sys.modules, reason=' test require cupy. ')\n\n\n# for tests with an argument\n@device(Host, numpy_module_arg='xp')\ndef create_array_init_5_at_host(shape, dtype, order, xp):\n return xp.full(shape, 5, dtype=dtype, order=order)\n\n\n@device(CUDAGPU, numpy_module_arg='xp')\ndef create_array_init_5_at_gpu(shape, dtype, order, xp):\n return xp.full(shape, 5, dtype=dtype, order=order)\n\n\n@device(VE, numpy_module_arg='xp')\ndef create_array_init_5_at_ve(shape, dtype, order, xp):\n return xp.full(shape, 5, dtype=dtype, order=order)\n\n\[email protected]('shape', [(2), (2, 2), (2, 2, 2), (2, 3), (2, 3, 4)])\[email protected]('dtype', [\n 'i4', 'i8', 'u4', 'u8', 'f4', 'f8', 'c8', 'c16'\n ])\[email protected]('order', ['C', 'F'])\nclass TestDeviceArgs:\n def test_device_args_host(self, shape, dtype, order):\n y = create_array_init_5_at_host(shape, dtype, order)\n assert(isinstance(y, np.ndarray))\n expected = np.full(shape, 5, dtype=dtype, order=order)\n assert((y == expected).all())\n\n @no_cupy\n def test_device_args_gpu(self, shape, dtype, order):\n y = create_array_init_5_at_gpu(shape, dtype, order)\n assert(isinstance(y, cp.ndarray))\n expected = cp.full(shape, 5, dtype=dtype, order=order)\n assert((y == expected).all())\n\n @no_nlcpy\n def test_device_args_ve(self, shape, dtype, order):\n y = create_array_init_5_at_ve(shape, dtype, order)\n assert(isinstance(y, vp.ndarray))\n expected = vp.full(shape, 5, dtype=dtype, order=order)\n assert((y == expected).all())\n" ]
[ [ "numpy.full" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ncfrey/mlmsynth
[ "99fc8fabba511aefd6f0a0be4e85c78c54dd3648" ]
[ "pumml/learners.py" ]
[ "\"\"\"\nDeploy semi-supervised PU machine learning models.\n\nThis module provides classes for training, testing, and deploying a PU\nlearning model for predicting material synthesizability. Utility functions\nfor plotting aid in visualizing and analyzing results.\n\nReferences:\n [1] DOI: 10.1021/acsnano.8b08014\n [2] DOI: 10.1145/1401890.1401920\n [3] DOI: 10.1016/j.patrec.2013.06.010\n\"\"\"\n\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import precision_recall_fscore_support\nfrom sklearn.cluster import KMeans\nfrom sklearn.mixture import GaussianMixture, BayesianGaussianMixture\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.metrics import precision_recall_curve\nfrom sklearn.model_selection import RepeatedKFold\nfrom sklearn.utils import resample\n\nfrom mpl_toolkits.mplot3d import Axes3D\n\nfrom monty.serialization import dumpfn\n\nimport pandas as pd\nimport seaborn as sns\nimport os\nimport pickle\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom pylab import rcParams\n\n__author__ = \"Nathan C. Frey, Jin Wang\"\n__copyright__ = \"MIT License\"\n__version__ = \"0.0.1\"\n__maintainer__ = \"Nathan C. Frey\"\n__email__ = \"[email protected]\"\n__status__ = \"Development\"\n__date__ = \"Aug 2017\"\n\n\nclass PULearner:\n def __init__(self):\n \"\"\"A machine learning model that predicts material synthesizability.\n\n Positive samples are experimentally synthesized materials. Unlabeled\n samples are not-yet synthesized materials.\n\n Features for training data might be generated by first-principles \n (density functional theory) calculations, or structural or chemical\n data looked up from a table.\n\n Hyperparameters are initialized with sensible defaults, but any newly\n trained model should have hyperparams carefully converged.\n\n Attributes:\n pu_stats (dict): Outputs of cv_baggingDT\n df_U (DataFrame): Unlabeled data.\n df_P (DataFrame): Positive data.\n\n synth_scores (list): Synthesizability scores (between 0 and 1) of\n unlabeled samples.\n labels (list): Likely synthesizable (1) or not (0)\n feat_importances (DataFrame): Feature importances from trained\n decision tree classifiers. Index corresponds to feature index\n in original data. \n\n \"\"\"\n\n self.pu_stats = None\n self.df_U = None\n self.df_P = None\n self.synth_scores = None\n self.labels = None\n self.feat_importances = None\n\n def cv_baggingDT(self, pu_data, splits=10, repeats=10, bags=100, filename=\"\"):\n \"\"\"\n Train bagged decision tree base classifiers and do repeated \n k-fold CV.\n\n Synthesizability scores (0 = not synthesizable, 1 = already\n synthesized) are generated for an unlabeled sample by averaging\n the scores from the ensemble of decision tree classifiers that\n have not been trained on that sample. 
\n\n Args:\n pu_data (json): A file where each row describes a material.\n There MUST be a column called \"PU_label\" where a 1 value\n indicates a synthesized (positive) compound and a 0 value\n indicates an unlabeled compound.\n\n splits (int): Number of splits in k-fold CV.\n repeats (int): Number of repeated k-fold CV.\n bags (int): Number of bags in bootstrap aggregation.\n filename (string): Save model training results to file with\n filename ending in .json or .pkl.\n\n Returns:\n pu_stats (dict): Metrics and outputs of PU learning model\n training.\n\n \"\"\"\n \n print(\"Start PU Learning.\")\n\n # Preprocess data and set attributes\n df = pd.read_json(pu_data)\n df_P, df_U, X_P, X_U = self._process_pu_data(df)\n self.df_P = df_P\n self.df_U = df_U\n\n # Split data into training and test splits for k-fold CV\n kfold = RepeatedKFold(n_splits=splits, n_repeats=repeats, random_state=42)\n\n # Scores for PU learning (tpr = True Positive Rate)\n scores = []\n tprs = []\n\n # Predicted synthesis probability of CVed P and U sets\n prob_P = np.ones(shape=(X_P.shape[0], splits * repeats))\n prob_U = -np.ones(shape=(X_U.shape[0], splits * repeats))\n\n # Feature importance\n feat_rank = np.zeros(shape=(X_P.shape[1], splits * repeats))\n\n idsp = 0 # index of repeated k splits\n\n # Loop over P and U training/test samples\n for (ptrain, ptest), (utrain, utest) in zip(kfold.split(X_P), kfold.split(X_U)):\n\n # Number of P and U training samples\n N_ptrain = X_P[ptrain].shape[0]\n N_utrain = X_U[utrain].shape[0]\n\n d = X_P.shape[1]\n K = N_ptrain\n train_label = np.zeros(shape=(N_ptrain + K,))\n train_label[:N_ptrain] = 1.0 # Synthesized (positive)\n\n # Out of bag samples\n n_oob = np.zeros(shape=(N_utrain,))\n f_oob = np.zeros(shape=(N_utrain, 2))\n\n # Sums of probabilities of test sets\n f_ptest = np.zeros(shape=(X_P[ptest].shape[0], 2))\n f_utest = np.zeros(shape=(X_U[utest].shape[0], 2))\n\n # Bootstrap resampling for each bag\n for i in range(bags):\n bootstrap_sample = np.random.choice(\n np.arange(N_utrain), replace=True, size=K\n )\n\n # Positive samples and bootstrapped unlabeled samples\n data_bootstrap = np.concatenate(\n (X_P[ptrain], X_U[bootstrap_sample, :]), axis=0\n )\n\n # Train decision tree classifier\n model = DecisionTreeClassifier(\n max_depth=None,\n max_features=None,\n criterion=\"gini\",\n class_weight=\"balanced\",\n )\n\n model.fit(data_bootstrap, train_label)\n\n # Index for the oob samples\n idx_oob = sorted(\n set(range(N_utrain)) - set(np.unique(bootstrap_sample))\n )\n\n # Transductive learning on oob samples\n f_oob[idx_oob] += model.predict_proba(X_U[utrain][idx_oob])\n n_oob[idx_oob] += 1\n f_ptest += model.predict_proba(X_P[ptest])\n f_utest += model.predict_proba(X_U[utest])\n feat_rank[:, idsp] = model.feature_importances_\n\n # Predicted synthesis probabilities of unlabeled samples\n predict_utrain = f_oob[:, 1] / n_oob\n\n # Predicted probabilities for P and U test sets\n predict_ptest = f_ptest[:, 1] / bags\n predict_utest = f_utest[:, 1] / bags\n\n # Find predicted positives\n true_pos = predict_ptest[np.where(predict_ptest > 0.5)].shape[0]\n u_pos = predict_utest[np.where(predict_utest > 0.5)].shape[0]\n\n N_ptest = X_P[ptest].shape[0]\n N_utest = X_U[utest].shape[0]\n\n # Predicted positive ratio in test set\n p_pred_pos = (true_pos + u_pos) / (N_ptest + N_utest) + 0.0001\n\n # Compute PU recall (TPR) and score metrics\n recall = true_pos / N_ptest\n score = recall ** 2 / p_pred_pos\n scores.append(score)\n tprs.append(recall)\n\n # Predicted 
probabilities\n prob_P[ptest, idsp] = predict_ptest\n prob_U[utrain, idsp] = predict_utrain\n prob_U[utest, idsp] = predict_utest\n idsp += 1\n\n # Progress update\n if (idsp + 1) % splits == 0:\n tpr_tmp = np.asarray(tprs[-splits - 1 : -1])\n print(\n \"Performed Repeated \"\n + str(splits)\n + \"-fold: \"\n + str(idsp // splits + 1)\n + \" out of \"\n + str(repeats)\n )\n print(\n \"True Positive Rate: %0.2f (+/- %0.2f)\"\n % (tpr_tmp.mean(), tpr_tmp.std() * 2)\n )\n\n # Predicted labels from k-fold CV\n label_U = np.zeros(shape=(X_U.shape[0], splits * repeats + 1), dtype=int)\n label_U[:, : splits * repeats][np.where(prob_U > 0.5)] = 1\n label_U[:, splits * repeats] = np.sum(\n label_U[:, : splits * repeats + 1], axis=1\n )\n\n tprs = np.asarray(tprs)\n scores = np.asarray(scores)\n\n # Metrics for each model in the k-folds\n label_U_rp = np.zeros(shape=(X_U.shape[0], repeats), dtype=int)\n prob_U_rp = np.zeros(shape=(X_U.shape[0], repeats))\n feat_rank_rp = np.zeros(shape=(X_U.shape[1], repeats))\n tpr_rp = np.zeros(shape=(repeats,))\n scores_rp = np.zeros(shape=(repeats,))\n labels = np.zeros(shape=(X_U.shape[0],))\n\n for i in range(repeats):\n prob_U_rp[:, i] = prob_U[:, i * splits : (i + 1) * splits].mean(axis=1)\n feat_rank_rp[:, i] = feat_rank[:, i * splits : (i + 1) * splits].mean(\n axis=1\n )\n tpr_rp[i] = tprs[i * splits : (i + 1) * splits].mean()\n scores_rp[i] = scores[i * splits : (i + 1) * splits].mean()\n\n label_U_rp[np.where(prob_U_rp > 0.5)] = 1\n prob = prob_U_rp.mean(axis=1)\n labels[np.where(prob > 0.5)] = 1\n\n # Get confidence interval of TPR for each kfold\n tpr_low, tpr_up = self.bootstrapCI(tpr_rp)\n scores_low, scores_up = self.bootstrapCI(scores_rp)\n\n # PU learning metrics\n metrics = np.asarray(\n [tpr_rp.mean(), tpr_low, tpr_up, scores_rp.mean(), scores_low, scores_up]\n )\n\n print(\"Accuracy: %0.2f\" % (tpr_rp.mean()))\n print(\"95%% confidence interval: [%0.2f, %0.2f]\" % (tpr_low, tpr_up))\n\n # Metrics and results from training / testing\n pu_stats = {\n \"prob\": prob,\n \"labels\": labels,\n \"metrics\": metrics,\n \"prob_rp\": prob_U_rp,\n \"label_rp\": label_U_rp,\n \"tpr_rp\": tpr_rp,\n \"scores_rp\": scores_rp,\n \"feat_rank_rp\": feat_rank_rp,\n }\n\n # Save results\n if filename:\n if filename.endswith(\".json\"):\n dumpfn(pu_stats, filename)\n if filename.endswith(\".pkl\"):\n with open(filename, \"wb\") as file:\n pickle.dump(pu_stats, file, protocol=pickle.HIGHEST_PROTOCOL)\n\n self.pu_stats = pu_stats\n return pu_stats\n\n def bootstrapCI(self, data, ci=95, ns=10000):\n \"\"\"Compute confidence interval of the TPR.\n\n Args:\n data (array): Array of TPRs for each kfold.\n ci (int): Confidence interval.\n ns (int): Number of bootstrap resamplings.\n\n Returns:\n lower (float): Lower endpoint of CI.\n upper (float): Upper endpoint of CI.\n \n \"\"\"\n\n bs_rsample = []\n for _ in range(ns):\n rsample = resample(data, n_samples=len(data))\n bs_rsample.append(np.mean(rsample))\n\n bs_rsample = np.asarray(bs_rsample)\n lower = np.percentile(bs_rsample, (100 - ci) / 2)\n upper = np.percentile(bs_rsample, ci + (100 - ci) / 2)\n\n return lower, upper\n\n def corr_heatmap(self, num_feats=10, fname=\"\"):\n \"\"\"Plot correlation matrix between synthesizability and features.\n\n cv_baggingDT must be run first.\n\n Args:\n num_feats (int): How many features to consider.\n fname (str): Filename if correlation plot should be saved.\n\n Returns:\n None (generates plots)\n\n \"\"\"\n\n pu_stats = self.pu_stats\n df_U = self.df_U\n df_U_copy = 
df_U.drop(columns=[\"PU_label\"])\n\n # Get normalized, sorted & ranked list of most important features\n synth_scores = pu_stats[\"prob\"]\n df_U_copy[\"synth_score\"] = synth_scores\n\n # Make correlation matrix of top \"num_feats\" features\n corrmat = df_U_copy.corr()\n cols = corrmat.nlargest(num_feats, \"synth_score\")[\"synth_score\"].index\n cm = np.corrcoef(df_U_copy[cols].values.T)\n\n sns.set(style='ticks')\n rcParams['figure.dpi'] = 300\n\n fig, ax = plt.subplots(1, 1)\n hm = sns.heatmap(\n cm,\n ax=ax,\n cbar=True,\n annot=True,\n square=True,\n fmt=\".2f\",\n annot_kws={\"size\": 7},\n yticklabels=cols.values,\n xticklabels=cols.values,\n )\n\n if fname:\n self.save_plot(fname + \".png\", fig, ax)\n\n def get_feat_importances(self, plot_format=\"\"):\n \"\"\"Process output from PU learning k-fold cross validation.\n\n cv_baggingDT must be run first.\n\n If plot_format is specified, a feature importance plot will\n be saved.\n\n Args:\n plot_format (str): svg, png, or pdf file format for saving simple\n visualizations of feature importance and correlation. \n\n \"\"\"\n\n pu_stats = self.pu_stats\n\n # Feature importances for individual repetitions of kfold CV\n feat_rank_rp = pu_stats[\"feat_rank_rp\"]\n feat_importances = np.sum(feat_rank_rp, axis=1)\n\n df_U = self.df_U\n df_U = df_U._get_numeric_data()\n df_U_copy = df_U.drop(columns=[\"PU_label\"])\n feat_names = df_U_copy.columns\n\n # Index corresponds to feature in original data\n df_feat = pd.DataFrame(columns=[\"feature\", \"importance\"])\n df_feat[\"feature\"] = feat_names\n df_feat[\"importance\"] = feat_importances\n\n # Sort by importance\n df_feat_sort = df_feat.sort_values(by=\"importance\", ascending=False)\n max_value = df_feat[\"importance\"].max()\n\n # Normalize to 1\n df_feat_sort[\"importance\"] = df_feat_sort[\"importance\"] / max_value\n\n # Set feature importance attribute\n self.feat_importances = df_feat\n\n if plot_format in [\"svg\", \"pdf\", \"png\"]:\n\n # Feature importance plot\n fig, ax = plt.subplots(figsize=(10, 4))\n with sns.axes_style(style=\"ticks\"):\n sns.barplot(x=\"feature\", y=\"importance\", data=df_feat_sort)\n ax.set_xticklabels(\n ax.get_xticklabels(), rotation=45, ha=\"right\", fontsize=7\n )\n filename = \"feat_importance.\" + plot_format\n self.save_plot(filename, fig, ax)\n\n @staticmethod\n def _process_pu_data(data):\n \"\"\"Utility method for processing input data.\n\n Args:\n data (DataFrame): Data with positive and unlabeled samples.\n\n Returns:\n X_P (array): Positive sample set.\n X_U (array): Unlabeled sample set.\n\n \"\"\"\n\n df_P = data.query(\"PU_label == 1\") # Positive value is 1\n df_U = data.query(\"PU_label == 0\") # Unlabeled value is 0\n\n # Chop off PU label and drop non-numeric columns for sklearn\n X_P = np.asarray(df_P.drop(columns=[\"PU_label\"])._get_numeric_data())\n X_U = np.asarray(df_U.drop(columns=[\"PU_label\"])._get_numeric_data())\n\n return df_P, df_U, X_P, X_U\n\n @staticmethod\n def save_plot(filename, fig, ax):\n \"\"\"Utility method for saving simple visualizations.\n\n Args:\n filename (str): Name ending in .svg, .png, or .pdf\n fig, ax (objects): Matplotlib objects.\n\n Returns:\n None\n\n \"\"\"\n\n sns.set_style(\"ticks\")\n fig.tight_layout()\n fig.savefig(filename)\n\n\nclass PUInteract:\n def __init__(self, df_parent, pu_parent, df_child, pu_child, merge_on=(), feats=()):\n \"\"\"Consider parent and child phase PU learning scores.\n\n This class looks at PU learning scores for parent bulk\n compounds (e.g. 
layered h-BN) and scores of the child phases\n along with descriptors like exfoliation energy and changes\n in structural/electronic properties to predict (parent, child)\n pairs that can be synthesized.\n\n Parent and child must be linked by a column that allows the\n dataframes to be merged. There should also be additional features\n that characterize the structural and chemical differences between\n parents and children, e.g. changes in bond lengths, etc.\n\n Unsupervised clustering models are used to identify synthesizable \n (parent/child) pairs.\n\n Args:\n df_parent (str): Parent data filename.\n pu_parent (dict): Output from PULearner.cv_baggingDT.\n df_child (str): Child data filename.\n pu_child (dict): Output from PULearner.cv_baggingDT.\n merge_on (tuple): Column name(s) on which to merge.\n feats (tuple): Column names to use as features. If empty, use all\n possible columns. \n\n Attributes:\n merged_df (DataFrame): (Parent, child) pair data.\n X (array): Array representation of merged_df.\n\n Returns:\n None\n\n \"\"\"\n\n df_parent = pd.read_json(df_parent)\n df_child = pd.read_json(df_child)\n\n # Set scores from PULearner\n df_parent[\"synth_score\"] = 1\n df_child[\"synth_score\"] = 1\n\n df_parent.loc[df_parent.eval(\"PU_label == 0\"), \"synth_score\"] = pu_parent[\n \"prob\"\n ]\n df_child.loc[df_child.eval(\"PU_label == 0\"), \"synth_score\"] = pu_child[\"prob\"]\n\n # Merge parent and child dfs\n merge_on = list(merge_on)\n df = pd.merge(\n df_parent, df_child, on=merge_on, how=\"outer\", suffixes=[\"_p\", \"_c\"]\n )\n df.drop(columns=[\"PU_label_p\", \"PU_label_c\"], inplace=True, axis=1)\n\n if feats:\n feat_names = [f + \"_p\" for f in feats] + [f + \"_c\" for f in feats]\n df = df[feat_names]\n\n self.merged_df = df\n self.X = np.array(df)\n\n def do_kmeans(self, n_clusters=2, seed=42):\n \"\"\"Do k-means clustering on (parent, child) pairs.\n\n Args:\n n_clusters (int): Number of clusters.\n seed (int): Fix random seed for kmeans reproducibility.\n\n Returns:\n kmeans_output (dict): kmeans cluster centers, cluster labels for\n each (parent, child)\n\n \"\"\"\n\n np.random.seed(seed)\n km = KMeans(n_clusters=n_clusters, random_state=seed)\n\n km.fit(self.X)\n kmeans_output = {\n \"cluster_centers\": km.cluster_centers_,\n \"cluster_labels\": km.labels_,\n }\n\n return kmeans_output\n\n def do_gmixture(self, n_components=2, seed=42):\n \"\"\"\n Estimate parameters of a Gaussian mixture distribution of (parent,\n child) data.\n\n Args:\n n_components (int): Number of components in GMM.\n seed (int): Random seed.\n\n Returns:\n gmm_output (dict): Predicted labels of (parent, child) pairs and\n predicted posterior probabilities of each component.\n\n \"\"\"\n\n np.random.seed(seed)\n gmm = GaussianMixture(\n n_components=n_components, random_state=seed, covariance_type=\"full\"\n )\n\n gmm.fit(self.X)\n gmm_labels = gmm.predict(self.X)\n gmm_prob = gmm.predict_proba(self.X)[:, 0]\n gmm_output = {\"gmm_labels\": gmm_labels, \"gmm_prob\": gmm_prob}\n\n return gmm_output\n\n def do_bgm(self, n_components=6, seed=42):\n \"\"\"Bayesian Gaussian Mixture.\n\n Infer the effective number of components in a Gaussian Mixture Model\n via variational Bayesian estimation.\n\n n_effective_componenents < n_components if the model sets some\n weights close to 0.\n\n Args:\n n_components (int): Number of components in GMM.\n seed (int): Random seed.\n\n Returns:\n bgm_output (dict): Labels and probabilities.\n\n \"\"\"\n\n np.random.seed(seed)\n bgm = BayesianGaussianMixture(\n 
n_components=n_components,\n covariance_type=\"full\",\n weight_concentration_prior=1e-2,\n weight_concentration_prior_type=\"dirichlet_process\",\n mean_precision_prior=1e-2,\n init_params=\"random\",\n max_iter=100,\n random_state=seed,\n )\n\n bgm.fit(self.X)\n bgm_labels = bgm.predict(self.X)\n bgm_prob = bgm.predict_proba(self.X)[:, 0]\n\n bgm_output = {\"bgm_labels\": bgm_labels, \"bgm_prob\": bgm_prob}\n\n return bgm_output\n" ]
[ [ "pandas.merge", "sklearn.cluster.KMeans", "numpy.asarray", "pandas.DataFrame", "numpy.concatenate", "sklearn.tree.DecisionTreeClassifier", "numpy.mean", "numpy.where", "numpy.unique", "numpy.arange", "numpy.zeros", "pandas.read_json", "numpy.corrcoef", "sklearn.mixture.GaussianMixture", "numpy.array", "numpy.sum", "sklearn.model_selection.RepeatedKFold", "numpy.random.seed", "matplotlib.pyplot.subplots", "numpy.percentile", "numpy.ones", "sklearn.mixture.BayesianGaussianMixture" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
CitrineInformatics/smlb
[ "28a3689bd36aa8d51031b4faf7e2331bbd8148a9", "28a3689bd36aa8d51031b4faf7e2331bbd8148a9" ]
[ "tests/learners/scikit_learn/test_gpr_skl.py", "smlb/feature_selection/select_percentile_sklearn.py" ]
[ "\"\"\"GaussianProcessRegressionSklearn tests.\n\nScientific Machine Learning Benchmark:\nA benchmark of regression models in chem- and materials informatics.\n\"\"\"\n\nimport pytest\n\nimport numpy as np\n\nskl = pytest.importorskip(\"sklearn\")\n\nimport smlb\nfrom smlb.learners.scikit_learn.gaussian_process_regression_sklearn import GaussianProcessRegressionSklearn\n\n\ndef test_GaussianProcessRegressionSklearn_1():\n \"\"\"Simple examples.\"\"\"\n\n # linear function with linear kernel\n kernel = skl.gaussian_process.kernels.DotProduct(sigma_0=0, sigma_0_bounds=\"fixed\")\n gpr = GaussianProcessRegressionSklearn(kernel=kernel, optimizer=None, rng=1)\n train_data = smlb.TabularData(data=np.array([[-1], [1]]), labels=np.array([-1, 1]))\n valid_data = smlb.TabularData(data=np.array([[-2], [-1], [0], [1], [2]]))\n preds = gpr.fit(train_data).apply(valid_data)\n mean, stddev = preds.mean, preds.stddev\n\n assert np.allclose(mean, [-2, -1, 0, 1, 2])\n assert stddev[0] > stddev[1] > stddev[2] < stddev[3] < stddev[4]\n\n\ndef test_GaussianProcessRegressionSklearn_2():\n \"\"\"All predictive distributions.\n\n Linear noise-free function, linear kernel + white noise kernel.\n The optimized noise level is expected to go to its lower bound.\n \"\"\"\n\n kernel = skl.gaussian_process.kernels.DotProduct(\n sigma_0=0, sigma_0_bounds=\"fixed\"\n ) + skl.gaussian_process.kernels.WhiteKernel(noise_level=0.1, noise_level_bounds=(1e-5, 1e-5))\n gpr = GaussianProcessRegressionSklearn(kernel=kernel, rng=1)\n n = 100\n train_data = smlb.TabularData(\n data=np.ones(shape=(n, 1)) * 2, labels=np.ones(shape=n) * 3\n )\n valid_data = smlb.TabularData(data=train_data.samples())\n preds = gpr.fit(train_data).apply(valid_data)\n\n assert preds.has_signal_part and preds.has_noise_part\n conf, noise = preds.signal_part, preds.noise_part\n\n assert np.allclose(conf.mean, train_data.labels())\n assert np.allclose(conf.stddev, np.ones(n) * np.sqrt(1e-5), atol=1e-3)\n\n assert (preds.mean == conf.mean).all()\n assert np.allclose(preds.stddev, np.ones(n) * np.sqrt(np.square(conf.stddev) + 1e-5))\n\n assert np.allclose(noise.mean, np.zeros(shape=n))\n assert np.allclose(noise.stddev, np.sqrt(1e-5))\n\n\ndef test_GaussianProcessRegressionSklearn_3():\n \"\"\"All predictive distributions.\n\n Linear noisy function, linear kernel + white noise kernel.\n The optimized noise level is expected to go to its true value.\n \"\"\"\n\n kernel = skl.gaussian_process.kernels.DotProduct(\n sigma_0=0, sigma_0_bounds=\"fixed\"\n ) + skl.gaussian_process.kernels.WhiteKernel(noise_level=1, noise_level_bounds=(1e-5, 1e5))\n gpr = GaussianProcessRegressionSklearn(kernel=kernel, rng=1)\n n, nlsd = 100, 0.5\n data = smlb.TabularData(data=np.ones(shape=(n, 1)) * 2, labels=np.ones(shape=n) * 3)\n data = smlb.LabelNoise(noise=smlb.NormalNoise(stddev=nlsd, rng=1)).fit(data).apply(data)\n preds = gpr.fit(data).apply(data)\n\n assert preds.has_signal_part and preds.has_noise_part\n conf, noise = preds.signal_part, preds.noise_part\n\n assert np.allclose(conf.mean, np.ones(n) * 3, atol=1e-1)\n assert np.allclose(conf.stddev, np.ones(n) * nlsd, atol=1e-1)\n\n assert (preds.mean == conf.mean).all()\n assert np.allclose(preds.stddev, np.sqrt(np.square(conf.stddev) + np.square(nlsd)), atol=1e-1)\n\n assert np.allclose(noise.mean, np.zeros(shape=n))\n assert np.allclose(noise.stddev, nlsd, atol=1e-1)\n", "from typing import Callable\n\nfrom sklearn.feature_selection import SelectPercentile\n\nfrom smlb import params\nfrom 
smlb.feature_selection.feature_selector_sklearn import FeatureSelectorSklearn\n\n\nclass SelectPercentileSklearn(FeatureSelectorSklearn):\n \"\"\"Select features based on percentile of highest scores, scikit-learn implementation.\n\n .. seealso::\n See `here <https://scikit-learn.org/stable/modules/generated/sklearn.feature_selection.SelectPercentile.html#sklearn.feature_selection.SelectPercentile>`_ for full documentation.\n \"\"\"\n\n def __init__(self, score_func: Callable, percentile: int = 10, *args, **kwargs):\n \"\"\"Initialize State.\n\n Parameters:\n score_func: Function that takes two arrays X and y and returns a pair of arrays (scores, pvalues) or a single array with scores.\n percentile: Percent of features to keep. Default is 10.\n \"\"\"\n score_func = params.callable(score_func, num_pos_or_kw=2)\n percentile = params.integer(percentile, from_=0, to=100)\n selector = SelectPercentile(score_func=score_func, percentile=percentile)\n super().__init__(selector=selector, *args, **kwargs)\n" ]
[ [ "numpy.square", "numpy.sqrt", "numpy.allclose", "numpy.ones", "numpy.array", "numpy.zeros" ], [ "sklearn.feature_selection.SelectPercentile" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
lmarti/pandas
[ "fdfd66cdf3f357fb52831eb644897e144a0d7f30" ]
[ "pandas/core/frame.py" ]
[ "\"\"\"\nDataFrame\n---------\nAn efficient 2D container for potentially mixed-type time series or other\nlabeled data series.\n\nSimilar to its R counterpart, data.frame, except providing automatic data\nalignment and a host of useful data manipulation methods having to do with the\nlabeling information\n\"\"\"\nfrom __future__ import division\n# pylint: disable=E1101,E1103\n# pylint: disable=W0212,W0231,W0703,W0622\n\nimport functools\nimport collections\nimport itertools\nimport sys\nimport types\nimport warnings\n\nfrom numpy import nan as NA\nimport numpy as np\nimport numpy.ma as ma\n\nfrom pandas.core.common import (isnull, notnull, PandasError, _try_sort,\n _default_index, _maybe_upcast, is_sequence,\n _infer_dtype_from_scalar, _values_from_object,\n is_list_like, _get_dtype, _maybe_box_datetimelike,\n is_categorical_dtype, is_object_dtype, _possibly_infer_to_datetimelike)\nfrom pandas.core.generic import NDFrame, _shared_docs\nfrom pandas.core.index import Index, MultiIndex, _ensure_index\nfrom pandas.core.indexing import (maybe_droplevels,\n convert_to_index_sliceable,\n check_bool_indexer)\nfrom pandas.core.internals import (BlockManager,\n create_block_manager_from_arrays,\n create_block_manager_from_blocks)\nfrom pandas.core.series import Series\nfrom pandas.core.categorical import Categorical\nimport pandas.computation.expressions as expressions\nfrom pandas.computation.eval import eval as _eval\nfrom numpy import percentile as _quantile\nfrom pandas.compat import(range, zip, lrange, lmap, lzip, StringIO, u,\n OrderedDict, raise_with_traceback)\nfrom pandas import compat\nfrom pandas.sparse.array import SparseArray\nfrom pandas.util.decorators import deprecate, Appender, Substitution, \\\n deprecate_kwarg\n\nfrom pandas.tseries.period import PeriodIndex\nfrom pandas.tseries.index import DatetimeIndex\n\nimport pandas.core.algorithms as algos\nimport pandas.core.common as com\nimport pandas.core.format as fmt\nimport pandas.core.nanops as nanops\nimport pandas.core.ops as ops\n\nimport pandas.lib as lib\nimport pandas.algos as _algos\n\nfrom pandas.core.config import get_option\n\n#----------------------------------------------------------------------\n# Docstring templates\n\n_shared_doc_kwargs = dict(axes='index, columns', klass='DataFrame',\n axes_single_arg=\"{0,1,'index','columns'}\")\n\n_numeric_only_doc = \"\"\"numeric_only : boolean, default None\n Include only float, int, boolean data. If None, will attempt to use\n everything, then use only numeric data\n\"\"\"\n\n_merge_doc = \"\"\"\nMerge DataFrame objects by performing a database-style join operation by\ncolumns or indexes.\n\nIf joining columns on columns, the DataFrame indexes *will be\nignored*. Otherwise if joining indexes on indexes or indexes on a column or\ncolumns, the index will be passed on.\n\nParameters\n----------%s\nright : DataFrame\nhow : {'left', 'right', 'outer', 'inner'}, default 'inner'\n * left: use only keys from left frame (SQL: left outer join)\n * right: use only keys from right frame (SQL: right outer join)\n * outer: use union of keys from both frames (SQL: full outer join)\n * inner: use intersection of keys from both frames (SQL: inner join)\non : label or list\n Field names to join on. Must be found in both DataFrames. If on is\n None and not merging on indexes, then it merges on the intersection of\n the columns by default.\nleft_on : label or list, or array-like\n Field names to join on in left DataFrame. 
Can be a vector or list of\n vectors of the length of the DataFrame to use a particular vector as\n the join key instead of columns\nright_on : label or list, or array-like\n Field names to join on in right DataFrame or vector/list of vectors per\n left_on docs\nleft_index : boolean, default False\n Use the index from the left DataFrame as the join key(s). If it is a\n MultiIndex, the number of keys in the other DataFrame (either the index\n or a number of columns) must match the number of levels\nright_index : boolean, default False\n Use the index from the right DataFrame as the join key. Same caveats as\n left_index\nsort : boolean, default False\n Sort the join keys lexicographically in the result DataFrame\nsuffixes : 2-length sequence (tuple, list, ...)\n Suffix to apply to overlapping column names in the left and right\n side, respectively\ncopy : boolean, default True\n If False, do not copy data unnecessarily\n\nExamples\n--------\n\n>>> A >>> B\n lkey value rkey value\n0 foo 1 0 foo 5\n1 bar 2 1 bar 6\n2 baz 3 2 qux 7\n3 foo 4 3 bar 8\n\n>>> merge(A, B, left_on='lkey', right_on='rkey', how='outer')\n lkey value_x rkey value_y\n0 foo 1 foo 5\n1 foo 4 foo 5\n2 bar 2 bar 6\n3 bar 2 bar 8\n4 baz 3 NaN NaN\n5 NaN NaN qux 7\n\nReturns\n-------\nmerged : DataFrame\n The output type will the be same as 'left', if it is a subclass\n of DataFrame.\n\"\"\"\n\n#----------------------------------------------------------------------\n# DataFrame class\n\n\nclass DataFrame(NDFrame):\n\n \"\"\" Two-dimensional size-mutable, potentially heterogeneous tabular data\n structure with labeled axes (rows and columns). Arithmetic operations\n align on both row and column labels. Can be thought of as a dict-like\n container for Series objects. The primary pandas data structure\n\n Parameters\n ----------\n data : numpy ndarray (structured or homogeneous), dict, or DataFrame\n Dict can contain Series, arrays, constants, or list-like objects\n index : Index or array-like\n Index to use for resulting frame. Will default to np.arange(n) if\n no indexing information part of input data and no index provided\n columns : Index or array-like\n Column labels to use for resulting frame. Will default to\n np.arange(n) if no column labels are provided\n dtype : dtype, default None\n Data type to force, otherwise infer\n copy : boolean, default False\n Copy data from inputs. Only affects DataFrame / 2d ndarray input\n\n Examples\n --------\n >>> d = {'col1': ts1, 'col2': ts2}\n >>> df = DataFrame(data=d, index=index)\n >>> df2 = DataFrame(np.random.randn(10, 5))\n >>> df3 = DataFrame(np.random.randn(10, 5),\n ... 
columns=['a', 'b', 'c', 'd', 'e'])\n\n See also\n --------\n DataFrame.from_records : constructor from tuples, also record arrays\n DataFrame.from_dict : from dicts of Series, arrays, or dicts\n DataFrame.from_csv : from CSV files\n DataFrame.from_items : from sequence of (key, value) pairs\n pandas.read_csv, pandas.read_table, pandas.read_clipboard\n \"\"\"\n _auto_consolidate = True\n\n @property\n def _constructor(self):\n return DataFrame\n\n _constructor_sliced = Series\n\n def __init__(self, data=None, index=None, columns=None, dtype=None,\n copy=False):\n if data is None:\n data = {}\n if dtype is not None:\n dtype = self._validate_dtype(dtype)\n\n if isinstance(data, DataFrame):\n data = data._data\n\n if isinstance(data, BlockManager):\n mgr = self._init_mgr(data, axes=dict(index=index, columns=columns),\n dtype=dtype, copy=copy)\n elif isinstance(data, dict):\n mgr = self._init_dict(data, index, columns, dtype=dtype)\n elif isinstance(data, ma.MaskedArray):\n import numpy.ma.mrecords as mrecords\n # masked recarray\n if isinstance(data, mrecords.MaskedRecords):\n mgr = _masked_rec_array_to_mgr(data, index, columns, dtype,\n copy)\n\n # a masked array\n else:\n mask = ma.getmaskarray(data)\n if mask.any():\n data, fill_value = _maybe_upcast(data, copy=True)\n data[mask] = fill_value\n else:\n data = data.copy()\n mgr = self._init_ndarray(data, index, columns, dtype=dtype,\n copy=copy)\n\n elif isinstance(data, (np.ndarray, Series, Index)):\n if data.dtype.names:\n data_columns = list(data.dtype.names)\n data = dict((k, data[k]) for k in data_columns)\n if columns is None:\n columns = data_columns\n mgr = self._init_dict(data, index, columns, dtype=dtype)\n elif getattr(data, 'name', None):\n mgr = self._init_dict({data.name: data}, index, columns,\n dtype=dtype)\n else:\n mgr = self._init_ndarray(data, index, columns, dtype=dtype,\n copy=copy)\n elif isinstance(data, (list, types.GeneratorType)):\n if isinstance(data, types.GeneratorType):\n data = list(data)\n if len(data) > 0:\n if is_list_like(data[0]) and getattr(data[0], 'ndim', 1) == 1:\n arrays, columns = _to_arrays(data, columns, dtype=dtype)\n columns = _ensure_index(columns)\n\n # set the index\n if index is None:\n if isinstance(data[0], Series):\n index = _get_names_from_index(data)\n elif isinstance(data[0], Categorical):\n index = _default_index(len(data[0]))\n else:\n index = _default_index(len(data))\n\n mgr = _arrays_to_mgr(arrays, columns, index, columns,\n dtype=dtype)\n else:\n mgr = self._init_ndarray(data, index, columns, dtype=dtype,\n copy=copy)\n else:\n mgr = self._init_ndarray(data, index, columns, dtype=dtype,\n copy=copy)\n elif isinstance(data, collections.Iterator):\n raise TypeError(\"data argument can't be an iterator\")\n else:\n try:\n arr = np.array(data, dtype=dtype, copy=copy)\n except (ValueError, TypeError) as e:\n exc = TypeError('DataFrame constructor called with '\n 'incompatible data and dtype: %s' % e)\n raise_with_traceback(exc)\n\n if arr.ndim == 0 and index is not None and columns is not None:\n if isinstance(data, compat.string_types) and dtype is None:\n dtype = np.object_\n if dtype is None:\n dtype, data = _infer_dtype_from_scalar(data)\n\n values = np.empty((len(index), len(columns)), dtype=dtype)\n values.fill(data)\n mgr = self._init_ndarray(values, index, columns, dtype=dtype,\n copy=False)\n else:\n raise PandasError('DataFrame constructor not properly called!')\n\n NDFrame.__init__(self, mgr, fastpath=True)\n\n def _init_dict(self, data, index, columns, dtype=None):\n 
\"\"\"\n Segregate Series based on type and coerce into matrices.\n Needs to handle a lot of exceptional cases.\n \"\"\"\n if columns is not None:\n columns = _ensure_index(columns)\n\n # prefilter if columns passed\n\n data = dict((k, v) for k, v in compat.iteritems(data)\n if k in columns)\n\n if index is None:\n index = extract_index(list(data.values()))\n else:\n index = _ensure_index(index)\n\n arrays = []\n data_names = []\n for k in columns:\n if k not in data:\n # no obvious \"empty\" int column\n if dtype is not None and issubclass(dtype.type,\n np.integer):\n continue\n\n if dtype is None:\n # 1783\n v = np.empty(len(index), dtype=object)\n else:\n v = np.empty(len(index), dtype=dtype)\n\n v.fill(NA)\n else:\n v = data[k]\n data_names.append(k)\n arrays.append(v)\n else:\n keys = list(data.keys())\n if not isinstance(data, OrderedDict):\n keys = _try_sort(keys)\n columns = data_names = Index(keys)\n arrays = [data[k] for k in keys]\n\n return _arrays_to_mgr(arrays, data_names, index, columns,\n dtype=dtype)\n\n def _init_ndarray(self, values, index, columns, dtype=None,\n copy=False):\n # input must be a ndarray, list, Series, index\n\n if isinstance(values, Series):\n if columns is None:\n if values.name is not None:\n columns = [values.name]\n if index is None:\n index = values.index\n else:\n values = values.reindex(index)\n\n # zero len case (GH #2234)\n if not len(values) and columns is not None and len(columns):\n values = np.empty((0, 1), dtype=object)\n\n # helper to create the axes as indexes\n def _get_axes(N, K, index=index, columns=columns):\n # return axes or defaults\n\n if index is None:\n index = _default_index(N)\n else:\n index = _ensure_index(index)\n\n if columns is None:\n columns = _default_index(K)\n else:\n columns = _ensure_index(columns)\n return index, columns\n\n # we could have a categorical type passed or coerced to 'category'\n # recast this to an _arrays_to_mgr\n if is_categorical_dtype(getattr(values,'dtype',None)) or is_categorical_dtype(dtype):\n\n if not hasattr(values,'dtype'):\n values = _prep_ndarray(values, copy=copy)\n values = values.ravel()\n elif copy:\n values = values.copy()\n\n index, columns = _get_axes(len(values),1)\n return _arrays_to_mgr([ values ], columns, index, columns,\n dtype=dtype)\n\n # by definition an array here\n # the dtypes will be coerced to a single dtype\n values = _prep_ndarray(values, copy=copy)\n\n if dtype is not None:\n\n if values.dtype != dtype:\n try:\n values = values.astype(dtype)\n except Exception as orig:\n e = ValueError(\"failed to cast to '%s' (Exception was: %s)\"\n % (dtype, orig))\n raise_with_traceback(e)\n\n index, columns = _get_axes(*values.shape)\n values = values.T\n\n # if we don't have a dtype specified, then try to convert objects\n # on the entire block; this is to convert if we have datetimelike's\n # embedded in an object type\n if dtype is None and is_object_dtype(values):\n values = _possibly_infer_to_datetimelike(values)\n\n return create_block_manager_from_blocks([values], [columns, index])\n\n @property\n def axes(self):\n return [self.index, self.columns]\n\n @property\n def shape(self):\n return (len(self.index), len(self.columns))\n\n def _repr_fits_vertical_(self):\n \"\"\"\n Check length against max_rows.\n \"\"\"\n max_rows = get_option(\"display.max_rows\")\n return len(self) <= max_rows\n\n def _repr_fits_horizontal_(self, ignore_width=False):\n \"\"\"\n Check if full repr fits in horizontal boundaries imposed by the display\n options width and max_columns. 
In case off non-interactive session, no\n boundaries apply.\n\n ignore_width is here so ipnb+HTML output can behave the way\n users expect. display.max_columns remains in effect.\n GH3541, GH3573\n \"\"\"\n\n width, height = fmt.get_console_size()\n max_columns = get_option(\"display.max_columns\")\n nb_columns = len(self.columns)\n\n # exceed max columns\n if ((max_columns and nb_columns > max_columns) or\n ((not ignore_width) and width and nb_columns > (width // 2))):\n return False\n\n if (ignore_width # used by repr_html under IPython notebook\n # scripts ignore terminal dims\n or not com.in_interactive_session()):\n return True\n\n if (get_option('display.width') is not None or\n com.in_ipython_frontend()):\n # check at least the column row for excessive width\n max_rows = 1\n else:\n max_rows = get_option(\"display.max_rows\")\n\n # when auto-detecting, so width=None and not in ipython front end\n # check whether repr fits horizontal by actualy checking\n # the width of the rendered repr\n buf = StringIO()\n\n # only care about the stuff we'll actually print out\n # and to_string on entire frame may be expensive\n d = self\n\n if not (max_rows is None): # unlimited rows\n # min of two, where one may be None\n d = d.iloc[:min(max_rows, len(d))]\n else:\n return True\n\n d.to_string(buf=buf)\n value = buf.getvalue()\n repr_width = max([len(l) for l in value.split('\\n')])\n\n return repr_width < width\n\n def _info_repr(self):\n \"\"\"True if the repr should show the info view.\"\"\"\n info_repr_option = (get_option(\"display.large_repr\") == \"info\")\n return info_repr_option and not (\n self._repr_fits_horizontal_() and self._repr_fits_vertical_()\n )\n\n def __unicode__(self):\n \"\"\"\n Return a string representation for a particular DataFrame\n\n Invoked by unicode(df) in py2 only. 
Yields a Unicode String in both\n py2/py3.\n \"\"\"\n buf = StringIO(u(\"\"))\n if self._info_repr():\n self.info(buf=buf)\n return buf.getvalue()\n\n max_rows = get_option(\"display.max_rows\")\n max_cols = get_option(\"display.max_columns\")\n show_dimensions = get_option(\"display.show_dimensions\")\n if get_option(\"display.expand_frame_repr\"):\n width, _ = fmt.get_console_size()\n else:\n width = None\n self.to_string(buf=buf, max_rows=max_rows, max_cols=max_cols,\n line_width=width, show_dimensions=show_dimensions)\n\n return buf.getvalue()\n\n def _repr_html_(self):\n \"\"\"\n Return a html representation for a particular DataFrame.\n Mainly for IPython notebook.\n \"\"\"\n # qtconsole doesn't report its line width, and also\n # behaves badly when outputting an HTML table\n # that doesn't fit the window, so disable it.\n # XXX: In IPython 3.x and above, the Qt console will not attempt to\n # display HTML, so this check can be removed when support for IPython 2.x\n # is no longer needed.\n if com.in_qtconsole():\n # 'HTML output is disabled in QtConsole'\n return None\n\n if self._info_repr():\n buf = StringIO(u(\"\"))\n self.info(buf=buf)\n # need to escape the <class>, should be the first line.\n val = buf.getvalue().replace('<', r'&lt;', 1).replace('>',\n r'&gt;', 1)\n return '<pre>' + val + '</pre>'\n\n if get_option(\"display.notebook_repr_html\"):\n max_rows = get_option(\"display.max_rows\")\n max_cols = get_option(\"display.max_columns\")\n show_dimensions = get_option(\"display.show_dimensions\")\n\n return ('<div style=\"max-height:1000px;'\n 'max-width:1500px;overflow:auto;\">\\n' +\n self.to_html(max_rows=max_rows, max_cols=max_cols,\n show_dimensions=show_dimensions) + '\\n</div>')\n else:\n return None\n\n def iteritems(self):\n \"\"\"Iterator over (column, series) pairs\"\"\"\n if self.columns.is_unique and hasattr(self, '_item_cache'):\n for k in self.columns:\n yield k, self._get_item_cache(k)\n else:\n for i, k in enumerate(self.columns):\n yield k, self.icol(i)\n\n def iterrows(self):\n \"\"\"\n Iterate over rows of DataFrame as (index, Series) pairs.\n\n Notes\n -----\n\n * ``iterrows`` does **not** preserve dtypes across the rows (dtypes\n are preserved across columns for DataFrames). 
For example,\n\n >>> df = DataFrame([[1, 1.0]], columns=['x', 'y'])\n >>> row = next(df.iterrows())[1]\n >>> print(row['x'].dtype)\n float64\n >>> print(df['x'].dtype)\n int64\n\n Returns\n -------\n it : generator\n A generator that iterates over the rows of the frame.\n \"\"\"\n columns = self.columns\n for k, v in zip(self.index, self.values):\n s = Series(v, index=columns, name=k)\n yield k, s\n\n def itertuples(self, index=True):\n \"\"\"\n Iterate over rows of DataFrame as tuples, with index value\n as first element of the tuple\n \"\"\"\n arrays = []\n if index:\n arrays.append(self.index)\n\n # use integer indexing because of possible duplicate column names\n arrays.extend(self.iloc[:, k] for k in range(len(self.columns)))\n return zip(*arrays)\n\n if compat.PY3: # pragma: no cover\n items = iteritems\n\n def __len__(self):\n \"\"\"Returns length of info axis, but here we use the index \"\"\"\n return len(self.index)\n\n def dot(self, other):\n \"\"\"\n Matrix multiplication with DataFrame or Series objects\n\n Parameters\n ----------\n other : DataFrame or Series\n\n Returns\n -------\n dot_product : DataFrame or Series\n \"\"\"\n if isinstance(other, (Series, DataFrame)):\n common = self.columns.union(other.index)\n if (len(common) > len(self.columns) or\n len(common) > len(other.index)):\n raise ValueError('matrices are not aligned')\n\n left = self.reindex(columns=common, copy=False)\n right = other.reindex(index=common, copy=False)\n lvals = left.values\n rvals = right.values\n else:\n left = self\n lvals = self.values\n rvals = np.asarray(other)\n if lvals.shape[1] != rvals.shape[0]:\n raise ValueError('Dot product shape mismatch, %s vs %s' %\n (lvals.shape, rvals.shape))\n\n if isinstance(other, DataFrame):\n return self._constructor(np.dot(lvals, rvals),\n index=left.index,\n columns=other.columns)\n elif isinstance(other, Series):\n return Series(np.dot(lvals, rvals), index=left.index)\n elif isinstance(rvals, (np.ndarray, Index)):\n result = np.dot(lvals, rvals)\n if result.ndim == 2:\n return self._constructor(result, index=left.index)\n else:\n return Series(result, index=left.index)\n else: # pragma: no cover\n raise TypeError('unsupported type: %s' % type(other))\n\n #----------------------------------------------------------------------\n # IO methods (to / from other formats)\n\n @classmethod\n def from_dict(cls, data, orient='columns', dtype=None):\n \"\"\"\n Construct DataFrame from dict of array-like or dicts\n\n Parameters\n ----------\n data : dict\n {field : array-like} or {field : dict}\n orient : {'columns', 'index'}, default 'columns'\n The \"orientation\" of the data. If the keys of the passed dict\n should be the columns of the resulting DataFrame, pass 'columns'\n (default). 
Otherwise if the keys should be rows, pass 'index'.\n\n Returns\n -------\n DataFrame\n \"\"\"\n index, columns = None, None\n orient = orient.lower()\n if orient == 'index':\n if len(data) > 0:\n # TODO speed up Series case\n if isinstance(list(data.values())[0], (Series, dict)):\n data = _from_nested_dict(data)\n else:\n data, index = list(data.values()), list(data.keys())\n elif orient != 'columns': # pragma: no cover\n raise ValueError('only recognize index or columns for orient')\n\n return cls(data, index=index, columns=columns, dtype=dtype)\n\n @deprecate_kwarg(old_arg_name='outtype', new_arg_name='orient')\n def to_dict(self, orient='dict'):\n \"\"\"Convert DataFrame to dictionary.\n\n Parameters\n ----------\n orient : str {'dict', 'list', 'series', 'split', 'records'}\n Determines the type of the values of the dictionary.\n\n - dict (default) : dict like {column -> {index -> value}}\n - list : dict like {column -> [values]}\n - series : dict like {column -> Series(values)}\n - split : dict like\n {index -> [index], columns -> [columns], data -> [values]}\n - records : list like\n [{column -> value}, ... , {column -> value}]\n\n Abbreviations are allowed. `s` indicates `series` and `sp`\n indicates `split`.\n\n Returns\n -------\n result : dict like {column -> {index -> value}}\n \"\"\"\n if not self.columns.is_unique:\n warnings.warn(\"DataFrame columns are not unique, some \"\n \"columns will be omitted.\", UserWarning)\n if orient.lower().startswith('d'):\n return dict((k, v.to_dict()) for k, v in compat.iteritems(self))\n elif orient.lower().startswith('l'):\n return dict((k, v.tolist()) for k, v in compat.iteritems(self))\n elif orient.lower().startswith('sp'):\n return {'index': self.index.tolist(),\n 'columns': self.columns.tolist(),\n 'data': self.values.tolist()}\n elif orient.lower().startswith('s'):\n return dict((k, v) for k, v in compat.iteritems(self))\n elif orient.lower().startswith('r'):\n return [dict((k, v) for k, v in zip(self.columns, row))\n for row in self.values]\n else:\n raise ValueError(\"orient '%s' not understood\" % orient)\n\n def to_gbq(self, destination_table, project_id=None, chunksize=10000,\n verbose=True, reauth=False):\n \"\"\"Write a DataFrame to a Google BigQuery table.\n\n THIS IS AN EXPERIMENTAL LIBRARY\n\n If the table exists, the dataframe will be written to the table using\n the defined table schema and column types. For simplicity, this method\n uses the Google BigQuery streaming API. The to_gbq method chunks data\n into a default chunk size of 10,000. Failures return the complete error\n response which can be quite long depending on the size of the insert.\n There are several important limitations of the Google streaming API\n which are detailed at:\n https://developers.google.com/bigquery/streaming-data-into-bigquery.\n\n Parameters\n ----------\n dataframe : DataFrame\n DataFrame to be written\n destination_table : string\n Name of table to be written, in the form 'dataset.tablename'\n project_id : str\n Google BigQuery Account project ID.\n chunksize : int (default 10000)\n Number of rows to be inserted in each chunk from the dataframe.\n verbose : boolean (default True)\n Show percentage complete\n reauth : boolean (default False)\n Force Google BigQuery to reauthenticate the user. 
This is useful\n if multiple accounts are used.\n\n \"\"\"\n\n from pandas.io import gbq\n return gbq.to_gbq(self, destination_table, project_id=project_id,\n chunksize=chunksize, verbose=verbose,\n reauth=reauth)\n\n @classmethod\n def from_records(cls, data, index=None, exclude=None, columns=None,\n coerce_float=False, nrows=None):\n \"\"\"\n Convert structured or record ndarray to DataFrame\n\n Parameters\n ----------\n data : ndarray (structured dtype), list of tuples, dict, or DataFrame\n index : string, list of fields, array-like\n Field of array to use as the index, alternately a specific set of\n input labels to use\n exclude : sequence, default None\n Columns or fields to exclude\n columns : sequence, default None\n Column names to use. If the passed data do not have names\n associated with them, this argument provides names for the\n columns. Otherwise this argument indicates the order of the columns\n in the result (any names not found in the data will become all-NA\n columns)\n coerce_float : boolean, default False\n Attempt to convert values to non-string, non-numeric objects (like\n decimal.Decimal) to floating point, useful for SQL result sets\n\n Returns\n -------\n df : DataFrame\n \"\"\"\n # Make a copy of the input columns so we can modify it\n if columns is not None:\n columns = _ensure_index(columns)\n\n if com.is_iterator(data):\n if nrows == 0:\n return cls()\n\n try:\n if compat.PY3:\n first_row = next(data)\n else:\n first_row = next(data)\n except StopIteration:\n return cls(index=index, columns=columns)\n\n dtype = None\n if hasattr(first_row, 'dtype') and first_row.dtype.names:\n dtype = first_row.dtype\n\n values = [first_row]\n\n if nrows is None:\n values += data\n else:\n values.extend(itertools.islice(data, nrows - 1))\n\n if dtype is not None:\n data = np.array(values, dtype=dtype)\n else:\n data = values\n\n if isinstance(data, dict):\n if columns is None:\n columns = arr_columns = _ensure_index(sorted(data))\n arrays = [data[k] for k in columns]\n else:\n arrays = []\n arr_columns = []\n for k, v in compat.iteritems(data):\n if k in columns:\n arr_columns.append(k)\n arrays.append(v)\n\n arrays, arr_columns = _reorder_arrays(arrays, arr_columns,\n columns)\n\n elif isinstance(data, (np.ndarray, DataFrame)):\n arrays, columns = _to_arrays(data, columns)\n if columns is not None:\n columns = _ensure_index(columns)\n arr_columns = columns\n else:\n arrays, arr_columns = _to_arrays(data, columns,\n coerce_float=coerce_float)\n\n arr_columns = _ensure_index(arr_columns)\n if columns is not None:\n columns = _ensure_index(columns)\n else:\n columns = arr_columns\n\n if exclude is None:\n exclude = set()\n else:\n exclude = set(exclude)\n\n result_index = None\n if index is not None:\n if (isinstance(index, compat.string_types) or\n not hasattr(index, \"__iter__\")):\n i = columns.get_loc(index)\n exclude.add(index)\n if len(arrays) > 0:\n result_index = Index(arrays[i], name=index)\n else:\n result_index = Index([], name=index)\n else:\n try:\n to_remove = [arr_columns.get_loc(field) for field in index]\n\n result_index = MultiIndex.from_arrays(\n [arrays[i] for i in to_remove], names=index)\n\n exclude.update(index)\n except Exception:\n result_index = index\n\n if any(exclude):\n arr_exclude = [x for x in exclude if x in arr_columns]\n to_remove = [arr_columns.get_loc(col) for col in arr_exclude]\n arrays = [v for i, v in enumerate(arrays) if i not in to_remove]\n\n arr_columns = arr_columns.drop(arr_exclude)\n columns = columns.drop(exclude)\n\n mgr = 
_arrays_to_mgr(arrays, arr_columns, result_index,\n columns)\n\n return cls(mgr)\n\n def to_records(self, index=True, convert_datetime64=True):\n \"\"\"\n Convert DataFrame to record array. Index will be put in the\n 'index' field of the record array if requested\n\n Parameters\n ----------\n index : boolean, default True\n Include index in resulting record array, stored in 'index' field\n convert_datetime64 : boolean, default True\n Whether to convert the index to datetime.datetime if it is a\n DatetimeIndex\n\n Returns\n -------\n y : recarray\n \"\"\"\n if index:\n if com.is_datetime64_dtype(self.index) and convert_datetime64:\n ix_vals = [self.index.to_pydatetime()]\n else:\n if isinstance(self.index, MultiIndex):\n # array of tuples to numpy cols. copy copy copy\n ix_vals = lmap(np.array, zip(*self.index.values))\n else:\n ix_vals = [self.index.values]\n\n arrays = ix_vals + [self[c].get_values() for c in self.columns]\n\n count = 0\n index_names = list(self.index.names)\n if isinstance(self.index, MultiIndex):\n for i, n in enumerate(index_names):\n if n is None:\n index_names[i] = 'level_%d' % count\n count += 1\n elif index_names[0] is None:\n index_names = ['index']\n names = index_names + lmap(str, self.columns)\n else:\n arrays = [self[c].get_values() for c in self.columns]\n names = lmap(str, self.columns)\n\n dtype = np.dtype([(x, v.dtype) for x, v in zip(names, arrays)])\n return np.rec.fromarrays(arrays, dtype=dtype, names=names)\n\n @classmethod\n def from_items(cls, items, columns=None, orient='columns'):\n \"\"\"\n Convert (key, value) pairs to DataFrame. The keys will be the axis\n index (usually the columns, but depends on the specified\n orientation). The values should be arrays or Series.\n\n Parameters\n ----------\n items : sequence of (key, value) pairs\n Values should be arrays or Series.\n columns : sequence of column labels, optional\n Must be passed if orient='index'.\n orient : {'columns', 'index'}, default 'columns'\n The \"orientation\" of the data. If the keys of the\n input correspond to column labels, pass 'columns'\n (default). 
Otherwise if the keys correspond to the index,\n pass 'index'.\n\n Returns\n -------\n frame : DataFrame\n \"\"\"\n keys, values = lzip(*items)\n\n if orient == 'columns':\n if columns is not None:\n columns = _ensure_index(columns)\n\n idict = dict(items)\n if len(idict) < len(items):\n if not columns.equals(_ensure_index(keys)):\n raise ValueError('With non-unique item names, passed '\n 'columns must be identical')\n arrays = values\n else:\n arrays = [idict[k] for k in columns if k in idict]\n else:\n columns = _ensure_index(keys)\n arrays = values\n\n return cls._from_arrays(arrays, columns, None)\n elif orient == 'index':\n if columns is None:\n raise TypeError(\"Must pass columns with orient='index'\")\n\n keys = _ensure_index(keys)\n\n arr = np.array(values, dtype=object).T\n data = [lib.maybe_convert_objects(v) for v in arr]\n return cls._from_arrays(data, columns, keys)\n else: # pragma: no cover\n raise ValueError(\"'orient' must be either 'columns' or 'index'\")\n\n @classmethod\n def _from_arrays(cls, arrays, columns, index, dtype=None):\n mgr = _arrays_to_mgr(arrays, columns, index, columns, dtype=dtype)\n return cls(mgr)\n\n @classmethod\n def from_csv(cls, path, header=0, sep=',', index_col=0,\n parse_dates=True, encoding=None, tupleize_cols=False,\n infer_datetime_format=False):\n \"\"\"\n Read delimited file into DataFrame\n\n Parameters\n ----------\n path : string file path or file handle / StringIO\n header : int, default 0\n Row to use at header (skip prior rows)\n sep : string, default ','\n Field delimiter\n index_col : int or sequence, default 0\n Column to use for index. If a sequence is given, a MultiIndex\n is used. Different default from read_table\n parse_dates : boolean, default True\n Parse dates. Different default from read_table\n tupleize_cols : boolean, default False\n write multi_index columns as a list of tuples (if True)\n or new (expanded format) if False)\n infer_datetime_format: boolean, default False\n If True and `parse_dates` is True for a column, try to infer the\n datetime format based on the first datetime string. If the format\n can be inferred, there often will be a large parsing speed-up.\n\n Notes\n -----\n Preferable to use read_table for most general purposes but from_csv\n makes for an easy roundtrip to and from file, especially with a\n DataFrame of time series data\n\n Returns\n -------\n y : DataFrame\n \"\"\"\n from pandas.io.parsers import read_table\n return read_table(path, header=header, sep=sep,\n parse_dates=parse_dates, index_col=index_col,\n encoding=encoding, tupleize_cols=tupleize_cols,\n infer_datetime_format=infer_datetime_format)\n\n def to_sparse(self, fill_value=None, kind='block'):\n \"\"\"\n Convert to SparseDataFrame\n\n Parameters\n ----------\n fill_value : float, default NaN\n kind : {'block', 'integer'}\n\n Returns\n -------\n y : SparseDataFrame\n \"\"\"\n from pandas.core.sparse import SparseDataFrame\n return SparseDataFrame(self._series, index=self.index,\n default_kind=kind,\n default_fill_value=fill_value)\n\n def to_panel(self):\n \"\"\"\n Transform long (stacked) format (DataFrame) into wide (3D, Panel)\n format.\n\n Currently the index of the DataFrame must be a 2-level MultiIndex. 
This\n may be generalized later\n\n Returns\n -------\n panel : Panel\n \"\"\"\n from pandas.core.panel import Panel\n\n # only support this kind for now\n if (not isinstance(self.index, MultiIndex) or # pragma: no cover\n len(self.index.levels) != 2):\n raise NotImplementedError('Only 2-level MultiIndex are supported.')\n\n if not self.index.is_unique:\n raise ValueError(\"Can't convert non-uniquely indexed \"\n \"DataFrame to Panel\")\n\n self._consolidate_inplace()\n\n # minor axis must be sorted\n if self.index.lexsort_depth < 2:\n selfsorted = self.sortlevel(0)\n else:\n selfsorted = self\n\n major_axis, minor_axis = selfsorted.index.levels\n major_labels, minor_labels = selfsorted.index.labels\n shape = len(major_axis), len(minor_axis)\n\n # preserve names, if any\n major_axis = major_axis.copy()\n major_axis.name = self.index.names[0]\n\n minor_axis = minor_axis.copy()\n minor_axis.name = self.index.names[1]\n\n # create new axes\n new_axes = [selfsorted.columns, major_axis, minor_axis]\n\n # create new manager\n new_mgr = selfsorted._data.reshape_nd(axes=new_axes,\n labels=[major_labels, minor_labels],\n shape=shape,\n ref_items=selfsorted.columns)\n\n return Panel(new_mgr)\n\n to_wide = deprecate('to_wide', to_panel)\n\n def to_csv(self, path_or_buf=None, sep=\",\", na_rep='', float_format=None,\n columns=None, header=True, index=True, index_label=None,\n mode='w', encoding=None, quoting=None,\n quotechar='\"', line_terminator='\\n', chunksize=None,\n tupleize_cols=False, date_format=None, doublequote=True,\n escapechar=None, decimal='.', **kwds):\n r\"\"\"Write DataFrame to a comma-separated values (csv) file\n\n Parameters\n ----------\n path_or_buf : string or file handle, default None\n File path or object, if None is provided the result is returned as\n a string.\n sep : character, default \",\"\n Field delimiter for the output file.\n na_rep : string, default ''\n Missing data representation\n float_format : string, default None\n Format string for floating point numbers\n columns : sequence, optional\n Columns to write\n header : boolean or list of string, default True\n Write out column names. If a list of string is given it is assumed\n to be aliases for the column names\n index : boolean, default True\n Write row names (index)\n index_label : string or sequence, or False, default None\n Column label for index column(s) if desired. If None is given, and\n `header` and `index` are True, then the index names are used. A\n sequence should be given if the DataFrame uses MultiIndex. If\n False do not print fields for index names. 
Use index_label=False\n for easier importing in R\n nanRep : None\n deprecated, use na_rep\n mode : str\n Python write mode, default 'w'\n encoding : string, optional\n A string representing the encoding to use in the output file,\n defaults to 'ascii' on Python 2 and 'utf-8' on Python 3.\n line_terminator : string, default '\\\\n'\n The newline character or character sequence to use in the output\n file\n quoting : optional constant from csv module\n defaults to csv.QUOTE_MINIMAL\n quotechar : string (length 1), default '\"'\n character used to quote fields\n doublequote : boolean, default True\n Control quoting of `quotechar` inside a field\n escapechar : string (length 1), default None\n character used to escape `sep` and `quotechar` when appropriate\n chunksize : int or None\n rows to write at a time\n tupleize_cols : boolean, default False\n write multi_index columns as a list of tuples (if True)\n or new (expanded format) if False)\n date_format : string, default None\n Format string for datetime objects\n decimal: string, default '.'\n Character recognized as decimal separator. E.g. use ',' for European data\n \"\"\"\n\n formatter = fmt.CSVFormatter(self, path_or_buf,\n line_terminator=line_terminator,\n sep=sep, encoding=encoding,\n quoting=quoting, na_rep=na_rep,\n float_format=float_format, cols=columns,\n header=header, index=index,\n index_label=index_label, mode=mode,\n chunksize=chunksize, quotechar=quotechar,\n engine=kwds.get(\"engine\"),\n tupleize_cols=tupleize_cols,\n date_format=date_format,\n doublequote=doublequote,\n escapechar=escapechar,\n decimal=decimal)\n formatter.save()\n\n if path_or_buf is None:\n return formatter.path_or_buf.getvalue()\n\n def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',\n float_format=None, columns=None, header=True, index=True,\n index_label=None, startrow=0, startcol=0, engine=None,\n merge_cells=True, encoding=None, inf_rep='inf'):\n \"\"\"\n Write DataFrame to a excel sheet\n\n Parameters\n ----------\n excel_writer : string or ExcelWriter object\n File path or existing ExcelWriter\n sheet_name : string, default 'Sheet1'\n Name of sheet which will contain DataFrame\n na_rep : string, default ''\n Missing data representation\n float_format : string, default None\n Format string for floating point numbers\n columns : sequence, optional\n Columns to write\n header : boolean or list of string, default True\n Write out column names. If a list of string is given it is\n assumed to be aliases for the column names\n index : boolean, default True\n Write row names (index)\n index_label : string or sequence, default None\n Column label for index column(s) if desired. If None is given, and\n `header` and `index` are True, then the index names are used. A\n sequence should be given if the DataFrame uses MultiIndex.\n startrow :\n upper left cell row to dump data frame\n startcol :\n upper left cell column to dump data frame\n engine : string, default None\n write engine to use - you can also set this via the options\n ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and\n ``io.excel.xlsm.writer``.\n merge_cells : boolean, default True\n Write MultiIndex and Hierarchical Rows as merged cells.\n encoding: string, default None\n encoding of the resulting excel file. 
Only necessary for xlwt,\n other writers support unicode natively.\n inf_rep : string, default 'inf'\n Representation for infinity (there is no native representation for\n infinity in Excel)\n\n Notes\n -----\n If passing an existing ExcelWriter object, then the sheet will be added\n to the existing workbook. This can be used to save different\n DataFrames to one workbook:\n\n >>> writer = ExcelWriter('output.xlsx')\n >>> df1.to_excel(writer,'Sheet1')\n >>> df2.to_excel(writer,'Sheet2')\n >>> writer.save()\n \"\"\"\n from pandas.io.excel import ExcelWriter\n\n need_save = False\n if encoding == None:\n encoding = 'ascii'\n\n if isinstance(excel_writer, compat.string_types):\n excel_writer = ExcelWriter(excel_writer, engine=engine)\n need_save = True\n\n formatter = fmt.ExcelFormatter(self,\n na_rep=na_rep,\n cols=columns,\n header=header,\n float_format=float_format,\n index=index,\n index_label=index_label,\n merge_cells=merge_cells,\n inf_rep=inf_rep)\n formatted_cells = formatter.get_formatted_cells()\n excel_writer.write_cells(formatted_cells, sheet_name,\n startrow=startrow, startcol=startcol)\n if need_save:\n excel_writer.save()\n\n def to_stata(\n self, fname, convert_dates=None, write_index=True, encoding=\"latin-1\",\n byteorder=None, time_stamp=None, data_label=None):\n \"\"\"\n A class for writing Stata binary dta files from array-like objects\n\n Parameters\n ----------\n fname : file path or buffer\n Where to save the dta file.\n convert_dates : dict\n Dictionary mapping column of datetime types to the stata internal\n format that you want to use for the dates. Options are\n 'tc', 'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either a\n number or a name.\n encoding : str\n Default is latin-1. Note that Stata does not support unicode.\n byteorder : str\n Can be \">\", \"<\", \"little\", or \"big\". 
The default is None which uses\n `sys.byteorder`\n\n Examples\n --------\n >>> writer = StataWriter('./data_file.dta', data)\n >>> writer.write_file()\n\n Or with dates\n\n >>> writer = StataWriter('./date_data_file.dta', data, {2 : 'tw'})\n >>> writer.write_file()\n \"\"\"\n from pandas.io.stata import StataWriter\n writer = StataWriter(fname, self, convert_dates=convert_dates,\n encoding=encoding, byteorder=byteorder,\n time_stamp=time_stamp, data_label=data_label,\n write_index=write_index)\n writer.write_file()\n\n @Appender(fmt.docstring_to_string, indents=1)\n def to_string(self, buf=None, columns=None, col_space=None, colSpace=None,\n header=True, index=True, na_rep='NaN', formatters=None,\n float_format=None, sparsify=None, index_names=True,\n justify=None, line_width=None, max_rows=None, max_cols=None,\n show_dimensions=False):\n \"\"\"\n Render a DataFrame to a console-friendly tabular output.\n \"\"\"\n\n if colSpace is not None: # pragma: no cover\n warnings.warn(\"colSpace is deprecated, use col_space\",\n FutureWarning)\n col_space = colSpace\n\n formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,\n col_space=col_space, na_rep=na_rep,\n formatters=formatters,\n float_format=float_format,\n sparsify=sparsify,\n justify=justify,\n index_names=index_names,\n header=header, index=index,\n line_width=line_width,\n max_rows=max_rows,\n max_cols=max_cols,\n show_dimensions=show_dimensions)\n formatter.to_string()\n\n if buf is None:\n result = formatter.buf.getvalue()\n return result\n\n @Appender(fmt.docstring_to_string, indents=1)\n def to_html(self, buf=None, columns=None, col_space=None, colSpace=None,\n header=True, index=True, na_rep='NaN', formatters=None,\n float_format=None, sparsify=None, index_names=True,\n justify=None, bold_rows=True, classes=None, escape=True,\n max_rows=None, max_cols=None, show_dimensions=False):\n \"\"\"\n Render a DataFrame as an HTML table.\n\n `to_html`-specific options:\n\n bold_rows : boolean, default True\n Make the row labels bold in the output\n classes : str or list or tuple, default None\n CSS class(es) to apply to the resulting html table\n escape : boolean, default True\n Convert the characters <, >, and & to HTML-safe sequences.=\n max_rows : int, optional\n Maximum number of rows to show before truncating. If None, show\n all.\n max_cols : int, optional\n Maximum number of columns to show before truncating. If None, show\n all.\n\n \"\"\"\n\n if colSpace is not None: # pragma: no cover\n warnings.warn(\"colSpace is deprecated, use col_space\",\n FutureWarning)\n col_space = colSpace\n\n formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,\n col_space=col_space, na_rep=na_rep,\n formatters=formatters,\n float_format=float_format,\n sparsify=sparsify,\n justify=justify,\n index_names=index_names,\n header=header, index=index,\n bold_rows=bold_rows,\n escape=escape,\n max_rows=max_rows,\n max_cols=max_cols,\n show_dimensions=show_dimensions)\n formatter.to_html(classes=classes)\n\n if buf is None:\n return formatter.buf.getvalue()\n\n @Appender(fmt.docstring_to_string, indents=1)\n def to_latex(self, buf=None, columns=None, col_space=None, colSpace=None,\n header=True, index=True, na_rep='NaN', formatters=None,\n float_format=None, sparsify=None, index_names=True,\n bold_rows=True, longtable=False, escape=True):\n \"\"\"\n Render a DataFrame to a tabular environment table. You can splice\n this into a LaTeX document. 
Requires \\\\usepackage{booktabs}.\n\n `to_latex`-specific options:\n\n bold_rows : boolean, default True\n Make the row labels bold in the output\n longtable : boolean, default False\n Use a longtable environment instead of tabular. Requires adding\n a \\\\usepackage{longtable} to your LaTeX preamble.\n escape : boolean, default True\n When set to False prevents from escaping latex special\n characters in column names.\n\n \"\"\"\n\n if colSpace is not None: # pragma: no cover\n warnings.warn(\"colSpace is deprecated, use col_space\",\n FutureWarning)\n col_space = colSpace\n\n formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,\n col_space=col_space, na_rep=na_rep,\n header=header, index=index,\n formatters=formatters,\n float_format=float_format,\n bold_rows=bold_rows,\n sparsify=sparsify,\n index_names=index_names,\n escape=escape)\n formatter.to_latex(longtable=longtable)\n\n if buf is None:\n return formatter.buf.getvalue()\n\n def info(self, verbose=None, buf=None, max_cols=None, memory_usage=None, null_counts=None):\n \"\"\"\n Concise summary of a DataFrame.\n\n Parameters\n ----------\n verbose : {None, True, False}, optional\n Whether to print the full summary.\n None follows the `display.max_info_columns` setting.\n True or False overrides the `display.max_info_columns` setting.\n buf : writable buffer, defaults to sys.stdout\n max_cols : int, default None\n Determines whether full summary or short summary is printed.\n None follows the `display.max_info_columns` setting.\n memory_usage : boolean, default None\n Specifies whether total memory usage of the DataFrame\n elements (including index) should be displayed. None follows\n the `display.memory_usage` setting. True or False overrides\n the `display.memory_usage` setting. 
Memory usage is shown in\n human-readable units (base-2 representation).\n null_counts : boolean, default None\n Whether to show the non-null counts\n If None, then only show if the frame is smaller than max_info_rows and max_info_columns.\n If True, always show counts.\n If False, never show counts.\n\n \"\"\"\n from pandas.core.format import _put_lines\n\n if buf is None: # pragma: no cover\n buf = sys.stdout\n\n lines = []\n\n lines.append(str(type(self)))\n lines.append(self.index.summary())\n\n if len(self.columns) == 0:\n lines.append('Empty %s' % type(self).__name__)\n _put_lines(buf, lines)\n return\n\n cols = self.columns\n\n # hack\n if max_cols is None:\n max_cols = get_option(\n 'display.max_info_columns', len(self.columns) + 1)\n\n max_rows = get_option('display.max_info_rows', len(self) + 1)\n\n if null_counts is None:\n show_counts = ((len(self.columns) <= max_cols) and\n (len(self) < max_rows))\n else:\n show_counts = null_counts\n exceeds_info_cols = len(self.columns) > max_cols\n\n def _verbose_repr():\n lines.append('Data columns (total %d columns):' %\n len(self.columns))\n space = max([len(com.pprint_thing(k)) for k in self.columns]) + 4\n counts = None\n\n tmpl = \"%s%s\"\n if show_counts:\n counts = self.count()\n if len(cols) != len(counts): # pragma: no cover\n raise AssertionError('Columns must equal counts (%d != %d)' %\n (len(cols), len(counts)))\n tmpl = \"%s non-null %s\"\n\n dtypes = self.dtypes\n for i, col in enumerate(self.columns):\n dtype = dtypes[col]\n col = com.pprint_thing(col)\n\n count = \"\"\n if show_counts:\n count = counts.iloc[i]\n\n lines.append(_put_str(col, space) +\n tmpl % (count, dtype))\n\n def _non_verbose_repr():\n lines.append(self.columns.summary(name='Columns'))\n\n def _sizeof_fmt(num, size_qualifier):\n # returns size in human readable format\n for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:\n if num < 1024.0:\n return \"%3.1f%s %s\" % (num, size_qualifier, x)\n num /= 1024.0\n return \"%3.1f%s %s\" % (num, size_qualifier, 'PB')\n\n if verbose:\n _verbose_repr()\n elif verbose is False: # specifically set to False, not nesc None\n _non_verbose_repr()\n else:\n if exceeds_info_cols:\n _non_verbose_repr()\n else:\n _verbose_repr()\n\n counts = self.get_dtype_counts()\n dtypes = ['%s(%d)' % k for k in sorted(compat.iteritems(counts))]\n lines.append('dtypes: %s' % ', '.join(dtypes))\n if memory_usage is None:\n memory_usage = get_option('display.memory_usage')\n if memory_usage: # append memory usage of df to display\n # size_qualifier is just a best effort; not guaranteed to catch all\n # cases (e.g., it misses categorical data even with object\n # categories)\n size_qualifier = ('+' if 'object' in counts\n or is_object_dtype(self.index) else '')\n mem_usage = self.memory_usage(index=True).sum()\n lines.append(\"memory usage: %s\\n\" %\n _sizeof_fmt(mem_usage, size_qualifier))\n _put_lines(buf, lines)\n\n def memory_usage(self, index=False):\n \"\"\"Memory usage of DataFrame columns.\n\n Parameters\n ----------\n index : bool\n Specifies whether to include memory usage of DataFrame's\n index in returned Series. 
If `index=True` (default is False)\n the first index of the Series is `Index`.\n\n Returns\n -------\n sizes : Series\n A series with column names as index and memory usage of\n columns with units of bytes.\n\n Notes\n -----\n Memory usage does not include memory consumed by elements that\n are not components of the array.\n\n See Also\n --------\n numpy.ndarray.nbytes\n \"\"\"\n result = Series([ c.values.nbytes for col, c in self.iteritems() ],\n index=self.columns)\n if index:\n result = Series(self.index.nbytes,\n index=['Index']).append(result)\n return result\n\n def transpose(self):\n \"\"\"Transpose index and columns\"\"\"\n return super(DataFrame, self).transpose(1, 0)\n\n T = property(transpose)\n\n #----------------------------------------------------------------------\n # Picklability\n\n # legacy pickle formats\n def _unpickle_frame_compat(self, state): # pragma: no cover\n from pandas.core.common import _unpickle_array\n if len(state) == 2: # pragma: no cover\n series, idx = state\n columns = sorted(series)\n else:\n series, cols, idx = state\n columns = _unpickle_array(cols)\n\n index = _unpickle_array(idx)\n self._data = self._init_dict(series, index, columns, None)\n\n def _unpickle_matrix_compat(self, state): # pragma: no cover\n from pandas.core.common import _unpickle_array\n # old unpickling\n (vals, idx, cols), object_state = state\n\n index = _unpickle_array(idx)\n dm = DataFrame(vals, index=index, columns=_unpickle_array(cols),\n copy=False)\n\n if object_state is not None:\n ovals, _, ocols = object_state\n objects = DataFrame(ovals, index=index,\n columns=_unpickle_array(ocols),\n copy=False)\n\n dm = dm.join(objects)\n\n self._data = dm._data\n\n #----------------------------------------------------------------------\n #----------------------------------------------------------------------\n # Getting and setting elements\n\n def get_value(self, index, col, takeable=False):\n \"\"\"\n Quickly retrieve single value at passed column and index\n\n Parameters\n ----------\n index : row label\n col : column label\n takeable : interpret the index/col as indexers, default False\n\n Returns\n -------\n value : scalar value\n \"\"\"\n\n if takeable:\n series = self._iget_item_cache(col)\n return _maybe_box_datetimelike(series.values[index])\n\n series = self._get_item_cache(col)\n engine = self.index._engine\n return engine.get_value(series.get_values(), index)\n\n def set_value(self, index, col, value, takeable=False):\n \"\"\"\n Put single value at passed column and index\n\n Parameters\n ----------\n index : row label\n col : column label\n value : scalar value\n takeable : interpret the index/col as indexers, default False\n\n Returns\n -------\n frame : DataFrame\n If label pair is contained, will be reference to calling DataFrame,\n otherwise a new object\n \"\"\"\n try:\n if takeable is True:\n series = self._iget_item_cache(col)\n return series.set_value(index, value, takeable=True)\n\n series = self._get_item_cache(col)\n engine = self.index._engine\n engine.set_value(series.values, index, value)\n return self\n except (KeyError, TypeError):\n\n # set using a non-recursive method & reset the cache\n self.loc[index, col] = value\n self._item_cache.pop(col, None)\n\n return self\n\n def irow(self, i, copy=False):\n return self._ixs(i, axis=0)\n\n def icol(self, i):\n return self._ixs(i, axis=1)\n\n def _ixs(self, i, axis=0):\n \"\"\"\n i : int, slice, or sequence of integers\n axis : int\n \"\"\"\n\n # irow\n if axis == 0:\n\n \"\"\"\n Notes\n -----\n If slice 
passed, the resulting data will be a view\n \"\"\"\n\n if isinstance(i, slice):\n return self[i]\n else:\n label = self.index[i]\n if isinstance(label, Index):\n # a location index by definition\n result = self.take(i, axis=axis)\n copy=True\n else:\n new_values = self._data.fast_xs(i)\n\n # if we are a copy, mark as such\n copy = isinstance(new_values,np.ndarray) and new_values.base is None\n result = Series(new_values, index=self.columns,\n name=self.index[i], dtype=new_values.dtype)\n result._set_is_copy(self, copy=copy)\n return result\n\n # icol\n else:\n\n \"\"\"\n Notes\n -----\n If slice passed, the resulting data will be a view\n \"\"\"\n\n label = self.columns[i]\n if isinstance(i, slice):\n # need to return view\n lab_slice = slice(label[0], label[-1])\n return self.ix[:, lab_slice]\n else:\n label = self.columns[i]\n if isinstance(label, Index):\n return self.take(i, axis=1, convert=True)\n\n # if the values returned are not the same length\n # as the index (iow a not found value), iget returns\n # a 0-len ndarray. This is effectively catching\n # a numpy error (as numpy should really raise)\n values = self._data.iget(i)\n if not len(values):\n values = np.array([np.nan] * len(self.index), dtype=object)\n result = self._constructor_sliced.from_array(\n values, index=self.index,\n name=label, fastpath=True)\n\n # this is a cached value, mark it so\n result._set_as_cached(label, self)\n\n return result\n\n def iget_value(self, i, j):\n return self.iat[i, j]\n\n def __getitem__(self, key):\n\n # shortcut if we are an actual column\n is_mi_columns = isinstance(self.columns, MultiIndex)\n try:\n if key in self.columns and not is_mi_columns:\n return self._getitem_column(key)\n except:\n pass\n\n # see if we can slice the rows\n indexer = convert_to_index_sliceable(self, key)\n if indexer is not None:\n return self._getitem_slice(indexer)\n\n if isinstance(key, (Series, np.ndarray, Index, list)):\n # either boolean or fancy integer index\n return self._getitem_array(key)\n elif isinstance(key, DataFrame):\n return self._getitem_frame(key)\n elif is_mi_columns:\n return self._getitem_multilevel(key)\n else:\n return self._getitem_column(key)\n\n def _getitem_column(self, key):\n \"\"\" return the actual column \"\"\"\n\n # get column\n if self.columns.is_unique:\n return self._get_item_cache(key)\n\n # duplicate columns & possible reduce dimensionaility\n result = self._constructor(self._data.get(key))\n if result.columns.is_unique:\n result = result[key]\n\n return result\n\n def _getitem_slice(self, key):\n return self._slice(key, axis=0)\n\n def _getitem_array(self, key):\n # also raises Exception if object array with NA values\n if com.is_bool_indexer(key):\n # warning here just in case -- previously __setitem__ was\n # reindexing but __getitem__ was not; it seems more reasonable to\n # go with the __setitem__ behavior since that is more consistent\n # with all other indexing behavior\n if isinstance(key, Series) and not key.index.equals(self.index):\n warnings.warn(\"Boolean Series key will be reindexed to match \"\n \"DataFrame index.\", UserWarning)\n elif len(key) != len(self.index):\n raise ValueError('Item wrong length %d instead of %d.' 
%\n (len(key), len(self.index)))\n # check_bool_indexer will throw exception if Series key cannot\n # be reindexed to match DataFrame rows\n key = check_bool_indexer(self.index, key)\n indexer = key.nonzero()[0]\n return self.take(indexer, axis=0, convert=False)\n else:\n indexer = self.ix._convert_to_indexer(key, axis=1)\n return self.take(indexer, axis=1, convert=True)\n\n def _getitem_multilevel(self, key):\n loc = self.columns.get_loc(key)\n if isinstance(loc, (slice, Series, np.ndarray, Index)):\n new_columns = self.columns[loc]\n result_columns = maybe_droplevels(new_columns, key)\n if self._is_mixed_type:\n result = self.reindex(columns=new_columns)\n result.columns = result_columns\n else:\n new_values = self.values[:, loc]\n result = DataFrame(new_values, index=self.index,\n columns=result_columns).__finalize__(self)\n if len(result.columns) == 1:\n top = result.columns[0]\n if ((type(top) == str and top == '') or\n (type(top) == tuple and top[0] == '')):\n result = result['']\n if isinstance(result, Series):\n result = Series(result, index=self.index, name=key)\n\n result._set_is_copy(self)\n return result\n else:\n return self._get_item_cache(key)\n\n def _getitem_frame(self, key):\n if key.values.dtype != np.bool_:\n raise ValueError('Must pass DataFrame with boolean values only')\n return self.where(key)\n\n def query(self, expr, **kwargs):\n \"\"\"Query the columns of a frame with a boolean expression.\n\n .. versionadded:: 0.13\n\n Parameters\n ----------\n expr : string\n The query string to evaluate. You can refer to variables\n in the environment by prefixing them with an '@' character like\n ``@a + b``.\n kwargs : dict\n See the documentation for :func:`pandas.eval` for complete details\n on the keyword arguments accepted by :meth:`DataFrame.query`.\n\n Returns\n -------\n q : DataFrame\n\n Notes\n -----\n The result of the evaluation of this expression is first passed to\n :attr:`DataFrame.loc` and if that fails because of a\n multidimensional key (e.g., a DataFrame) then the result will be passed\n to :meth:`DataFrame.__getitem__`.\n\n This method uses the top-level :func:`pandas.eval` function to\n evaluate the passed query.\n\n The :meth:`~pandas.DataFrame.query` method uses a slightly\n modified Python syntax by default. For example, the ``&`` and ``|``\n (bitwise) operators have the precedence of their boolean cousins,\n :keyword:`and` and :keyword:`or`. This *is* syntactically valid Python,\n however the semantics are different.\n\n You can change the semantics of the expression by passing the keyword\n argument ``parser='python'``. This enforces the same semantics as\n evaluation in Python space. Likewise, you can pass ``engine='python'``\n to evaluate an expression using Python itself as a backend. 
This is not\n recommended as it is inefficient compared to using ``numexpr`` as the\n engine.\n\n The :attr:`DataFrame.index` and\n :attr:`DataFrame.columns` attributes of the\n :class:`~pandas.DataFrame` instance are placed in the query namespace\n by default, which allows you to treat both the index and columns of the\n frame as a column in the frame.\n The identifier ``index`` is used for the frame index; you can also\n use the name of the index to identify it in a query.\n\n For further details and examples see the ``query`` documentation in\n :ref:`indexing <indexing.query>`.\n\n See Also\n --------\n pandas.eval\n DataFrame.eval\n\n Examples\n --------\n >>> from numpy.random import randn\n >>> from pandas import DataFrame\n >>> df = DataFrame(randn(10, 2), columns=list('ab'))\n >>> df.query('a > b')\n >>> df[df.a > df.b] # same result as the previous expression\n \"\"\"\n kwargs['level'] = kwargs.pop('level', 0) + 1\n res = self.eval(expr, **kwargs)\n\n try:\n return self.loc[res]\n except ValueError:\n # when res is multi-dimensional loc raises, but this is sometimes a\n # valid query\n return self[res]\n\n def eval(self, expr, **kwargs):\n \"\"\"Evaluate an expression in the context of the calling DataFrame\n instance.\n\n Parameters\n ----------\n expr : string\n The expression string to evaluate.\n kwargs : dict\n See the documentation for :func:`~pandas.eval` for complete details\n on the keyword arguments accepted by\n :meth:`~pandas.DataFrame.query`.\n\n Returns\n -------\n ret : ndarray, scalar, or pandas object\n\n See Also\n --------\n pandas.DataFrame.query\n pandas.eval\n\n Notes\n -----\n For more details see the API documentation for :func:`~pandas.eval`.\n For detailed examples see :ref:`enhancing performance with eval\n <enhancingperf.eval>`.\n\n Examples\n --------\n >>> from numpy.random import randn\n >>> from pandas import DataFrame\n >>> df = DataFrame(randn(10, 2), columns=list('ab'))\n >>> df.eval('a + b')\n >>> df.eval('c = a + b')\n \"\"\"\n resolvers = kwargs.pop('resolvers', None)\n kwargs['level'] = kwargs.pop('level', 0) + 1\n if resolvers is None:\n index_resolvers = self._get_index_resolvers()\n resolvers = dict(self.iteritems()), index_resolvers\n kwargs['target'] = self\n kwargs['resolvers'] = kwargs.get('resolvers', ()) + resolvers\n return _eval(expr, **kwargs)\n\n def select_dtypes(self, include=None, exclude=None):\n \"\"\"Return a subset of a DataFrame including/excluding columns based on\n their ``dtype``.\n\n Parameters\n ----------\n include, exclude : list-like\n A list of dtypes or strings to be included/excluded. You must pass\n in a non-empty sequence for at least one of these.\n\n Raises\n ------\n ValueError\n * If both of ``include`` and ``exclude`` are empty\n * If ``include`` and ``exclude`` have overlapping elements\n * If any kind of string dtype is passed in.\n TypeError\n * If either of ``include`` or ``exclude`` is not a sequence\n\n Returns\n -------\n subset : DataFrame\n The subset of the frame including the dtypes in ``include`` and\n excluding the dtypes in ``exclude``.\n\n Notes\n -----\n * To select all *numeric* types use the numpy dtype ``numpy.number``\n * To select strings you must use the ``object`` dtype, but note that\n this will return *all* object dtype columns\n * See the `numpy dtype hierarchy\n <http://docs.scipy.org/doc/numpy/reference/arrays.scalars.html>`__\n * To select Pandas categorical dtypes, use 'category'\n\n Examples\n --------\n >>> df = pd.DataFrame({'a': np.random.randn(6).astype('f4'),\n ... 
'b': [True, False] * 3,\n ... 'c': [1.0, 2.0] * 3})\n >>> df\n a b c\n 0 0.3962 True 1\n 1 0.1459 False 2\n 2 0.2623 True 1\n 3 0.0764 False 2\n 4 -0.9703 True 1\n 5 -1.2094 False 2\n >>> df.select_dtypes(include=['float64'])\n c\n 0 1\n 1 2\n 2 1\n 3 2\n 4 1\n 5 2\n >>> df.select_dtypes(exclude=['floating'])\n b\n 0 True\n 1 False\n 2 True\n 3 False\n 4 True\n 5 False\n \"\"\"\n include, exclude = include or (), exclude or ()\n if not (com.is_list_like(include) and com.is_list_like(exclude)):\n raise TypeError('include and exclude must both be non-string'\n ' sequences')\n selection = tuple(map(frozenset, (include, exclude)))\n\n if not any(selection):\n raise ValueError('at least one of include or exclude must be '\n 'nonempty')\n\n # convert the myriad valid dtypes object to a single representation\n include, exclude = map(lambda x:\n frozenset(map(com._get_dtype_from_object, x)),\n selection)\n for dtypes in (include, exclude):\n com._invalidate_string_dtypes(dtypes)\n\n # can't both include AND exclude!\n if not include.isdisjoint(exclude):\n raise ValueError('include and exclude overlap on %s'\n % (include & exclude))\n\n # empty include/exclude -> defaults to True\n # three cases (we've already raised if both are empty)\n # case 1: empty include, nonempty exclude\n # we have True, True, ... True for include, same for exclude\n # in the loop below we get the excluded\n # and when we call '&' below we get only the excluded\n # case 2: nonempty include, empty exclude\n # same as case 1, but with include\n # case 3: both nonempty\n # the \"union\" of the logic of case 1 and case 2:\n # we get the included and excluded, and return their logical and\n include_these = Series(not bool(include), index=self.columns)\n exclude_these = Series(not bool(exclude), index=self.columns)\n\n def is_dtype_instance_mapper(column, dtype):\n return column, functools.partial(issubclass, dtype.type)\n\n for column, f in itertools.starmap(is_dtype_instance_mapper,\n self.dtypes.iteritems()):\n if include: # checks for the case of empty include or exclude\n include_these[column] = any(map(f, include))\n if exclude:\n exclude_these[column] = not any(map(f, exclude))\n\n dtype_indexer = include_these & exclude_these\n return self.loc[com._get_info_slice(self, dtype_indexer)]\n\n def _box_item_values(self, key, values):\n items = self.columns[self.columns.get_loc(key)]\n if values.ndim == 2:\n return self._constructor(values.T, columns=items, index=self.index)\n else:\n return self._box_col_values(values, items)\n\n def _box_col_values(self, values, items):\n \"\"\" provide boxed values for a column \"\"\"\n return self._constructor_sliced.from_array(values, index=self.index,\n name=items, fastpath=True)\n\n def __setitem__(self, key, value):\n\n # see if we can slice the rows\n indexer = convert_to_index_sliceable(self, key)\n if indexer is not None:\n return self._setitem_slice(indexer, value)\n\n if isinstance(key, (Series, np.ndarray, list, Index)):\n self._setitem_array(key, value)\n elif isinstance(key, DataFrame):\n self._setitem_frame(key, value)\n else:\n # set column\n self._set_item(key, value)\n\n def _setitem_slice(self, key, value):\n self._check_setitem_copy()\n self.ix._setitem_with_indexer(key, value)\n\n def _setitem_array(self, key, value):\n # also raises Exception if object array with NA values\n if com.is_bool_indexer(key):\n if len(key) != len(self.index):\n raise ValueError('Item wrong length %d instead of %d!' 
%\n (len(key), len(self.index)))\n key = check_bool_indexer(self.index, key)\n indexer = key.nonzero()[0]\n self._check_setitem_copy()\n self.ix._setitem_with_indexer(indexer, value)\n else:\n if isinstance(value, DataFrame):\n if len(value.columns) != len(key):\n raise ValueError('Columns must be same length as key')\n for k1, k2 in zip(key, value.columns):\n self[k1] = value[k2]\n else:\n indexer = self.ix._convert_to_indexer(key, axis=1)\n self._check_setitem_copy()\n self.ix._setitem_with_indexer((slice(None), indexer), value)\n\n def _setitem_frame(self, key, value):\n # support boolean setting with DataFrame input, e.g.\n # df[df > df2] = 0\n if key.values.dtype != np.bool_:\n raise TypeError('Must pass DataFrame with boolean values only')\n\n self._check_inplace_setting(value)\n self._check_setitem_copy()\n self.where(-key, value, inplace=True)\n\n def _ensure_valid_index(self, value):\n \"\"\"\n ensure that if we don't have an index, that we can create one from the\n passed value\n \"\"\"\n if not len(self.index):\n\n # GH5632, make sure that we are a Series convertible\n if is_list_like(value):\n try:\n value = Series(value)\n except:\n pass\n\n if not isinstance(value, Series):\n raise ValueError('Cannot set a frame with no defined index '\n 'and a value that cannot be converted to a '\n 'Series')\n\n self._data = self._data.reindex_axis(value.index.copy(), axis=1,\n fill_value=np.nan)\n\n # we are a scalar\n # noop\n else:\n\n pass\n\n def _set_item(self, key, value):\n \"\"\"\n Add series to DataFrame in specified column.\n\n If series is a numpy-array (not a Series/TimeSeries), it must be the\n same length as the DataFrames index or an error will be thrown.\n\n Series/TimeSeries will be conformed to the DataFrames index to\n ensure homogeneity.\n \"\"\"\n\n self._ensure_valid_index(value)\n value = self._sanitize_column(key, value)\n NDFrame._set_item(self, key, value)\n\n # check if we are modifying a copy\n # try to set first as we want an invalid\n # value exeption to occur first\n if len(self):\n self._check_setitem_copy()\n\n def insert(self, loc, column, value, allow_duplicates=False):\n \"\"\"\n Insert column into DataFrame at specified location.\n\n If `allow_duplicates` is False, raises Exception if column\n is already contained in the DataFrame.\n\n Parameters\n ----------\n loc : int\n Must have 0 <= loc <= len(columns)\n column : object\n value : int, Series, or array-like\n \"\"\"\n self._ensure_valid_index(value)\n value = self._sanitize_column(column, value)\n self._data.insert(\n loc, column, value, allow_duplicates=allow_duplicates)\n\n def assign(self, **kwargs):\n \"\"\"\n Assign new columns to a DataFrame, returning a new object\n (a copy) with all the original columns in addition to the new ones.\n\n .. versionadded:: 0.16.0\n\n Parameters\n ----------\n kwargs : keyword, value pairs\n keywords are the column names. If the values are\n callable, they are computed on the DataFrame and\n assigned to the new columns. If the values are\n not callable, (e.g. a Series, scalar, or array),\n they are simply assigned.\n\n Returns\n -------\n df : DataFrame\n A new DataFrame with the new columns in addition to\n all the existing columns.\n\n Notes\n -----\n Since ``kwargs`` is a dictionary, the order of your\n arguments may not be preserved, and so the order of the\n new columns is not well defined. 
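        As an illustrative sketch, chaining separate calls such as ``df.assign(c=1).assign(d=lambda x: x.c + 1)`` is one way to obtain a deterministic column order and to build columns that depend on each other. 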
Assigning multiple\n columns within the same ``assign`` is possible, but you cannot\n reference other columns created within the same ``assign`` call.\n\n Examples\n --------\n >>> df = DataFrame({'A': range(1, 11), 'B': np.random.randn(10)})\n\n Where the value is a callable, evaluated on `df`:\n\n >>> df.assign(ln_A = lambda x: np.log(x.A))\n A B ln_A\n 0 1 0.426905 0.000000\n 1 2 -0.780949 0.693147\n 2 3 -0.418711 1.098612\n 3 4 -0.269708 1.386294\n 4 5 -0.274002 1.609438\n 5 6 -0.500792 1.791759\n 6 7 1.649697 1.945910\n 7 8 -1.495604 2.079442\n 8 9 0.549296 2.197225\n 9 10 -0.758542 2.302585\n\n Where the value already exists and is inserted:\n\n >>> newcol = np.log(df['A'])\n >>> df.assign(ln_A=newcol)\n A B ln_A\n 0 1 0.426905 0.000000\n 1 2 -0.780949 0.693147\n 2 3 -0.418711 1.098612\n 3 4 -0.269708 1.386294\n 4 5 -0.274002 1.609438\n 5 6 -0.500792 1.791759\n 6 7 1.649697 1.945910\n 7 8 -1.495604 2.079442\n 8 9 0.549296 2.197225\n 9 10 -0.758542 2.302585\n \"\"\"\n data = self.copy()\n\n # do all calculations first...\n results = {}\n for k, v in kwargs.items():\n\n if callable(v):\n results[k] = v(data)\n else:\n results[k] = v\n\n # ... and then assign\n for k, v in results.items():\n data[k] = v\n\n return data\n\n def _sanitize_column(self, key, value):\n # Need to make sure new columns (which go into the BlockManager as new\n # blocks) are always copied\n\n def reindexer(value):\n # reindex if necessary\n\n if value.index.equals(self.index) or not len(self.index):\n value = value.values.copy()\n else:\n\n # GH 4107\n try:\n value = value.reindex(self.index).values\n except Exception as e:\n\n # duplicate axis\n if not value.index.is_unique:\n raise e\n\n # other\n raise TypeError('incompatible index of inserted column '\n 'with frame index')\n return value\n\n if isinstance(value, Series):\n value = reindexer(value)\n\n elif isinstance(value, DataFrame):\n # align right-hand-side columns if self.columns\n # is multi-index and self[key] is a sub-frame\n if isinstance(self.columns, MultiIndex) and key in self.columns:\n loc = self.columns.get_loc(key)\n if isinstance(loc, (slice, Series, np.ndarray, Index)):\n cols = maybe_droplevels(self.columns[loc], key)\n if len(cols) and not cols.equals(value.columns):\n value = value.reindex_axis(cols, axis=1)\n # now align rows\n value = reindexer(value).T\n\n elif isinstance(value, Categorical):\n value = value.copy()\n\n elif (isinstance(value, Index) or is_sequence(value)):\n from pandas.core.series import _sanitize_index\n\n # turn me into an ndarray\n value = _sanitize_index(value, self.index, copy=False)\n if not isinstance(value, (np.ndarray, Index)):\n if isinstance(value, list) and len(value) > 0:\n value = com._possibly_convert_platform(value)\n else:\n value = com._asarray_tuplesafe(value)\n elif value.ndim == 2:\n value = value.copy().T\n else:\n value = value.copy()\n\n # possibly infer to datetimelike\n if is_object_dtype(value.dtype):\n value = _possibly_infer_to_datetimelike(value.ravel()).reshape(value.shape)\n\n else:\n # upcast the scalar\n dtype, value = _infer_dtype_from_scalar(value)\n value = np.repeat(value, len(self.index)).astype(dtype)\n value = com._possibly_cast_to_datetime(value, dtype)\n\n # return unconsolidatables directly\n if isinstance(value, (Categorical, SparseArray)):\n return value\n\n # broadcast across multiple columns if necessary\n if key in self.columns and value.ndim == 1:\n if not self.columns.is_unique or isinstance(self.columns,\n MultiIndex):\n existing_piece = self[key]\n if 
isinstance(existing_piece, DataFrame):\n value = np.tile(value, (len(existing_piece.columns), 1))\n\n return np.atleast_2d(np.asarray(value))\n\n @property\n def _series(self):\n result = {}\n for idx, item in enumerate(self.columns):\n result[item] = Series(self._data.iget(idx), index=self.index,\n name=item)\n return result\n\n def lookup(self, row_labels, col_labels):\n \"\"\"Label-based \"fancy indexing\" function for DataFrame.\n Given equal-length arrays of row and column labels, return an\n array of the values corresponding to each (row, col) pair.\n\n Parameters\n ----------\n row_labels : sequence\n The row labels to use for lookup\n col_labels : sequence\n The column labels to use for lookup\n\n Notes\n -----\n Akin to::\n\n result = []\n for row, col in zip(row_labels, col_labels):\n result.append(df.get_value(row, col))\n\n Examples\n --------\n values : ndarray\n The found values\n\n \"\"\"\n n = len(row_labels)\n if n != len(col_labels):\n raise ValueError('Row labels must have same size as column labels')\n\n thresh = 1000\n if not self._is_mixed_type or n > thresh:\n values = self.values\n ridx = self.index.get_indexer(row_labels)\n cidx = self.columns.get_indexer(col_labels)\n if (ridx == -1).any():\n raise KeyError('One or more row labels was not found')\n if (cidx == -1).any():\n raise KeyError('One or more column labels was not found')\n flat_index = ridx * len(self.columns) + cidx\n result = values.flat[flat_index]\n else:\n result = np.empty(n, dtype='O')\n for i, (r, c) in enumerate(zip(row_labels, col_labels)):\n result[i] = self.get_value(r, c)\n\n if is_object_dtype(result):\n result = lib.maybe_convert_objects(result)\n\n return result\n\n #----------------------------------------------------------------------\n # Reindexing and alignment\n\n def _reindex_axes(self, axes, level, limit, method, fill_value, copy):\n frame = self\n\n columns = axes['columns']\n if columns is not None:\n frame = frame._reindex_columns(columns, copy, level, fill_value,\n limit)\n\n index = axes['index']\n if index is not None:\n frame = frame._reindex_index(index, method, copy, level,\n fill_value, limit)\n\n return frame\n\n def _reindex_index(self, new_index, method, copy, level, fill_value=NA,\n limit=None):\n new_index, indexer = self.index.reindex(new_index, method, level,\n limit=limit)\n return self._reindex_with_indexers({0: [new_index, indexer]},\n copy=copy, fill_value=fill_value,\n allow_dups=False)\n\n def _reindex_columns(self, new_columns, copy, level, fill_value=NA,\n limit=None):\n new_columns, indexer = self.columns.reindex(new_columns, level=level,\n limit=limit)\n return self._reindex_with_indexers({1: [new_columns, indexer]},\n copy=copy, fill_value=fill_value,\n allow_dups=False)\n\n def _reindex_multi(self, axes, copy, fill_value):\n \"\"\" we are guaranteed non-Nones in the axes! 
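        When both row and column indexers are available, the values are taken in a single ``com.take_2d_multi`` pass; otherwise this falls back to ``_reindex_with_indexers``. 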
\"\"\"\n\n new_index, row_indexer = self.index.reindex(axes['index'])\n new_columns, col_indexer = self.columns.reindex(axes['columns'])\n\n if row_indexer is not None and col_indexer is not None:\n indexer = row_indexer, col_indexer\n new_values = com.take_2d_multi(self.values, indexer,\n fill_value=fill_value)\n return self._constructor(new_values, index=new_index,\n columns=new_columns)\n else:\n return self._reindex_with_indexers({0: [new_index, row_indexer],\n 1: [new_columns, col_indexer]},\n copy=copy,\n fill_value=fill_value)\n\n @Appender(_shared_docs['reindex'] % _shared_doc_kwargs)\n def reindex(self, index=None, columns=None, **kwargs):\n return super(DataFrame, self).reindex(index=index, columns=columns,\n **kwargs)\n\n @Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs)\n def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,\n limit=None, fill_value=np.nan):\n return super(DataFrame, self).reindex_axis(labels=labels, axis=axis,\n method=method, level=level,\n copy=copy, limit=limit,\n fill_value=fill_value)\n\n @Appender(_shared_docs['rename'] % _shared_doc_kwargs)\n def rename(self, index=None, columns=None, **kwargs):\n return super(DataFrame, self).rename(index=index, columns=columns,\n **kwargs)\n\n def set_index(self, keys, drop=True, append=False, inplace=False,\n verify_integrity=False):\n \"\"\"\n Set the DataFrame index (row labels) using one or more existing\n columns. By default yields a new object.\n\n Parameters\n ----------\n keys : column label or list of column labels / arrays\n drop : boolean, default True\n Delete columns to be used as the new index\n append : boolean, default False\n Whether to append columns to existing index\n inplace : boolean, default False\n Modify the DataFrame in place (do not create a new object)\n verify_integrity : boolean, default False\n Check the new index for duplicates. Otherwise defer the check until\n necessary. 
Setting to False will improve the performance of this\n method\n\n Examples\n --------\n >>> indexed_df = df.set_index(['A', 'B'])\n >>> indexed_df2 = df.set_index(['A', [0, 1, 2, 0, 1, 2]])\n >>> indexed_df3 = df.set_index([[0, 1, 2, 0, 1, 2]])\n\n Returns\n -------\n dataframe : DataFrame\n \"\"\"\n if not isinstance(keys, list):\n keys = [keys]\n\n if inplace:\n frame = self\n else:\n frame = self.copy()\n\n arrays = []\n names = []\n if append:\n names = [x for x in self.index.names]\n if isinstance(self.index, MultiIndex):\n for i in range(self.index.nlevels):\n arrays.append(self.index.get_level_values(i))\n else:\n arrays.append(self.index)\n\n to_remove = []\n for col in keys:\n if isinstance(col, MultiIndex):\n # append all but the last column so we don't have to modify\n # the end of this loop\n for n in range(col.nlevels - 1):\n arrays.append(col.get_level_values(n))\n\n level = col.get_level_values(col.nlevels - 1)\n names.extend(col.names)\n elif isinstance(col, Series):\n level = col.values\n names.append(col.name)\n elif isinstance(col, Index):\n level = col\n names.append(col.name)\n elif isinstance(col, (list, np.ndarray, Index)):\n level = col\n names.append(None)\n else:\n level = frame[col].values\n names.append(col)\n if drop:\n to_remove.append(col)\n arrays.append(level)\n\n index = MultiIndex.from_arrays(arrays, names=names)\n\n if verify_integrity and not index.is_unique:\n duplicates = index.get_duplicates()\n raise ValueError('Index has duplicate keys: %s' % duplicates)\n\n for c in to_remove:\n del frame[c]\n\n # clear up memory usage\n index._cleanup()\n\n frame.index = index\n\n if not inplace:\n return frame\n\n def reset_index(self, level=None, drop=False, inplace=False, col_level=0,\n col_fill=''):\n \"\"\"\n For DataFrame with multi-level index, return new DataFrame with\n labeling information in the columns under the index names, defaulting\n to 'level_0', 'level_1', etc. if any are None. For a standard index,\n the index name will be used (if set), otherwise a default 'index' or\n 'level_0' (if 'index' is already taken) will be used.\n\n Parameters\n ----------\n level : int, str, tuple, or list, default None\n Only remove the given levels from the index. Removes all levels by\n default\n drop : boolean, default False\n Do not try to insert index into dataframe columns. This resets\n the index to the default integer index.\n inplace : boolean, default False\n Modify the DataFrame in place (do not create a new object)\n col_level : int or str, default 0\n If the columns have multiple levels, determines which level the\n labels are inserted into. By default it is inserted into the first\n level.\n col_fill : object, default ''\n If the columns have multiple levels, determines how the other\n levels are named. 
If None then the index name is repeated.\n\n Returns\n -------\n resetted : DataFrame\n \"\"\"\n if inplace:\n new_obj = self\n else:\n new_obj = self.copy()\n\n def _maybe_casted_values(index, labels=None):\n if isinstance(index, PeriodIndex):\n values = index.asobject.values\n elif (isinstance(index, DatetimeIndex) and\n index.tz is not None):\n values = index.asobject\n else:\n values = index.values\n if values.dtype == np.object_:\n values = lib.maybe_convert_objects(values)\n\n # if we have the labels, extract the values with a mask\n if labels is not None:\n mask = labels == -1\n values = values.take(labels)\n if mask.any():\n values, changed = com._maybe_upcast_putmask(values,\n mask, np.nan)\n return values\n\n new_index = np.arange(len(new_obj),dtype='int64')\n if isinstance(self.index, MultiIndex):\n if level is not None:\n if not isinstance(level, (tuple, list)):\n level = [level]\n level = [self.index._get_level_number(lev) for lev in level]\n if len(level) < len(self.index.levels):\n new_index = self.index.droplevel(level)\n\n if not drop:\n names = self.index.names\n zipped = lzip(self.index.levels, self.index.labels)\n\n multi_col = isinstance(self.columns, MultiIndex)\n for i, (lev, lab) in reversed(list(enumerate(zipped))):\n col_name = names[i]\n if col_name is None:\n col_name = 'level_%d' % i\n\n if multi_col:\n if col_fill is None:\n col_name = tuple([col_name] *\n self.columns.nlevels)\n else:\n name_lst = [col_fill] * self.columns.nlevels\n lev_num = self.columns._get_level_number(col_level)\n name_lst[lev_num] = col_name\n col_name = tuple(name_lst)\n\n # to ndarray and maybe infer different dtype\n level_values = _maybe_casted_values(lev, lab)\n if level is None or i in level:\n new_obj.insert(0, col_name, level_values)\n\n elif not drop:\n name = self.index.name\n if name is None or name == 'index':\n name = 'index' if 'index' not in self else 'level_0'\n if isinstance(self.columns, MultiIndex):\n if col_fill is None:\n name = tuple([name] * self.columns.nlevels)\n else:\n name_lst = [col_fill] * self.columns.nlevels\n lev_num = self.columns._get_level_number(col_level)\n name_lst[lev_num] = name\n name = tuple(name_lst)\n values = _maybe_casted_values(self.index)\n new_obj.insert(0, name, values)\n\n new_obj.index = new_index\n if not inplace:\n return new_obj\n\n\n #----------------------------------------------------------------------\n # Reindex-based selection methods\n\n def dropna(self, axis=0, how='any', thresh=None, subset=None,\n inplace=False):\n \"\"\"\n Return object with labels on given axis omitted where alternately any\n or all of the data are missing\n\n Parameters\n ----------\n axis : {0, 1}, or tuple/list thereof\n Pass tuple or list to drop on multiple axes\n how : {'any', 'all'}\n * any : if any NA values are present, drop that label\n * all : if all values are NA, drop that label\n thresh : int, default None\n int value : require that many non-NA values\n subset : array-like\n Labels along other axis to consider, e.g. 
if you are dropping rows\n these would be a list of columns to include\n inplace : boolean, defalt False\n If True, do operation inplace and return None.\n\n Returns\n -------\n dropped : DataFrame\n \"\"\"\n if isinstance(axis, (tuple, list)):\n result = self\n for ax in axis:\n result = result.dropna(how=how, thresh=thresh,\n subset=subset, axis=ax)\n else:\n axis = self._get_axis_number(axis)\n agg_axis = 1 - axis\n\n agg_obj = self\n if subset is not None:\n ax = self._get_axis(agg_axis)\n indices = ax.get_indexer_for(subset)\n check = indices == -1\n if check.any():\n raise KeyError(list(np.compress(check,subset)))\n agg_obj = self.take(indices,axis=agg_axis)\n\n count = agg_obj.count(axis=agg_axis)\n\n if thresh is not None:\n mask = count >= thresh\n elif how == 'any':\n mask = count == len(agg_obj._get_axis(agg_axis))\n elif how == 'all':\n mask = count > 0\n else:\n if how is not None:\n raise ValueError('invalid how option: %s' % how)\n else:\n raise TypeError('must specify how or thresh')\n\n result = self.take(mask.nonzero()[0], axis=axis, convert=False)\n\n if inplace:\n self._update_inplace(result)\n else:\n return result\n\n @deprecate_kwarg(old_arg_name='cols', new_arg_name='subset')\n def drop_duplicates(self, subset=None, take_last=False, inplace=False):\n \"\"\"\n Return DataFrame with duplicate rows removed, optionally only\n considering certain columns\n\n Parameters\n ----------\n subset : column label or sequence of labels, optional\n Only consider certain columns for identifying duplicates, by\n default use all of the columns\n take_last : boolean, default False\n Take the last observed row in a row. Defaults to the first row\n inplace : boolean, default False\n Whether to drop duplicates in place or to return a copy\n cols : kwargs only argument of subset [deprecated]\n\n Returns\n -------\n deduplicated : DataFrame\n \"\"\"\n duplicated = self.duplicated(subset, take_last=take_last)\n\n if inplace:\n inds, = (-duplicated).nonzero()\n new_data = self._data.take(inds)\n self._update_inplace(new_data)\n else:\n return self[-duplicated]\n\n @deprecate_kwarg(old_arg_name='cols', new_arg_name='subset')\n def duplicated(self, subset=None, take_last=False):\n \"\"\"\n Return boolean Series denoting duplicate rows, optionally only\n considering certain columns\n\n Parameters\n ----------\n subset : column label or sequence of labels, optional\n Only consider certain columns for identifying duplicates, by\n default use all of the columns\n take_last : boolean, default False\n For a set of distinct duplicate rows, flag all but the last row as\n duplicated. 
Default is for all but the first row to be flagged\n cols : kwargs only argument of subset [deprecated]\n\n Returns\n -------\n duplicated : Series\n \"\"\"\n from pandas.core.groupby import get_group_index\n from pandas.core.algorithms import factorize\n from pandas.hashtable import duplicated_int64, _SIZE_HINT_LIMIT\n\n def f(vals):\n labels, shape = factorize(vals, size_hint=min(len(self), _SIZE_HINT_LIMIT))\n return labels.astype('i8',copy=False), len(shape)\n\n if subset is None:\n subset = self.columns\n elif not np.iterable(subset) or \\\n isinstance(subset, compat.string_types) or \\\n isinstance(subset, tuple) and subset in self.columns:\n subset = subset,\n\n vals = (self[col].values for col in subset)\n labels, shape = map(list, zip( * map(f, vals)))\n\n ids = get_group_index(labels, shape, sort=False, xnull=False)\n return Series(duplicated_int64(ids, take_last), index=self.index)\n\n #----------------------------------------------------------------------\n # Sorting\n\n def sort(self, columns=None, axis=0, ascending=True,\n inplace=False, kind='quicksort', na_position='last'):\n \"\"\"\n Sort DataFrame either by labels (along either axis) or by the values in\n column(s)\n\n Parameters\n ----------\n columns : object\n Column name(s) in frame. Accepts a column name or a list\n for a nested sort. A tuple will be interpreted as the\n levels of a multi-index.\n ascending : boolean or list, default True\n Sort ascending vs. descending. Specify list for multiple sort\n orders\n axis : {0, 1}\n Sort index/rows versus columns\n inplace : boolean, default False\n Sort the DataFrame without creating a new instance\n kind : {'quicksort', 'mergesort', 'heapsort'}, optional\n This option is only applied when sorting on a single column or label.\n na_position : {'first', 'last'} (optional, default='last')\n 'first' puts NaNs at the beginning\n 'last' puts NaNs at the end\n\n Examples\n --------\n >>> result = df.sort(['A', 'B'], ascending=[1, 0])\n\n Returns\n -------\n sorted : DataFrame\n \"\"\"\n return self.sort_index(by=columns, axis=axis, ascending=ascending,\n inplace=inplace, kind=kind, na_position=na_position)\n\n def sort_index(self, axis=0, by=None, ascending=True, inplace=False,\n kind='quicksort', na_position='last'):\n \"\"\"\n Sort DataFrame either by labels (along either axis) or by the values in\n a column\n\n Parameters\n ----------\n axis : {0, 1}\n Sort index/rows versus columns\n by : object\n Column name(s) in frame. Accepts a column name or a list\n for a nested sort. A tuple will be interpreted as the\n levels of a multi-index.\n ascending : boolean or list, default True\n Sort ascending vs. descending. 
Specify list for multiple sort\n orders\n inplace : boolean, default False\n Sort the DataFrame without creating a new instance\n na_position : {'first', 'last'} (optional, default='last')\n 'first' puts NaNs at the beginning\n 'last' puts NaNs at the end\n kind : {'quicksort', 'mergesort', 'heapsort'}, optional\n This option is only applied when sorting on a single column or label.\n\n Examples\n --------\n >>> result = df.sort_index(by=['A', 'B'], ascending=[True, False])\n\n Returns\n -------\n sorted : DataFrame\n \"\"\"\n\n from pandas.core.groupby import _lexsort_indexer, _nargsort\n axis = self._get_axis_number(axis)\n if axis not in [0, 1]: # pragma: no cover\n raise AssertionError('Axis must be 0 or 1, got %s' % str(axis))\n\n labels = self._get_axis(axis)\n\n if by is not None:\n if axis != 0:\n raise ValueError('When sorting by column, axis must be 0 '\n '(rows)')\n if not isinstance(by, list):\n by = [by]\n if com.is_sequence(ascending) and len(by) != len(ascending):\n raise ValueError('Length of ascending (%d) != length of by'\n ' (%d)' % (len(ascending), len(by)))\n if len(by) > 1:\n def trans(v):\n if com.needs_i8_conversion(v):\n return v.view('i8')\n return v\n keys = []\n for x in by:\n k = self[x].values\n if k.ndim == 2:\n raise ValueError('Cannot sort by duplicate column %s' % str(x))\n keys.append(trans(k))\n indexer = _lexsort_indexer(keys, orders=ascending,\n na_position=na_position)\n indexer = com._ensure_platform_int(indexer)\n else:\n by = by[0]\n k = self[by].values\n if k.ndim == 2:\n\n # try to be helpful\n if isinstance(self.columns, MultiIndex):\n raise ValueError('Cannot sort by column %s in a multi-index'\n ' you need to explicity provide all the levels'\n % str(by))\n\n raise ValueError('Cannot sort by duplicate column %s'\n % str(by))\n if isinstance(ascending, (tuple, list)):\n ascending = ascending[0]\n\n indexer = _nargsort(k, kind=kind, ascending=ascending,\n na_position=na_position)\n\n elif isinstance(labels, MultiIndex):\n\n # make sure that the axis is lexsorted to start\n # if not we need to reconstruct to get the correct indexer\n if not labels.is_lexsorted():\n labels = MultiIndex.from_tuples(labels.values)\n\n indexer = _lexsort_indexer(labels.labels, orders=ascending,\n na_position=na_position)\n indexer = com._ensure_platform_int(indexer)\n else:\n indexer = _nargsort(labels, kind=kind, ascending=ascending,\n na_position=na_position)\n\n bm_axis = self._get_block_manager_axis(axis)\n new_data = self._data.take(indexer, axis=bm_axis,\n convert=False, verify=False)\n\n if inplace:\n return self._update_inplace(new_data)\n else:\n return self._constructor(new_data).__finalize__(self)\n\n def sortlevel(self, level=0, axis=0, ascending=True,\n inplace=False, sort_remaining=True):\n \"\"\"\n Sort multilevel index by chosen axis and primary level. 
Data will be\n lexicographically sorted by the chosen level followed by the other\n levels (in order)\n\n Parameters\n ----------\n level : int\n axis : {0, 1}\n ascending : boolean, default True\n inplace : boolean, default False\n Sort the DataFrame without creating a new instance\n sort_remaining : boolean, default True\n Sort by the other levels too.\n\n Returns\n -------\n sorted : DataFrame\n \"\"\"\n axis = self._get_axis_number(axis)\n the_axis = self._get_axis(axis)\n if not isinstance(the_axis, MultiIndex):\n raise TypeError('can only sort by level with a hierarchical index')\n\n new_axis, indexer = the_axis.sortlevel(level, ascending=ascending,\n sort_remaining=sort_remaining)\n\n if self._is_mixed_type and not inplace:\n ax = 'index' if axis == 0 else 'columns'\n\n if new_axis.is_unique:\n return self.reindex(**{ax: new_axis})\n else:\n return self.take(indexer, axis=axis, convert=False)\n\n bm_axis = self._get_block_manager_axis(axis)\n new_data = self._data.take(indexer, axis=bm_axis,\n convert=False, verify=False)\n if inplace:\n return self._update_inplace(new_data)\n else:\n return self._constructor(new_data).__finalize__(self)\n\n def swaplevel(self, i, j, axis=0):\n \"\"\"\n Swap levels i and j in a MultiIndex on a particular axis\n\n Parameters\n ----------\n i, j : int, string (can be mixed)\n Level of index to be swapped. Can pass level name as string.\n\n Returns\n -------\n swapped : type of caller (new object)\n \"\"\"\n result = self.copy()\n\n axis = self._get_axis_number(axis)\n if axis == 0:\n result.index = result.index.swaplevel(i, j)\n else:\n result.columns = result.columns.swaplevel(i, j)\n return result\n\n def reorder_levels(self, order, axis=0):\n \"\"\"\n Rearrange index levels using input order.\n May not drop or duplicate levels\n\n Parameters\n ----------\n order : list of int or list of str\n List representing new level order. 
Reference level by number\n (position) or by key (label).\n axis : int\n Where to reorder levels.\n\n Returns\n -------\n type of caller (new object)\n \"\"\"\n axis = self._get_axis_number(axis)\n if not isinstance(self._get_axis(axis),\n MultiIndex): # pragma: no cover\n raise TypeError('Can only reorder levels on a hierarchical axis.')\n\n result = self.copy()\n\n if axis == 0:\n result.index = result.index.reorder_levels(order)\n else:\n result.columns = result.columns.reorder_levels(order)\n return result\n\n #----------------------------------------------------------------------\n # Arithmetic / combination related\n\n def _combine_frame(self, other, func, fill_value=None, level=None):\n this, other = self.align(other, join='outer', level=level, copy=False)\n new_index, new_columns = this.index, this.columns\n\n def _arith_op(left, right):\n if fill_value is not None:\n left_mask = isnull(left)\n right_mask = isnull(right)\n left = left.copy()\n right = right.copy()\n\n # one but not both\n mask = left_mask ^ right_mask\n left[left_mask & mask] = fill_value\n right[right_mask & mask] = fill_value\n\n return func(left, right)\n\n if this._is_mixed_type or other._is_mixed_type:\n\n # unique\n if this.columns.is_unique:\n\n def f(col):\n r = _arith_op(this[col].values, other[col].values)\n return self._constructor_sliced(r, index=new_index,\n dtype=r.dtype)\n\n result = dict([(col, f(col)) for col in this])\n\n # non-unique\n else:\n\n def f(i):\n r = _arith_op(this.iloc[:, i].values,\n other.iloc[:, i].values)\n return self._constructor_sliced(r, index=new_index,\n dtype=r.dtype)\n\n result = dict([\n (i, f(i)) for i, col in enumerate(this.columns)\n ])\n result = self._constructor(result, index=new_index, copy=False)\n result.columns = new_columns\n return result\n\n else:\n result = _arith_op(this.values, other.values)\n\n return self._constructor(result, index=new_index,\n columns=new_columns, copy=False)\n\n def _combine_series(self, other, func, fill_value=None, axis=None,\n level=None):\n if axis is not None:\n axis = self._get_axis_name(axis)\n if axis == 'index':\n return self._combine_match_index(other, func, level=level, fill_value=fill_value)\n else:\n return self._combine_match_columns(other, func, level=level, fill_value=fill_value)\n return self._combine_series_infer(other, func, level=level, fill_value=fill_value)\n\n def _combine_series_infer(self, other, func, level=None, fill_value=None):\n if len(other) == 0:\n return self * NA\n\n if len(self) == 0:\n # Ambiguous case, use _series so works with DataFrame\n return self._constructor(data=self._series, index=self.index,\n columns=self.columns)\n\n # teeny hack because one does DataFrame + TimeSeries all the time\n if self.index.is_all_dates and other.index.is_all_dates:\n warnings.warn((\"TimeSeries broadcasting along DataFrame index \"\n \"by default is deprecated. 
Please use \"\n \"DataFrame.<op> to explicitly broadcast arithmetic \"\n \"operations along the index\"),\n FutureWarning)\n return self._combine_match_index(other, func, level=level, fill_value=fill_value)\n else:\n return self._combine_match_columns(other, func, level=level, fill_value=fill_value)\n\n def _combine_match_index(self, other, func, level=None, fill_value=None):\n left, right = self.align(other, join='outer', axis=0, level=level, copy=False)\n if fill_value is not None:\n raise NotImplementedError(\"fill_value %r not supported.\" %\n fill_value)\n return self._constructor(func(left.values.T, right.values).T,\n index=left.index,\n columns=self.columns, copy=False)\n\n def _combine_match_columns(self, other, func, level=None, fill_value=None):\n left, right = self.align(other, join='outer', axis=1, level=level, copy=False)\n if fill_value is not None:\n raise NotImplementedError(\"fill_value %r not supported\" %\n fill_value)\n\n new_data = left._data.eval(\n func=func, other=right, axes=[left.columns, self.index])\n return self._constructor(new_data)\n\n def _combine_const(self, other, func, raise_on_error=True):\n if self.empty:\n return self\n\n new_data = self._data.eval(func=func, other=other, raise_on_error=raise_on_error)\n return self._constructor(new_data)\n\n def _compare_frame_evaluate(self, other, func, str_rep):\n\n # unique\n if self.columns.is_unique:\n def _compare(a, b):\n return dict([(col, func(a[col], b[col])) for col in a.columns])\n new_data = expressions.evaluate(_compare, str_rep, self, other)\n return self._constructor(data=new_data, index=self.index,\n columns=self.columns, copy=False)\n # non-unique\n else:\n def _compare(a, b):\n return dict([(i, func(a.iloc[:, i], b.iloc[:, i]))\n for i, col in enumerate(a.columns)])\n new_data = expressions.evaluate(_compare, str_rep, self, other)\n result = self._constructor(data=new_data, index=self.index,\n copy=False)\n result.columns = self.columns\n return result\n\n def _compare_frame(self, other, func, str_rep):\n if not self._indexed_same(other):\n raise ValueError('Can only compare identically-labeled '\n 'DataFrame objects')\n return self._compare_frame_evaluate(other, func, str_rep)\n\n def _flex_compare_frame(self, other, func, str_rep, level):\n if not self._indexed_same(other):\n self, other = self.align(other, 'outer', level=level, copy=False)\n return self._compare_frame_evaluate(other, func, str_rep)\n\n def combine(self, other, func, fill_value=None, overwrite=True):\n \"\"\"\n Add two DataFrame objects and do not propagate NaN values, so if for a\n (column, time) one frame is missing a value, it will default to the\n other frame's value (which might be NaN as well)\n\n Parameters\n ----------\n other : DataFrame\n func : function\n fill_value : scalar value\n overwrite : boolean, default True\n If True then overwrite values for common keys in the calling frame\n\n Returns\n -------\n result : DataFrame\n \"\"\"\n\n other_idxlen = len(other.index) # save for compare\n\n this, other = self.align(other, copy=False)\n new_index = this.index\n\n if other.empty and len(new_index) == len(self.index):\n return self.copy()\n\n if self.empty and len(other) == other_idxlen:\n return other.copy()\n\n # sorts if possible\n new_columns = this.columns.union(other.columns)\n do_fill = fill_value is not None\n\n result = {}\n for col in new_columns:\n series = this[col]\n otherSeries = other[col]\n\n this_dtype = series.dtype\n other_dtype = otherSeries.dtype\n\n this_mask = isnull(series)\n other_mask = 
isnull(otherSeries)\n\n # don't overwrite columns unecessarily\n # DO propogate if this column is not in the intersection\n if not overwrite and other_mask.all():\n result[col] = this[col].copy()\n continue\n\n if do_fill:\n series = series.copy()\n otherSeries = otherSeries.copy()\n series[this_mask] = fill_value\n otherSeries[other_mask] = fill_value\n\n # if we have different dtypes, possibily promote\n new_dtype = this_dtype\n if this_dtype != other_dtype:\n new_dtype = com._lcd_dtypes(this_dtype, other_dtype)\n series = series.astype(new_dtype)\n otherSeries = otherSeries.astype(new_dtype)\n\n # see if we need to be represented as i8 (datetimelike)\n # try to keep us at this dtype\n needs_i8_conversion = com.needs_i8_conversion(new_dtype)\n if needs_i8_conversion:\n this_dtype = new_dtype\n arr = func(series, otherSeries, True)\n else:\n arr = func(series, otherSeries)\n\n if do_fill:\n arr = com.ensure_float(arr)\n arr[this_mask & other_mask] = NA\n\n # try to downcast back to the original dtype\n if needs_i8_conversion:\n arr = com._possibly_cast_to_datetime(arr, this_dtype)\n else:\n arr = com._possibly_downcast_to_dtype(arr, this_dtype)\n\n result[col] = arr\n\n # convert_objects just in case\n return self._constructor(result,\n index=new_index,\n columns=new_columns).convert_objects(\n convert_dates=True,\n copy=False)\n\n def combine_first(self, other):\n \"\"\"\n Combine two DataFrame objects and default to non-null values in frame\n calling the method. Result index columns will be the union of the\n respective indexes and columns\n\n Parameters\n ----------\n other : DataFrame\n\n Examples\n --------\n a's values prioritized, use values from b to fill holes:\n\n >>> a.combine_first(b)\n\n\n Returns\n -------\n combined : DataFrame\n \"\"\"\n def combiner(x, y, needs_i8_conversion=False):\n x_values = x.values if hasattr(x, 'values') else x\n y_values = y.values if hasattr(y, 'values') else y\n if needs_i8_conversion:\n mask = isnull(x)\n x_values = x_values.view('i8')\n y_values = y_values.view('i8')\n else:\n mask = isnull(x_values)\n\n return expressions.where(mask, y_values, x_values,\n raise_on_error=True)\n\n return self.combine(other, combiner, overwrite=False)\n\n def update(self, other, join='left', overwrite=True, filter_func=None,\n raise_conflict=False):\n \"\"\"\n Modify DataFrame in place using non-NA values from passed\n DataFrame. Aligns on indices\n\n Parameters\n ----------\n other : DataFrame, or object coercible into a DataFrame\n join : {'left'}, default 'left'\n overwrite : boolean, default True\n If True then overwrite values for common keys in the calling frame\n filter_func : callable(1d-array) -> 1d-array<boolean>, default None\n Can choose to replace values other than NA. 
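            For example (an illustrative sketch), ``lambda arr: arr < 0`` would restrict updating to positions where the current values are negative. 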
Return True for values\n that should be updated\n raise_conflict : boolean\n If True, will raise an error if the DataFrame and other both\n contain data in the same place.\n \"\"\"\n # TODO: Support other joins\n if join != 'left': # pragma: no cover\n raise NotImplementedError(\"Only left join is supported\")\n\n if not isinstance(other, DataFrame):\n other = DataFrame(other)\n\n other = other.reindex_like(self)\n\n for col in self.columns:\n this = self[col].values\n that = other[col].values\n if filter_func is not None:\n mask = ~filter_func(this) | isnull(that)\n else:\n if raise_conflict:\n mask_this = notnull(that)\n mask_that = notnull(this)\n if any(mask_this & mask_that):\n raise ValueError(\"Data overlaps.\")\n\n if overwrite:\n mask = isnull(that)\n\n # don't overwrite columns unecessarily\n if mask.all():\n continue\n else:\n mask = notnull(this)\n\n self[col] = expressions.where(\n mask, this, that, raise_on_error=True)\n\n #----------------------------------------------------------------------\n # Misc methods\n\n def first_valid_index(self):\n \"\"\"\n Return label for first non-NA/null value\n \"\"\"\n return self.index[self.count(1) > 0][0]\n\n def last_valid_index(self):\n \"\"\"\n Return label for last non-NA/null value\n \"\"\"\n return self.index[self.count(1) > 0][-1]\n\n #----------------------------------------------------------------------\n # Data reshaping\n\n def pivot(self, index=None, columns=None, values=None):\n \"\"\"\n Reshape data (produce a \"pivot\" table) based on column values. Uses\n unique values from index / columns to form axes and return either\n DataFrame or Panel, depending on whether you request a single value\n column (DataFrame) or all columns (Panel)\n\n Parameters\n ----------\n index : string or object\n Column name to use to make new frame's index\n columns : string or object\n Column name to use to make new frame's columns\n values : string or object, optional\n Column name to use for populating new frame's values\n\n Notes\n -----\n For finer-tuned control, see hierarchical indexing documentation along\n with the related stack/unstack methods\n\n Examples\n --------\n >>> df\n foo bar baz\n 0 one A 1.\n 1 one B 2.\n 2 one C 3.\n 3 two A 4.\n 4 two B 5.\n 5 two C 6.\n\n >>> df.pivot('foo', 'bar', 'baz')\n A B C\n one 1 2 3\n two 4 5 6\n\n >>> df.pivot('foo', 'bar')['baz']\n A B C\n one 1 2 3\n two 4 5 6\n\n Returns\n -------\n pivoted : DataFrame\n If no values column specified, will have hierarchically indexed\n columns\n \"\"\"\n from pandas.core.reshape import pivot\n return pivot(self, index=index, columns=columns, values=values)\n\n def stack(self, level=-1, dropna=True):\n \"\"\"\n Pivot a level of the (possibly hierarchical) column labels, returning a\n DataFrame (or Series in the case of an object with a single level of\n column labels) having a hierarchical index with a new inner-most level\n of row labels.\n The level involved will automatically get sorted.\n\n Parameters\n ----------\n level : int, string, or list of these, default last level\n Level(s) to stack, can pass level name\n dropna : boolean, default True\n Whether to drop rows in the resulting Frame/Series with no valid\n values\n\n Examples\n ----------\n >>> s\n a b\n one 1. 2.\n two 3. 
4.\n\n >>> s.stack()\n one a 1\n b 2\n two a 3\n b 4\n\n Returns\n -------\n stacked : DataFrame or Series\n \"\"\"\n from pandas.core.reshape import stack, stack_multiple\n\n if isinstance(level, (tuple, list)):\n return stack_multiple(self, level, dropna=dropna)\n else:\n return stack(self, level, dropna=dropna)\n\n def unstack(self, level=-1):\n \"\"\"\n Pivot a level of the (necessarily hierarchical) index labels, returning\n a DataFrame having a new level of column labels whose inner-most level\n consists of the pivoted index labels. If the index is not a MultiIndex,\n the output will be a Series (the analogue of stack when the columns are\n not a MultiIndex).\n The level involved will automatically get sorted.\n\n Parameters\n ----------\n level : int, string, or list of these, default -1 (last level)\n Level(s) of index to unstack, can pass level name\n\n See also\n --------\n DataFrame.pivot : Pivot a table based on column values.\n DataFrame.stack : Pivot a level of the column labels (inverse operation\n from `unstack`).\n\n Examples\n --------\n >>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),\n ... ('two', 'a'), ('two', 'b')])\n >>> s = pd.Series(np.arange(1.0, 5.0), index=index)\n >>> s\n one a 1\n b 2\n two a 3\n b 4\n dtype: float64\n\n >>> s.unstack(level=-1)\n a b\n one 1 2\n two 3 4\n\n >>> s.unstack(level=0)\n one two\n a 1 3\n b 2 4\n\n >>> df = s.unstack(level=0)\n >>> df.unstack()\n one a 1.\n b 3.\n two a 2.\n b 4.\n\n Returns\n -------\n unstacked : DataFrame or Series\n \"\"\"\n from pandas.core.reshape import unstack\n return unstack(self, level)\n\n #----------------------------------------------------------------------\n # Time series-related\n\n def diff(self, periods=1):\n \"\"\"\n 1st discrete difference of object\n\n Parameters\n ----------\n periods : int, default 1\n Periods to shift for forming difference\n\n Returns\n -------\n diffed : DataFrame\n \"\"\"\n new_data = self._data.diff(n=periods)\n return self._constructor(new_data)\n\n #----------------------------------------------------------------------\n # Function application\n\n def apply(self, func, axis=0, broadcast=False, raw=False, reduce=None,\n args=(), **kwds):\n \"\"\"\n Applies function along input axis of DataFrame.\n\n Objects passed to functions are Series objects having index\n either the DataFrame's index (axis=0) or the columns (axis=1).\n Return type depends on whether passed function aggregates, or the\n reduce argument if the DataFrame is empty.\n\n Parameters\n ----------\n func : function\n Function to apply to each column/row\n axis : {0, 1}\n * 0 : apply function to each column\n * 1 : apply function to each row\n broadcast : boolean, default False\n For aggregation functions, return object of same size with values\n propagated\n reduce : boolean or None, default None\n Try to apply reduction procedures. If the DataFrame is empty,\n apply will use reduce to determine whether the result should be a\n Series or a DataFrame. If reduce is None (the default), apply's\n return value will be guessed by calling func an empty Series (note:\n while guessing, exceptions raised by func will be ignored). If\n reduce is True a Series will always be returned, and if False a\n DataFrame will always be returned.\n raw : boolean, default False\n If False, convert each row or column into a Series. If raw=True the\n passed function will receive ndarray objects instead. 
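            For instance (an illustrative sketch), ``df.apply(np.sum, raw=True)`` hands each column to ``np.sum`` as a plain ndarray rather than a Series. 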
If you are\n just applying a NumPy reduction function this will achieve much\n better performance\n args : tuple\n Positional arguments to pass to function in addition to the\n array/series\n Additional keyword arguments will be passed as keywords to the function\n\n Notes\n -----\n In the current implementation apply calls func twice on the\n first column/row to decide whether it can take a fast or slow\n code path. This can lead to unexpected behavior if func has\n side-effects, as they will take effect twice for the first\n column/row.\n\n Examples\n --------\n >>> df.apply(numpy.sqrt) # returns DataFrame\n >>> df.apply(numpy.sum, axis=0) # equiv to df.sum(0)\n >>> df.apply(numpy.sum, axis=1) # equiv to df.sum(1)\n\n See also\n --------\n DataFrame.applymap: For elementwise operations\n\n Returns\n -------\n applied : Series or DataFrame\n \"\"\"\n axis = self._get_axis_number(axis)\n if kwds or args and not isinstance(func, np.ufunc):\n f = lambda x: func(x, *args, **kwds)\n else:\n f = func\n\n if len(self.columns) == 0 and len(self.index) == 0:\n return self._apply_empty_result(func, axis, reduce, *args, **kwds)\n\n if isinstance(f, np.ufunc):\n results = f(self.values)\n return self._constructor(data=results, index=self.index,\n columns=self.columns, copy=False)\n else:\n if not broadcast:\n if not all(self.shape):\n return self._apply_empty_result(func, axis, reduce, *args,\n **kwds)\n\n if raw and not self._is_mixed_type:\n return self._apply_raw(f, axis)\n else:\n if reduce is None:\n reduce = True\n return self._apply_standard(f, axis, reduce=reduce)\n else:\n return self._apply_broadcast(f, axis)\n\n def _apply_empty_result(self, func, axis, reduce, *args, **kwds):\n if reduce is None:\n reduce = False\n try:\n reduce = not isinstance(func(_EMPTY_SERIES, *args, **kwds),\n Series)\n except Exception:\n pass\n\n if reduce:\n return Series(NA, index=self._get_agg_axis(axis))\n else:\n return self.copy()\n\n def _apply_raw(self, func, axis):\n try:\n result = lib.reduce(self.values, func, axis=axis)\n except Exception:\n result = np.apply_along_axis(func, axis, self.values)\n\n # TODO: mixed type case\n if result.ndim == 2:\n return DataFrame(result, index=self.index,\n columns=self.columns)\n else:\n return Series(result, index=self._get_agg_axis(axis))\n\n def _apply_standard(self, func, axis, ignore_failures=False, reduce=True):\n\n # skip if we are mixed datelike and trying reduce across axes\n # GH6125\n if reduce and axis==1 and self._is_mixed_type and self._is_datelike_mixed_type:\n reduce=False\n\n # try to reduce first (by default)\n # this only matters if the reduction in values is of different dtype\n # e.g. 
if we want to apply to a SparseFrame, then can't directly reduce\n if reduce:\n\n try:\n\n # the is the fast-path\n values = self.values\n dummy = Series(NA, index=self._get_axis(axis),\n dtype=values.dtype)\n\n labels = self._get_agg_axis(axis)\n result = lib.reduce(values, func, axis=axis, dummy=dummy,\n labels=labels)\n return Series(result, index=labels)\n except Exception:\n pass\n\n dtype = object if self._is_mixed_type else None\n if axis == 0:\n series_gen = (self.icol(i) for i in range(len(self.columns)))\n res_index = self.columns\n res_columns = self.index\n elif axis == 1:\n res_index = self.index\n res_columns = self.columns\n values = self.values\n series_gen = (Series.from_array(arr, index=res_columns, name=name, dtype=dtype)\n for i, (arr, name) in\n enumerate(zip(values, res_index)))\n else: # pragma : no cover\n raise AssertionError('Axis must be 0 or 1, got %s' % str(axis))\n\n i = None\n keys = []\n results = {}\n if ignore_failures:\n successes = []\n for i, v in enumerate(series_gen):\n try:\n results[i] = func(v)\n keys.append(v.name)\n successes.append(i)\n except Exception:\n pass\n # so will work with MultiIndex\n if len(successes) < len(res_index):\n res_index = res_index.take(successes)\n else:\n try:\n for i, v in enumerate(series_gen):\n results[i] = func(v)\n keys.append(v.name)\n except Exception as e:\n if hasattr(e, 'args'):\n # make sure i is defined\n if i is not None:\n k = res_index[i]\n e.args = e.args + ('occurred at index %s' %\n com.pprint_thing(k),)\n raise\n\n if len(results) > 0 and is_sequence(results[0]):\n if not isinstance(results[0], Series):\n index = res_columns\n else:\n index = None\n\n result = self._constructor(data=results, index=index)\n result.columns = res_index\n\n if axis == 1:\n result = result.T\n result = result.convert_objects(copy=False)\n\n else:\n\n result = Series(results)\n result.index = res_index\n\n return result\n\n def _apply_broadcast(self, func, axis):\n if axis == 0:\n target = self\n elif axis == 1:\n target = self.T\n else: # pragma: no cover\n raise AssertionError('Axis must be 0 or 1, got %s' % axis)\n\n result_values = np.empty_like(target.values)\n columns = target.columns\n for i, col in enumerate(columns):\n result_values[:, i] = func(target[col])\n\n result = self._constructor(result_values, index=target.index,\n columns=target.columns)\n\n if axis == 1:\n result = result.T\n\n return result\n\n def applymap(self, func):\n \"\"\"\n Apply a function to a DataFrame that is intended to operate\n elementwise, i.e. like doing map(func, series) for each series in the\n DataFrame\n\n Parameters\n ----------\n func : function\n Python function, returns a single value from a single value\n\n Returns\n -------\n applied : DataFrame\n\n See also\n --------\n DataFrame.apply : For operations on rows/columns\n\n \"\"\"\n\n # if we have a dtype == 'M8[ns]', provide boxed values\n def infer(x):\n if com.needs_i8_conversion(x):\n f = com.i8_boxer(x)\n x = lib.map_infer(_values_from_object(x), f)\n return lib.map_infer(_values_from_object(x), func)\n return self.apply(infer)\n\n #----------------------------------------------------------------------\n # Merging / joining methods\n\n def append(self, other, ignore_index=False, verify_integrity=False):\n \"\"\"\n Append rows of `other` to the end of this frame, returning a new\n object. 
Columns not in this frame are added as new columns.\n\n Parameters\n ----------\n other : DataFrame or Series/dict-like object, or list of these\n The data to append.\n ignore_index : boolean, default False\n If True, do not use the index labels.\n verify_integrity : boolean, default False\n If True, raise ValueError on creating index with duplicates.\n\n Returns\n -------\n appended : DataFrame\n\n Notes\n -----\n If a list of dict/series is passed and the keys are all contained in the\n DataFrame's index, the order of the columns in the resulting DataFrame\n will be unchanged.\n\n See also\n --------\n pandas.concat : General function to concatenate DataFrame, Series\n or Panel objects\n\n Examples\n --------\n\n >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB'))\n >>> df\n A B\n 0 1 2\n 1 3 4\n >>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=list('AB'))\n >>> df.append(df2)\n A B\n 0 1 2\n 1 3 4\n 0 5 6\n 1 7 8\n\n With `ignore_index` set to True:\n\n >>> df.append(df2, ignore_index=True)\n A B\n 0 1 2\n 1 3 4\n 2 5 6\n 3 7 8\n\n \"\"\"\n if isinstance(other, (Series, dict)):\n if isinstance(other, dict):\n other = Series(other)\n if other.name is None and not ignore_index:\n raise TypeError('Can only append a Series if ignore_index=True'\n ' or if the Series has a name')\n\n index = None if other.name is None else [other.name]\n combined_columns = self.columns.tolist() + self.columns.union(other.index).difference(self.columns).tolist()\n other = other.reindex(combined_columns, copy=False)\n other = DataFrame(other.values.reshape((1, len(other))),\n index=index, columns=combined_columns).convert_objects()\n if not self.columns.equals(combined_columns):\n self = self.reindex(columns=combined_columns)\n elif isinstance(other, list) and not isinstance(other[0], DataFrame):\n other = DataFrame(other)\n if (self.columns.get_indexer(other.columns) >= 0).all():\n other = other.ix[:, self.columns]\n\n from pandas.tools.merge import concat\n if isinstance(other, (list, tuple)):\n to_concat = [self] + other\n else:\n to_concat = [self, other]\n return concat(to_concat, ignore_index=ignore_index,\n verify_integrity=verify_integrity)\n\n def join(self, other, on=None, how='left', lsuffix='', rsuffix='',\n sort=False):\n \"\"\"\n Join columns with other DataFrame either on index or on a key\n column. Efficiently Join multiple DataFrame objects by index at once by\n passing a list.\n\n Parameters\n ----------\n other : DataFrame, Series with name field set, or list of DataFrame\n Index should be similar to one of the columns in this one. If a\n Series is passed, its name attribute must be set, and that will be\n used as the column name in the resulting joined DataFrame\n on : column name, tuple/list of column names, or array-like\n Column(s) to use for joining, otherwise join on index. If multiples\n columns given, the passed DataFrame must have a MultiIndex. Can\n pass an array as the join key if not already contained in the\n calling DataFrame. Like an Excel VLOOKUP operation\n how : {'left', 'right', 'outer', 'inner'}\n How to handle indexes of the two objects. 
Default: 'left'\n for joining on index, None otherwise\n\n * left: use calling frame's index\n * right: use input frame's index\n * outer: form union of indexes\n * inner: use intersection of indexes\n lsuffix : string\n Suffix to use from left frame's overlapping columns\n rsuffix : string\n Suffix to use from right frame's overlapping columns\n sort : boolean, default False\n Order result DataFrame lexicographically by the join key. If False,\n preserves the index order of the calling (left) DataFrame\n\n Notes\n -----\n on, lsuffix, and rsuffix options are not supported when passing a list\n of DataFrame objects\n\n Returns\n -------\n joined : DataFrame\n \"\"\"\n # For SparseDataFrame's benefit\n return self._join_compat(other, on=on, how=how, lsuffix=lsuffix,\n rsuffix=rsuffix, sort=sort)\n\n def _join_compat(self, other, on=None, how='left', lsuffix='', rsuffix='',\n sort=False):\n from pandas.tools.merge import merge, concat\n\n if isinstance(other, Series):\n if other.name is None:\n raise ValueError('Other Series must have a name')\n other = DataFrame({other.name: other})\n\n if isinstance(other, DataFrame):\n return merge(self, other, left_on=on, how=how,\n left_index=on is None, right_index=True,\n suffixes=(lsuffix, rsuffix), sort=sort)\n else:\n if on is not None:\n raise ValueError('Joining multiple DataFrames only supported'\n ' for joining on index')\n\n # join indexes only using concat\n if how == 'left':\n how = 'outer'\n join_axes = [self.index]\n else:\n join_axes = None\n\n frames = [self] + list(other)\n\n can_concat = all(df.index.is_unique for df in frames)\n\n if can_concat:\n return concat(frames, axis=1, join=how, join_axes=join_axes,\n verify_integrity=True)\n\n joined = frames[0]\n\n for frame in frames[1:]:\n joined = merge(joined, frame, how=how,\n left_index=True, right_index=True)\n\n return joined\n\n @Substitution('')\n @Appender(_merge_doc, indents=2)\n def merge(self, right, how='inner', on=None, left_on=None, right_on=None,\n left_index=False, right_index=False, sort=False,\n suffixes=('_x', '_y'), copy=True):\n from pandas.tools.merge import merge\n return merge(self, right, how=how, on=on,\n left_on=left_on, right_on=right_on,\n left_index=left_index, right_index=right_index, sort=sort,\n suffixes=suffixes, copy=copy)\n\n #----------------------------------------------------------------------\n # Statistical methods, etc.\n\n def corr(self, method='pearson', min_periods=1):\n \"\"\"\n Compute pairwise correlation of columns, excluding NA/null values\n\n Parameters\n ----------\n method : {'pearson', 'kendall', 'spearman'}\n * pearson : standard correlation coefficient\n * kendall : Kendall Tau correlation coefficient\n * spearman : Spearman rank correlation\n min_periods : int, optional\n Minimum number of observations required per pair of columns\n to have a valid result. 
Currently only available for pearson\n and spearman correlation\n\n Returns\n -------\n y : DataFrame\n \"\"\"\n numeric_df = self._get_numeric_data()\n cols = numeric_df.columns\n mat = numeric_df.values\n\n if method == 'pearson':\n correl = _algos.nancorr(com._ensure_float64(mat),\n minp=min_periods)\n elif method == 'spearman':\n correl = _algos.nancorr_spearman(com._ensure_float64(mat),\n minp=min_periods)\n else:\n if min_periods is None:\n min_periods = 1\n mat = mat.T\n corrf = nanops.get_corr_func(method)\n K = len(cols)\n correl = np.empty((K, K), dtype=float)\n mask = np.isfinite(mat)\n for i, ac in enumerate(mat):\n for j, bc in enumerate(mat):\n valid = mask[i] & mask[j]\n if valid.sum() < min_periods:\n c = NA\n elif not valid.all():\n c = corrf(ac[valid], bc[valid])\n else:\n c = corrf(ac, bc)\n correl[i, j] = c\n correl[j, i] = c\n\n return self._constructor(correl, index=cols, columns=cols)\n\n def cov(self, min_periods=None):\n \"\"\"\n Compute pairwise covariance of columns, excluding NA/null values\n\n Parameters\n ----------\n min_periods : int, optional\n Minimum number of observations required per pair of columns\n to have a valid result.\n\n Returns\n -------\n y : DataFrame\n\n Notes\n -----\n `y` contains the covariance matrix of the DataFrame's time series.\n The covariance is normalized by N-1 (unbiased estimator).\n \"\"\"\n numeric_df = self._get_numeric_data()\n cols = numeric_df.columns\n mat = numeric_df.values\n\n if notnull(mat).all():\n if min_periods is not None and min_periods > len(mat):\n baseCov = np.empty((mat.shape[1], mat.shape[1]))\n baseCov.fill(np.nan)\n else:\n baseCov = np.cov(mat.T)\n baseCov = baseCov.reshape((len(cols), len(cols)))\n else:\n baseCov = _algos.nancorr(com._ensure_float64(mat), cov=True,\n minp=min_periods)\n\n return self._constructor(baseCov, index=cols, columns=cols)\n\n def corrwith(self, other, axis=0, drop=False):\n \"\"\"\n Compute pairwise correlation between rows or columns of two DataFrame\n objects.\n\n Parameters\n ----------\n other : DataFrame\n axis : {0, 1}\n 0 to compute column-wise, 1 for row-wise\n drop : boolean, default False\n Drop missing indices from result, default returns union of all\n\n Returns\n -------\n correls : Series\n \"\"\"\n axis = self._get_axis_number(axis)\n if isinstance(other, Series):\n return self.apply(other.corr, axis=axis)\n\n this = self._get_numeric_data()\n other = other._get_numeric_data()\n\n left, right = this.align(other, join='inner', copy=False)\n\n # mask missing values\n left = left + right * 0\n right = right + left * 0\n\n if axis == 1:\n left = left.T\n right = right.T\n\n # demeaned data\n ldem = left - left.mean()\n rdem = right - right.mean()\n\n num = (ldem * rdem).sum()\n dom = (left.count() - 1) * left.std() * right.std()\n\n correl = num / dom\n\n if not drop:\n raxis = 1 if axis == 0 else 0\n result_index = this._get_axis(raxis).union(other._get_axis(raxis))\n correl = correl.reindex(result_index)\n\n return correl\n\n #----------------------------------------------------------------------\n # ndarray-like stats methods\n\n def count(self, axis=0, level=None, numeric_only=False):\n \"\"\"\n Return Series with number of non-NA/null observations over requested\n axis. 
Works with non-floating point data as well (detects NaN and None)\n\n Parameters\n ----------\n axis : {0, 1}\n 0 for row-wise, 1 for column-wise\n level : int or level name, default None\n If the axis is a MultiIndex (hierarchical), count along a\n particular level, collapsing into a DataFrame\n numeric_only : boolean, default False\n Include only float, int, boolean data\n\n Returns\n -------\n count : Series (or DataFrame if level specified)\n \"\"\"\n axis = self._get_axis_number(axis)\n if level is not None:\n return self._count_level(level, axis=axis,\n numeric_only=numeric_only)\n\n if numeric_only:\n frame = self._get_numeric_data()\n else:\n frame = self\n\n # GH #423\n if len(frame._get_axis(axis)) == 0:\n result = Series(0, index=frame._get_agg_axis(axis))\n else:\n if frame._is_mixed_type:\n result = notnull(frame).sum(axis=axis)\n else:\n counts = notnull(frame.values).sum(axis=axis)\n result = Series(counts, index=frame._get_agg_axis(axis))\n\n return result.astype('int64')\n\n def _count_level(self, level, axis=0, numeric_only=False):\n if numeric_only:\n frame = self._get_numeric_data()\n else:\n frame = self\n\n count_axis = frame._get_axis(axis)\n agg_axis = frame._get_agg_axis(axis)\n\n if not isinstance(count_axis, MultiIndex):\n raise TypeError(\"Can only count levels on hierarchical %s.\" %\n self._get_axis_name(axis))\n\n if frame._is_mixed_type:\n # Since we have mixed types, calling notnull(frame.values) might\n # upcast everything to object\n mask = notnull(frame).values\n else:\n # But use the speedup when we have homogeneous dtypes\n mask = notnull(frame.values)\n\n if axis == 1:\n # We're transposing the mask rather than frame to avoid potential\n # upcasts to object, which induces a ~20x slowdown\n mask = mask.T\n\n if isinstance(level, compat.string_types):\n level = count_axis._get_level_number(level)\n\n level_index = count_axis.levels[level]\n labels = com._ensure_int64(count_axis.labels[level])\n counts = lib.count_level_2d(mask, labels, len(level_index))\n\n result = DataFrame(counts, index=level_index,\n columns=agg_axis)\n\n if axis == 1:\n # Undo our earlier transpose\n return result.T\n else:\n return result\n\n def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,\n filter_type=None, **kwds):\n axis = self._get_axis_number(axis)\n f = lambda x: op(x, axis=axis, skipna=skipna, **kwds)\n labels = self._get_agg_axis(axis)\n\n # exclude timedelta/datetime unless we are uniform types\n if axis == 1 and self._is_mixed_type and self._is_datelike_mixed_type:\n numeric_only = True\n\n if numeric_only is None:\n try:\n values = self.values\n result = f(values)\n except Exception as e:\n\n # try by-column first\n if filter_type is None and axis == 0:\n try:\n\n # this can end up with a non-reduction\n # but not always. 
if the types are mixed\n # with datelike then need to make sure a series\n result = self.apply(f,reduce=False)\n if result.ndim == self.ndim:\n result = result.iloc[0]\n return result\n except:\n pass\n\n if filter_type is None or filter_type == 'numeric':\n data = self._get_numeric_data()\n elif filter_type == 'bool':\n data = self._get_bool_data()\n else: # pragma: no cover\n e = NotImplementedError(\"Handling exception with filter_\"\n \"type %s not implemented.\"\n % filter_type)\n raise_with_traceback(e)\n result = f(data.values)\n labels = data._get_agg_axis(axis)\n else:\n if numeric_only:\n if filter_type is None or filter_type == 'numeric':\n data = self._get_numeric_data()\n elif filter_type == 'bool':\n data = self._get_bool_data()\n else: # pragma: no cover\n msg = (\"Generating numeric_only data with filter_type %s\"\n \"not supported.\" % filter_type)\n raise NotImplementedError(msg)\n values = data.values\n labels = data._get_agg_axis(axis)\n else:\n values = self.values\n result = f(values)\n\n if is_object_dtype(result.dtype):\n try:\n if filter_type is None or filter_type == 'numeric':\n result = result.astype(np.float64)\n elif filter_type == 'bool' and notnull(result).all():\n result = result.astype(np.bool_)\n except (ValueError, TypeError):\n\n # try to coerce to the original dtypes item by item if we can\n if axis == 0:\n result = com._coerce_to_dtypes(result, self.dtypes)\n\n return Series(result, index=labels)\n\n def idxmin(self, axis=0, skipna=True):\n \"\"\"\n Return index of first occurrence of minimum over requested axis.\n NA/null values are excluded.\n\n Parameters\n ----------\n axis : {0, 1}\n 0 for row-wise, 1 for column-wise\n skipna : boolean, default True\n Exclude NA/null values. If an entire row/column is NA, the result\n will be NA\n\n Returns\n -------\n idxmin : Series\n\n Notes\n -----\n This method is the DataFrame version of ``ndarray.argmin``.\n\n See Also\n --------\n Series.idxmin\n \"\"\"\n axis = self._get_axis_number(axis)\n indices = nanops.nanargmin(self.values, axis=axis, skipna=skipna)\n index = self._get_axis(axis)\n result = [index[i] if i >= 0 else NA for i in indices]\n return Series(result, index=self._get_agg_axis(axis))\n\n def idxmax(self, axis=0, skipna=True):\n \"\"\"\n Return index of first occurrence of maximum over requested axis.\n NA/null values are excluded.\n\n Parameters\n ----------\n axis : {0, 1}\n 0 for row-wise, 1 for column-wise\n skipna : boolean, default True\n Exclude NA/null values. If an entire row/column is NA, the result\n will be first index.\n\n Returns\n -------\n idxmax : Series\n\n Notes\n -----\n This method is the DataFrame version of ``ndarray.argmax``.\n\n See Also\n --------\n Series.idxmax\n \"\"\"\n axis = self._get_axis_number(axis)\n indices = nanops.nanargmax(self.values, axis=axis, skipna=skipna)\n index = self._get_axis(axis)\n result = [index[i] if i >= 0 else NA for i in indices]\n return Series(result, index=self._get_agg_axis(axis))\n\n def _get_agg_axis(self, axis_num):\n \"\"\" let's be explict about this \"\"\"\n if axis_num == 0:\n return self.columns\n elif axis_num == 1:\n return self.index\n else:\n raise ValueError('Axis must be 0 or 1 (got %r)' % axis_num)\n\n def mode(self, axis=0, numeric_only=False):\n \"\"\"\n Gets the mode(s) of each element along the axis selected. Empty if nothing\n has 2+ occurrences. Adds a row for each mode per label, fills in gaps\n with nan. 
\n \n Note that there could be multiple values returned for the selected\n axis (when more than one item share the maximum frequency), which is the \n reason why a dataframe is returned. If you want to impute missing values \n with the mode in a dataframe ``df``, you can just do this: \n ``df.fillna(df.mode().iloc[0])``\n\n Parameters\n ----------\n axis : {0, 1, 'index', 'columns'} (default 0)\n * 0/'index' : get mode of each column\n * 1/'columns' : get mode of each row\n numeric_only : boolean, default False\n if True, only apply to numeric columns\n\n Returns\n -------\n modes : DataFrame (sorted)\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': [1, 2, 1, 2, 1, 2, 3]})\n >>> df.mode()\n A\n 0 1\n 1 2\n \"\"\"\n data = self if not numeric_only else self._get_numeric_data()\n f = lambda s: s.mode()\n return data.apply(f, axis=axis)\n\n def quantile(self, q=0.5, axis=0, numeric_only=True):\n \"\"\"\n Return values at the given quantile over requested axis, a la\n numpy.percentile.\n\n Parameters\n ----------\n q : float or array-like, default 0.5 (50% quantile)\n 0 <= q <= 1, the quantile(s) to compute\n axis : {0, 1}\n 0 for row-wise, 1 for column-wise\n\n Returns\n -------\n quantiles : Series or DataFrame\n If ``q`` is an array, a DataFrame will be returned where the\n index is ``q``, the columns are the columns of self, and the\n values are the quantiles.\n If ``q`` is a float, a Series will be returned where the\n index is the columns of self and the values are the quantiles.\n\n Examples\n --------\n\n >>> df = DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]),\n columns=['a', 'b'])\n >>> df.quantile(.1)\n a 1.3\n b 3.7\n dtype: float64\n >>> df.quantile([.1, .5])\n a b\n 0.1 1.3 3.7\n 0.5 2.5 55.0\n \"\"\"\n per = np.asarray(q) * 100\n\n if not com.is_list_like(per):\n per = [per]\n q = [q]\n squeeze = True\n else:\n squeeze = False\n\n def f(arr, per):\n if arr._is_datelike_mixed_type:\n values = _values_from_object(arr).view('i8')\n else:\n values = arr.astype(float)\n values = values[notnull(values)]\n if len(values) == 0:\n return NA\n else:\n return _quantile(values, per)\n\n data = self._get_numeric_data() if numeric_only else self\n if axis == 1:\n data = data.T\n\n # need to know which cols are timestamp going in so that we can\n # map timestamp over them after getting the quantile.\n is_dt_col = data.dtypes.map(com.is_datetime64_dtype)\n is_dt_col = is_dt_col[is_dt_col].index\n\n quantiles = [[f(vals, x) for x in per]\n for (_, vals) in data.iteritems()]\n result = DataFrame(quantiles, index=data._info_axis, columns=q).T\n if len(is_dt_col) > 0:\n result[is_dt_col] = result[is_dt_col].applymap(lib.Timestamp)\n if squeeze:\n if result.shape == (1, 1):\n result = result.T.iloc[:, 0] # don't want scalar\n else:\n result = result.T.squeeze()\n result.name = None # For groupby, so it can set an index name\n return result\n\n def rank(self, axis=0, numeric_only=None, method='average',\n na_option='keep', ascending=True, pct=False):\n \"\"\"\n Compute numerical data ranks (1 through n) along axis. 
Equal values are\n assigned a rank that is the average of the ranks of those values\n\n Parameters\n ----------\n axis : {0, 1}, default 0\n Ranks over columns (0) or rows (1)\n numeric_only : boolean, default None\n Include only float, int, boolean data\n method : {'average', 'min', 'max', 'first', 'dense'}\n * average: average rank of group\n * min: lowest rank in group\n * max: highest rank in group\n * first: ranks assigned in order they appear in the array\n * dense: like 'min', but rank always increases by 1 between groups\n na_option : {'keep', 'top', 'bottom'}\n * keep: leave NA values where they are\n * top: smallest rank if ascending\n * bottom: smallest rank if descending\n ascending : boolean, default True\n False for ranks by high (1) to low (N)\n pct : boolean, default False\n Computes percentage rank of data\n\n Returns\n -------\n ranks : DataFrame\n \"\"\"\n axis = self._get_axis_number(axis)\n if numeric_only is None:\n try:\n ranks = algos.rank(self.values, axis=axis, method=method,\n ascending=ascending, na_option=na_option,\n pct=pct)\n return self._constructor(ranks, index=self.index,\n columns=self.columns)\n except TypeError:\n numeric_only = True\n if numeric_only:\n data = self._get_numeric_data()\n else:\n data = self\n ranks = algos.rank(data.values, axis=axis, method=method,\n ascending=ascending, na_option=na_option, pct=pct)\n return self._constructor(ranks, index=data.index, columns=data.columns)\n\n def to_timestamp(self, freq=None, how='start', axis=0, copy=True):\n \"\"\"\n Cast to DatetimeIndex of timestamps, at *beginning* of period\n\n Parameters\n ----------\n freq : string, default frequency of PeriodIndex\n Desired frequency\n how : {'s', 'e', 'start', 'end'}\n Convention for converting period to timestamp; start of period\n vs. end\n axis : {0, 1} default 0\n The axis to convert (the index by default)\n copy : boolean, default True\n If false then underlying input data is not copied\n\n Returns\n -------\n df : DataFrame with DatetimeIndex\n \"\"\"\n new_data = self._data\n if copy:\n new_data = new_data.copy()\n\n axis = self._get_axis_number(axis)\n if axis == 0:\n new_data.set_axis(1, self.index.to_timestamp(freq=freq, how=how))\n elif axis == 1:\n new_data.set_axis(0, self.columns.to_timestamp(freq=freq, how=how))\n else: # pragma: no cover\n raise AssertionError('Axis must be 0 or 1. Got %s' % str(axis))\n\n return self._constructor(new_data)\n\n def to_period(self, freq=None, axis=0, copy=True):\n \"\"\"\n Convert DataFrame from DatetimeIndex to PeriodIndex with desired\n frequency (inferred from index if not passed)\n\n Parameters\n ----------\n freq : string, default\n axis : {0, 1}, default 0\n The axis to convert (the index by default)\n copy : boolean, default True\n If False then underlying input data is not copied\n\n Returns\n -------\n ts : TimeSeries with PeriodIndex\n \"\"\"\n new_data = self._data\n if copy:\n new_data = new_data.copy()\n\n axis = self._get_axis_number(axis)\n if axis == 0:\n new_data.set_axis(1, self.index.to_period(freq=freq))\n elif axis == 1:\n new_data.set_axis(0, self.columns.to_period(freq=freq))\n else: # pragma: no cover\n raise AssertionError('Axis must be 0 or 1. 
Got %s' % str(axis))\n\n return self._constructor(new_data)\n\n def isin(self, values):\n \"\"\"\n Return boolean DataFrame showing whether each element in the\n DataFrame is contained in values.\n\n Parameters\n ----------\n values : iterable, Series, DataFrame or dictionary\n The result will only be true at a location if all the\n labels match. If `values` is a Series, that's the index. If\n `values` is a dictionary, the keys must be the column names,\n which must match. If `values` is a DataFrame,\n then both the index and column labels must match.\n\n Returns\n -------\n\n DataFrame of booleans\n\n Examples\n --------\n When ``values`` is a list:\n\n >>> df = DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']})\n >>> df.isin([1, 3, 12, 'a'])\n A B\n 0 True True\n 1 False False\n 2 True False\n\n When ``values`` is a dict:\n\n >>> df = DataFrame({'A': [1, 2, 3], 'B': [1, 4, 7]})\n >>> df.isin({'A': [1, 3], 'B': [4, 7, 12]})\n A B\n 0 True False # Note that B didn't match the 1 here.\n 1 False True\n 2 True True\n\n When ``values`` is a Series or DataFrame:\n\n >>> df = DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']})\n >>> other = DataFrame({'A': [1, 3, 3, 2], 'B': ['e', 'f', 'f', 'e']})\n >>> df.isin(other)\n A B\n 0 True False\n 1 False False # Column A in `other` has a 3, but not at index 1.\n 2 True True\n \"\"\"\n if isinstance(values, dict):\n from collections import defaultdict\n from pandas.tools.merge import concat\n values = defaultdict(list, values)\n return concat((self.iloc[:, [i]].isin(values[col])\n for i, col in enumerate(self.columns)), axis=1)\n elif isinstance(values, Series):\n if not values.index.is_unique:\n raise ValueError(\"ValueError: cannot compute isin with\"\n \" a duplicate axis.\")\n return self.eq(values.reindex_like(self), axis='index')\n elif isinstance(values, DataFrame):\n if not (values.columns.is_unique and values.index.is_unique):\n raise ValueError(\"ValueError: cannot compute isin with\"\n \" a duplicate axis.\")\n return self.eq(values.reindex_like(self))\n else:\n if not is_list_like(values):\n raise TypeError(\"only list-like or dict-like objects are\"\n \" allowed to be passed to DataFrame.isin(), \"\n \"you passed a \"\n \"{0!r}\".format(type(values).__name__))\n return DataFrame(lib.ismember(self.values.ravel(),\n set(values)).reshape(self.shape),\n self.index,\n self.columns)\n\n #----------------------------------------------------------------------\n # Deprecated stuff\n\n def combineAdd(self, other):\n \"\"\"\n Add two DataFrame objects and do not propagate\n NaN values, so if for a (column, time) one frame is missing a\n value, it will default to the other frame's value (which might\n be NaN as well)\n\n Parameters\n ----------\n other : DataFrame\n\n Returns\n -------\n DataFrame\n \"\"\"\n return self.add(other, fill_value=0.)\n\n def combineMult(self, other):\n \"\"\"\n Multiply two DataFrame objects and do not propagate NaN values, so if\n for a (column, time) one frame is missing a value, it will default to\n the other frame's value (which might be NaN as well)\n\n Parameters\n ----------\n other : DataFrame\n\n Returns\n -------\n DataFrame\n \"\"\"\n return self.mul(other, fill_value=1.)\n\n\nDataFrame._setup_axes(['index', 'columns'], info_axis=1, stat_axis=0,\n axes_are_reversed=True, aliases={'rows': 0})\nDataFrame._add_numeric_operations()\n\n_EMPTY_SERIES = Series([])\n\ndef _arrays_to_mgr(arrays, arr_names, index, columns, dtype=None):\n \"\"\"\n Segregate Series based on type and coerce into matrices.\n Needs to handle a 
lot of exceptional cases.\n \"\"\"\n # figure out the index, if necessary\n if index is None:\n index = extract_index(arrays)\n else:\n index = _ensure_index(index)\n\n # don't force copy because getting jammed in an ndarray anyway\n arrays = _homogenize(arrays, index, dtype)\n\n # from BlockManager perspective\n axes = [_ensure_index(columns), _ensure_index(index)]\n\n return create_block_manager_from_arrays(arrays, arr_names, axes)\n\n\ndef extract_index(data):\n from pandas.core.index import _union_indexes\n\n index = None\n if len(data) == 0:\n index = Index([])\n elif len(data) > 0:\n raw_lengths = []\n indexes = []\n\n have_raw_arrays = False\n have_series = False\n have_dicts = False\n\n for v in data:\n if isinstance(v, Series):\n have_series = True\n indexes.append(v.index)\n elif isinstance(v, dict):\n have_dicts = True\n indexes.append(list(v.keys()))\n elif is_list_like(v) and getattr(v, 'ndim', 1) == 1:\n have_raw_arrays = True\n raw_lengths.append(len(v))\n\n if not indexes and not raw_lengths:\n raise ValueError('If using all scalar values, you must pass'\n ' an index')\n\n if have_series or have_dicts:\n index = _union_indexes(indexes)\n\n if have_raw_arrays:\n lengths = list(set(raw_lengths))\n if len(lengths) > 1:\n raise ValueError('arrays must all be same length')\n\n if have_dicts:\n raise ValueError('Mixing dicts with non-Series may lead to '\n 'ambiguous ordering.')\n\n if have_series:\n if lengths[0] != len(index):\n msg = ('array length %d does not match index length %d'\n % (lengths[0], len(index)))\n raise ValueError(msg)\n else:\n index = Index(np.arange(lengths[0]))\n\n return _ensure_index(index)\n\n\ndef _prep_ndarray(values, copy=True):\n if not isinstance(values, (np.ndarray, Series, Index)):\n if len(values) == 0:\n return np.empty((0, 0), dtype=object)\n\n def convert(v):\n return com._possibly_convert_platform(v)\n\n # we could have a 1-dim or 2-dim list here\n # this is equiv of np.asarray, but does object conversion\n # and platform dtype preservation\n try:\n if com.is_list_like(values[0]) or hasattr(values[0], 'len'):\n values = np.array([convert(v) for v in values])\n else:\n values = convert(values)\n except:\n values = convert(values)\n\n else:\n\n # drop subclass info, do not copy data\n values = np.asarray(values)\n if copy:\n values = values.copy()\n\n if values.ndim == 1:\n values = values.reshape((values.shape[0], 1))\n elif values.ndim != 2:\n raise ValueError('Must pass 2-d input')\n\n return values\n\n\ndef _to_arrays(data, columns, coerce_float=False, dtype=None):\n \"\"\"\n Return list of arrays, columns\n \"\"\"\n if isinstance(data, DataFrame):\n if columns is not None:\n arrays = [data.icol(i).values for i, col in enumerate(data.columns)\n if col in columns]\n else:\n columns = data.columns\n arrays = [data.icol(i).values for i in range(len(columns))]\n\n return arrays, columns\n\n if not len(data):\n if isinstance(data, np.ndarray):\n columns = data.dtype.names\n if columns is not None:\n return [[]] * len(columns), columns\n return [], [] # columns if columns is not None else []\n if isinstance(data[0], (list, tuple)):\n return _list_to_arrays(data, columns, coerce_float=coerce_float,\n dtype=dtype)\n elif isinstance(data[0], collections.Mapping):\n return _list_of_dict_to_arrays(data, columns,\n coerce_float=coerce_float,\n dtype=dtype)\n elif isinstance(data[0], Series):\n return _list_of_series_to_arrays(data, columns,\n coerce_float=coerce_float,\n dtype=dtype)\n elif isinstance(data[0], Categorical):\n if columns is None:\n 
columns = _default_index(len(data))\n return data, columns\n elif (isinstance(data, (np.ndarray, Series, Index))\n and data.dtype.names is not None):\n\n columns = list(data.dtype.names)\n arrays = [data[k] for k in columns]\n return arrays, columns\n else:\n # last ditch effort\n data = lmap(tuple, data)\n return _list_to_arrays(data, columns,\n coerce_float=coerce_float,\n dtype=dtype)\n\n\ndef _masked_rec_array_to_mgr(data, index, columns, dtype, copy):\n \"\"\" extract from a masked rec array and create the manager \"\"\"\n\n # essentially process a record array then fill it\n fill_value = data.fill_value\n fdata = ma.getdata(data)\n if index is None:\n index = _get_names_from_index(fdata)\n if index is None:\n index = _default_index(len(data))\n index = _ensure_index(index)\n\n if columns is not None:\n columns = _ensure_index(columns)\n arrays, arr_columns = _to_arrays(fdata, columns)\n\n # fill if needed\n new_arrays = []\n for fv, arr, col in zip(fill_value, arrays, arr_columns):\n mask = ma.getmaskarray(data[col])\n if mask.any():\n arr, fv = _maybe_upcast(arr, fill_value=fv, copy=True)\n arr[mask] = fv\n new_arrays.append(arr)\n\n # create the manager\n arrays, arr_columns = _reorder_arrays(new_arrays, arr_columns, columns)\n if columns is None:\n columns = arr_columns\n\n mgr = _arrays_to_mgr(arrays, arr_columns, index, columns)\n\n if copy:\n mgr = mgr.copy()\n return mgr\n\n\ndef _reorder_arrays(arrays, arr_columns, columns):\n # reorder according to the columns\n if (columns is not None and len(columns) and arr_columns is not None and\n len(arr_columns)):\n indexer = _ensure_index(\n arr_columns).get_indexer(columns)\n arr_columns = _ensure_index(\n [arr_columns[i] for i in indexer])\n arrays = [arrays[i] for i in indexer]\n return arrays, arr_columns\n\n\ndef _list_to_arrays(data, columns, coerce_float=False, dtype=None):\n if len(data) > 0 and isinstance(data[0], tuple):\n content = list(lib.to_object_array_tuples(data).T)\n else:\n # list of lists\n content = list(lib.to_object_array(data).T)\n return _convert_object_array(content, columns, dtype=dtype,\n coerce_float=coerce_float)\n\n\ndef _list_of_series_to_arrays(data, columns, coerce_float=False, dtype=None):\n from pandas.core.index import _get_combined_index\n\n if columns is None:\n columns = _get_combined_index([\n s.index for s in data if getattr(s, 'index', None) is not None\n ])\n\n indexer_cache = {}\n\n aligned_values = []\n for s in data:\n index = getattr(s, 'index', None)\n if index is None:\n index = _default_index(len(s))\n\n if id(index) in indexer_cache:\n indexer = indexer_cache[id(index)]\n else:\n indexer = indexer_cache[id(index)] = index.get_indexer(columns)\n\n values = _values_from_object(s)\n aligned_values.append(com.take_1d(values, indexer))\n\n values = np.vstack(aligned_values)\n\n if values.dtype == np.object_:\n content = list(values.T)\n return _convert_object_array(content, columns, dtype=dtype,\n coerce_float=coerce_float)\n else:\n return values.T, columns\n\n\ndef _list_of_dict_to_arrays(data, columns, coerce_float=False, dtype=None):\n if columns is None:\n gen = (list(x.keys()) for x in data)\n columns = lib.fast_unique_multiple_list_gen(gen)\n\n # assure that they are of the base dict class and not of derived\n # classes\n data = [(type(d) is dict) and d or dict(d) for d in data]\n\n content = list(lib.dicts_to_array(data, list(columns)).T)\n return _convert_object_array(content, columns, dtype=dtype,\n coerce_float=coerce_float)\n\n\ndef _convert_object_array(content, columns, 
coerce_float=False, dtype=None):\n if columns is None:\n columns = _default_index(len(content))\n else:\n if len(columns) != len(content): # pragma: no cover\n # caller's responsibility to check for this...\n raise AssertionError('%d columns passed, passed data had %s '\n 'columns' % (len(columns), len(content)))\n\n # provide soft conversion of object dtypes\n def convert(arr):\n if dtype != object and dtype != np.object:\n arr = lib.maybe_convert_objects(arr, try_float=coerce_float)\n arr = com._possibly_cast_to_datetime(arr, dtype)\n return arr\n\n arrays = [ convert(arr) for arr in content ]\n\n return arrays, columns\n\n\ndef _get_names_from_index(data):\n index = lrange(len(data))\n has_some_name = any([getattr(s, 'name', None) is not None for s in data])\n if not has_some_name:\n return index\n\n count = 0\n for i, s in enumerate(data):\n n = getattr(s, 'name', None)\n if n is not None:\n index[i] = n\n else:\n index[i] = 'Unnamed %d' % count\n count += 1\n\n return index\n\n\ndef _homogenize(data, index, dtype=None):\n from pandas.core.series import _sanitize_array\n\n oindex = None\n homogenized = []\n\n for v in data:\n if isinstance(v, Series):\n if dtype is not None:\n v = v.astype(dtype)\n if v.index is not index:\n # Forces alignment. No need to copy data since we\n # are putting it into an ndarray later\n v = v.reindex(index, copy=False)\n else:\n if isinstance(v, dict):\n if oindex is None:\n oindex = index.astype('O')\n if type(v) == dict:\n # fast cython method\n v = lib.fast_multiget(v, oindex.values, default=NA)\n else:\n v = lib.map_infer(oindex.values, v.get)\n\n v = _sanitize_array(v, index, dtype=dtype, copy=False,\n raise_cast_failure=False)\n\n homogenized.append(v)\n\n return homogenized\n\n\ndef _from_nested_dict(data):\n # TODO: this should be seriously cythonized\n new_data = OrderedDict()\n for index, s in compat.iteritems(data):\n for col, v in compat.iteritems(s):\n new_data[col] = new_data.get(col, OrderedDict())\n new_data[col][index] = v\n return new_data\n\n\ndef _put_str(s, space):\n return ('%s' % s)[:space].ljust(space)\n\n\n#----------------------------------------------------------------------\n# Add plotting methods to DataFrame\n\nimport pandas.tools.plotting as gfx\n\nDataFrame.plot = gfx.plot_frame\nDataFrame.hist = gfx.hist_frame\n\n\n@Appender(_shared_docs['boxplot'] % _shared_doc_kwargs)\ndef boxplot(self, column=None, by=None, ax=None, fontsize=None,\n rot=0, grid=True, figsize=None, layout=None, return_type=None,\n **kwds):\n import pandas.tools.plotting as plots\n import matplotlib.pyplot as plt\n ax = plots.boxplot(self, column=column, by=by, ax=ax,\n fontsize=fontsize, grid=grid, rot=rot,\n figsize=figsize, layout=layout, return_type=return_type,\n **kwds)\n plt.draw_if_interactive()\n return ax\n\nDataFrame.boxplot = boxplot\n\nops.add_flex_arithmetic_methods(DataFrame, **ops.frame_flex_funcs)\nops.add_special_arithmetic_methods(DataFrame, **ops.frame_special_funcs)\n\nif __name__ == '__main__':\n import nose\n nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],\n exit=False)\n" ]
[ [ "pandas.core.common.is_categorical_dtype", "pandas.compat.lzip", "pandas.core.common._default_index", "pandas.core.common._possibly_infer_to_datetimelike", "pandas.core.reshape.stack", "pandas.compat.OrderedDict", "pandas.compat.raise_with_traceback", "pandas.core.common._infer_dtype_from_scalar", "pandas.computation.eval.eval", "pandas.util.decorators.Appender", "pandas.core.common.is_object_dtype", "pandas.lib.to_object_array_tuples", "pandas.core.common._try_sort", "pandas.core.index.MultiIndex.from_arrays", "pandas.tools.merge.concat", "pandas.core.reshape.stack_multiple", "pandas.core.nanops.nanargmax", "numpy.array", "pandas.core.common._lcd_dtypes", "pandas.lib.map_infer", "pandas.io.parsers.read_table", "pandas.core.groupby._lexsort_indexer", "pandas.core.groupby.get_group_index", "pandas.core.index._union_indexes", "pandas.core.common.in_interactive_session", "numpy.vstack", "pandas.compat.range", "pandas.core.generic.NDFrame.__init__", "pandas.core.common._maybe_upcast", "pandas.computation.expressions.where", "numpy.asarray", "numpy.ma.getdata", "pandas.io.stata.StataWriter", "pandas.core.common._ensure_platform_int", "pandas.io.gbq.to_gbq", "pandas.core.common._maybe_upcast_putmask", "pandas.core.series._sanitize_index", "numpy.ma.getmaskarray", "pandas.compat.StringIO", "pandas.core.common.is_iterator", "pandas.core.index._ensure_index", "pandas.core.index.MultiIndex.from_tuples", "pandas.util.decorators.deprecate", "pandas.core.common.take_1d", "pandas.compat.u", "pandas.core.common._possibly_downcast_to_dtype", "pandas.tools.merge.merge", "pandas.computation.expressions.evaluate", "pandas.core.sparse.SparseDataFrame", "numpy.rec.fromarrays", "pandas.core.nanops.get_corr_func", "pandas.core.common.take_2d_multi", "numpy.empty", "pandas.core.index.Index", "pandas.util.decorators.deprecate_kwarg", "pandas.core.common._unpickle_array", "pandas.core.common._get_info_slice", "pandas.core.internals.create_block_manager_from_blocks", "pandas.compat.iteritems", "pandas.core.common.in_ipython_frontend", "pandas.core.series.Series", "pandas.core.common.needs_i8_conversion", "pandas.core.ops.add_special_arithmetic_methods", "pandas.core.common._possibly_cast_to_datetime", "pandas.core.nanops.nanargmin", "pandas.core.common._ensure_float64", "pandas.core.common.in_qtconsole", "pandas.core.common._ensure_int64", "pandas.core.common.ensure_float", "pandas.core.reshape.pivot", "pandas.core.common.PandasError", "pandas.core.common._possibly_convert_platform", "pandas.core.algorithms.rank", "numpy.cov", "pandas.lib.fast_multiget", "pandas.tools.plotting.boxplot", "numpy.iterable", "pandas.core.format.ExcelFormatter", "pandas.lib.fast_unique_multiple_list_gen", "pandas.core.ops.add_flex_arithmetic_methods", "pandas.io.excel.ExcelWriter", "pandas.core.common._asarray_tuplesafe", "pandas.lib.to_object_array", "pandas.core.internals.create_block_manager_from_arrays", "pandas.core.common.is_bool_indexer", "pandas.core.common._maybe_box_datetimelike", "numpy.percentile", "matplotlib.pyplot.draw_if_interactive", "pandas.compat.zip", "pandas.core.common.pprint_thing", "pandas.core.format.DataFrameFormatter", "pandas.core.series.Series.from_array", "pandas.core.common.is_list_like", "pandas.core.panel.Panel", "numpy.dot", "pandas.lib.reduce", "pandas.core.common.is_sequence", "pandas.lib.maybe_convert_objects", "pandas.core.generic.NDFrame._set_item", "pandas.core.format._put_lines", "pandas.core.common.i8_boxer", "pandas.core.common.notnull", "pandas.core.indexing.check_bool_indexer", 
"pandas.core.config.get_option", "numpy.empty_like", "numpy.arange", "pandas.compat.lmap", "numpy.apply_along_axis", "pandas.core.indexing.convert_to_index_sliceable", "pandas.core.common._coerce_to_dtypes", "pandas.core.series._sanitize_array", "pandas.hashtable.duplicated_int64", "pandas.core.format.get_console_size", "pandas.util.decorators.Substitution", "numpy.isfinite", "pandas.core.groupby._nargsort", "pandas.core.reshape.unstack", "numpy.compress", "pandas.core.common._values_from_object", "pandas.core.common.isnull", "pandas.core.common.is_datetime64_dtype", "pandas.core.common._invalidate_string_dtypes", "pandas.core.indexing.maybe_droplevels" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "0.19", "0.24", "0.20", "0.25" ], "scipy": [], "tensorflow": [] } ]
AIDefender/MyMBPO
[ "d75699b65af8eea14acffc1b5738900d1079ad46" ]
[ "mbpo/static/reacher.py" ]
[ "import numpy as np\n\nclass StaticFns:\n\n @staticmethod\n def termination_fn(obs, act, next_obs):\n\n done = np.array([False]).repeat(len(obs))\n done = done[:,None]\n return done\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
colizoli/letter_color_mri
[ "f4c4d8a91aa17664bdeb16b0436fc8f8fdac2710" ]
[ "experiment/Behav_Consistency.py" ]
[ "\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nLetter-color Consistency test\nO.Colizoli 2020\nEach letter of the alphabet in random order x 2\nColor wheel opens at a randomized color on each trial (but does not turn)\nPython 2..7\n\"\"\"\n# data saved in ~/LogFiles/sub-XXX\n\n# Import necessary modules\nimport random\nimport numpy as np\nimport pandas as pd\nimport os, time # for paths and data\nfrom IPython import embed as shell\ntry:\n import Tkinter as tk # py27\n from tkColorChooser import askcolor\nexcept:\n import tkinter as tk\n from tkinter.colorchooser import askcolor\n\n\n# Get subject number via tkinter (command line doesn't work in PsychoPy)\nsubject_ID = []\nsession = []\n## INPUT WINDOW\nclass GetInput():\n def __init__(self):\n self.root2 = tk.Tk()\n self.root2.title(\"Subject and Session\")\n # always put in same location\n w = 400 # width for the Tk root\n h = 200 # height for the Tk root\n # get screen width and height\n ws = self.root2.winfo_screenwidth() # width of the screen\n hs = self.root2.winfo_screenheight() # height of the screen\n # calculate x and y coordinates for the Tk root window\n x = (ws/6) - (w/6)\n y = (hs/6) - (h/6)\n self.root2.geometry('%dx%d+%d+%d' % (w, h, x, y))\n # Subject\n self.e = tk.Entry(self.root2)\n self.e.insert(0, 'Subject Number')\n self.e.pack()\n self.e.focus_set()\n # Session\n self.e2 = tk.Entry(self.root2)\n self.e2.insert(0, 'Session')\n self.e2.pack()\n self.e2.focus_set()\n \n txt='If each letter of the alphabet\\\n \\nwere to have a unique color,\\\n \\nwhat color would it have?\\\n \\n\\nThere are no right or wrong answers.'\n # instructions\n self.instr = tk.Label(self.root2, bg='white', text=txt, font=(\"Helvetica\", 14))\n self.instr.pack()\n \n b = tk.Button(self.root2,text='OK',command=self.get_input)\n b.pack(side='bottom')\n \n self.root2.mainloop()\n \n def get_input(self):\n subj_str = self.e.get() \n sess_str = self.e2.get()\n subject_ID.append(subj_str)\n session.append(sess_str)\n self.root2.destroy()\n \n## ASK INPUT\napp = GetInput() # subject and session\nsubject_ID = int(subject_ID[0])\nsession = int(session[0])\n\n## Create LogFile folder cwd/LogFiles\ncwd = os.getcwd()\nlogfile_dir = os.path.join(cwd,'LogFiles','sub-{}'.format(subject_ID),'sess-{}'.format(session),'behav') \nif not os.path.isdir(logfile_dir):\n os.makedirs(logfile_dir)\ntimestr = time.strftime(\"%Y%m%d-%H%M%S\") \noutput_alphabet = os.path.join(logfile_dir,'sub-{}_sess-{}_task-consistency_events_{}.tsv'.format(subject_ID,session,timestr))\n\n### CONSISTENCY TASK ###\nalphabet = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']\n#alphabet = ['a','b','c']\n\nREPS = 2 # number of times to repeat whole alphabet\n\nRGBS = [] # save output\nL = '2' # place holder \n\nclass Test():\n def __init__(self):\n self.counter = 1\n self.root = tk.Tk()\n self.root.title(\"Subject {} Session {}\".format(subject_ID, session))\n # always put in same location\n # get screen width and height\n ws = self.root.winfo_screenwidth() # width of the screen\n hs = self.root.winfo_screenheight() # height of the screen\n # open in full screen\n self.root.geometry('%dx%d+%d+%d' % (ws, hs, 0, 0))\n self.open1 = tk.Button(self.root, text='Pick a color:', command=self.pick_a_color, font=('Helvetica', '36'),padx=5, pady=5)\n self.open1.pack(fill=tk.X, expand=False) \n self.letter = tk.Label(self.root, bg='white', text=L, font=(\"Helvetica\", 90))\n self.letter.pack()\n self.root.mainloop()\n \n def 
quit(self):\n RGBS.append( [L ,self.RGB, self.HEX, abc] )\n self.root.destroy()\n \n def pick_a_color(self,): \n # GET COLOR CHOOSER NOT OPEN ON TOP OF ROOT\n self.RGB,self.HEX = askcolor((random.randint(0,255), random.randint(0,255), random.randint(0,255)), parent=None, title='Pick a color: {}'.format(L) )\n self.letter.configure(fg = self.HEX)\n if self.counter:\n exit_button = tk.Button(self.root, text='FINISHED', command=self.quit, font=('Helvetica', '28'))\n exit_button.pack()\n self.counter = 0\n self.root.mainloop()\n\n# MAIN LOOP \nabc = 1 # round\nfor R in np.arange(REPS):\n random.shuffle(alphabet) \n # Open a new GUI per letter \n for L in alphabet: \n app = Test()\n # save colors on each trial to prevent losing data\n \n DFS = pd.DataFrame(RGBS)\n print(RGBS)\n\n try:\n DFS.columns = [\"letter\",\"rgb\",\"hex\",\"choice\"]\n DFS['subject'] = np.repeat(subject_ID,len(DFS))\n DFS['r'] = [c[0] for c in DFS['rgb']]\n DFS['g'] = [c[1] for c in DFS['rgb']]\n DFS['b'] = [c[2] for c in DFS['rgb']]\n except:\n # clicked window away\n pass\n DFS.to_csv(output_alphabet, sep='\\t') # save all alphabet/preferences for both groups (also in case it goes wrong)\n abc+=1\n\n####################################\n## SAVE OUTPUT & determine conditions\nprint(RGBS)\nprint('consistency test - success!')\n\n\n##### OUTPUT FIGURE WITH COLORS #####\n# Sort and show letters x 2 side by side\ndel tk # py27\ndel askcolor\nimport matplotlib.pyplot as plt # doesn't work together with tkinter\nimport seaborn as sns\nfig = plt.figure(figsize=(10,5))\n\n# Sort so the same letters go side by side for each choice\ntry:\n DFS.sort_values(by=['choice', 'letter'],inplace=True)\nexcept:\n DFS = DFS.sort(['choice', 'letter'])\n\nDFS.reset_index(inplace=True)\nfor i,A in enumerate(alphabet):\n ax = fig.add_subplot(6,5,i+1)\n ax.text(0.5, 0.5, DFS['letter'][i], color=DFS['hex'][i],fontsize=18)\n ax.text(0.25, 0.5, DFS['letter'][i+len(alphabet)], color=DFS['hex'][i+len(alphabet)],fontsize=18)\n ax.set_axis_off() \n\nsns.despine(offset=10, trim=True)\nplt.tight_layout()\nfig.savefig(os.path.join(cwd,'LogFiles','sub-{}'.format(subject_ID),'sess-{}'.format(session),'behav','sub-{}_sess-{}_colors.pdf'.format(subject_ID,session)))\nprint('success: sub-{}_sess-{}_colors.pdf'.format(subject_ID,session))\n\n \n \n" ]
[ [ "numpy.arange", "matplotlib.pyplot.tight_layout", "pandas.DataFrame", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
terraPulse/boreal-tcc-analysis
[ "e8a7b4bae727811d03bb57c5738945af7fe2920d", "e8a7b4bae727811d03bb57c5738945af7fe2920d" ]
[ "src/bin/create_esta_layers.py", "src/bin/create_age_layers.py" ]
[ "'''\r\nFile: detect_forest_change.py\r\nAuthor: Min Feng\r\nVersion: 0.1\r\nCreate: 2018-04-20 15:42:37\r\nDescription: detect forest changes from foest probility layers and tree cover layers\r\n'''\r\n\r\nimport logging\r\n\r\ndef _load_tcc(f_tcc, msk):\r\n from gio import geo_raster_ex as gx\r\n from gio import config\r\n import numpy as np\r\n\r\n _bnd = gx.read_block(f_tcc, msk)\r\n if _bnd is None:\r\n return None\r\n \r\n _dat = np.zeros(msk.data.shape, dtype=np.uint8)\r\n\r\n _m_tcc = config.getfloat('conf', 'min_tcc')\r\n _idx = _bnd.data >= _m_tcc\r\n _dat[_idx] = 100\r\n\r\n _idx = _bnd.data > 100\r\n _dat[_idx] = _bnd.data[_idx]\r\n\r\n return msk.from_grid(_dat, nodata=255)\r\n\r\ndef _task(tile, d_out, d_ref, opts):\r\n from gio import file_unzip\r\n from gio import config\r\n from gio import file_mag\r\n from gio import metadata\r\n from gio import geo_raster as ge\r\n from gio import mod_filter\r\n import numpy as np\r\n import os\r\n import re\r\n\r\n _tag = tile.tag\r\n\r\n _ttt = config.get('conf', 'test_tile')\r\n if _ttt and _tag not in _ttt.replace(' ', '').split(','):\r\n return\r\n\r\n _m = re.match(r'(h\\d+)(v\\d+)', _tag)\r\n _h = _m.group(1)\r\n _v = _m.group(2)\r\n \r\n _d_out = os.path.join(d_out, _h, _v, _tag)\r\n _d_ref = os.path.join(d_ref, _h, _v, _tag)\r\n _f_met = os.path.join(_d_out, '%s_met.txt' % _tag)\r\n \r\n _fname = lambda t: os.path.join(_d_out, '%s_%s.tif' % (_tag, t))\r\n _fname_ref = lambda t: os.path.join(_d_ref, '%s_%s.tif' % (_tag, t))\r\n _fname_m1 = lambda t, a='_m1': _fname('%s_n0%s' % (t, a))\r\n\r\n # if not file_mag.get(_f_met).exists():\r\n # logging.info('skip non-existing result for %s' % _tag)\r\n # return\r\n\r\n if not file_mag.get(_fname_m1('loss_year')).exists():\r\n logging.info('skip non-existing result for %s' % _tag)\r\n return\r\n \r\n if (not _ttt) and file_mag.get(_fname_m1('esta_year')).exists() and \\\r\n (not config.getboolean('conf', 'over_write', False)):\r\n logging.info('skip processed esta result for %s' % _tag)\r\n return\r\n \r\n _b_loss_year = ge.open(_fname_m1('loss_year')).get_band().cache()\r\n _b_gain_year = ge.open(_fname_m1('gain_year')).get_band().cache()\r\n \r\n _b_loss_prob = ge.open(_fname_m1('loss_prob')).get_band().cache()\r\n _b_gain_prob = ge.open(_fname_m1('gain_prob')).get_band().cache()\r\n\r\n _f_tcc = config.get('conf', 'latest_tcc')\r\n _b_prob = _load_tcc(_f_tcc, _b_loss_year) if _f_tcc else ge.open(_fname_ref('age_prob')).get_band().cache()\r\n if _b_prob is None:\r\n logging.info('forced to use age_prob layer %s' % _fname_ref('age_prob'))\r\n _b_prob = ge.open(_fname_ref('age_prob')).get_band().cache()\r\n\r\n _d_forest_prob = _b_prob.data\r\n _d_loss = _b_loss_year.data\r\n _d_gain = _b_gain_year.data\r\n\r\n _d_esta = np.zeros(_d_forest_prob.shape, dtype=np.uint8)\r\n \r\n _d_prob = np.empty(_d_forest_prob.shape, dtype=np.float32)\r\n _d_prob.fill(100)\r\n _d_prob[_b_prob.data == _b_prob.nodata] = -9999\r\n \r\n _b_esta = _b_loss_year.from_grid(_d_esta, nodata=255)\r\n _b_esta.color_table = ge.load_colortable(config.get('conf', 'color'))\r\n\r\n _d_esta[_d_forest_prob > 100] = _d_forest_prob[_d_forest_prob > 100]\r\n \r\n for _y in range(1970, 2021):\r\n _y = _y - 1970\r\n \r\n _idx = _d_loss == _y\r\n _d_esta[_idx] = 100\r\n _d_prob[_idx] = _b_loss_prob.data[_idx]\r\n \r\n _idx = _d_gain == _y\r\n _d_esta[_idx] = _y\r\n _d_prob[_idx] = _b_gain_prob.data[_idx]\r\n \r\n _d_esta[_d_forest_prob < 50] = 100\r\n \r\n _d_test = (_d_esta < 100).astype(np.uint8)\r\n _d_test[(_d_esta < 100) 
& (_d_esta > 0)] = 1\r\n _b_test = _b_esta.from_grid(_d_test, nodata=255)\r\n mod_filter.filter_band_mmu(_b_test, area=config.getfloat('conf', 'min_patch'))\r\n _d_esta[(_d_esta == 100) & (_b_test.data == 1)] = 0\r\n \r\n _d_test = ((_d_esta > 0) & (_d_esta <= 100)).astype(np.uint8)\r\n _d_test[(_d_esta < 100) & (_d_esta > 0)] = 1\r\n _b_test = _b_esta.from_grid(_d_test, nodata=255)\r\n mod_filter.filter_band_mmu(_b_test, area=config.getfloat('conf', 'min_patch'))\r\n _d_esta[(_d_esta == 0) & (_b_test.data == 1)] = 100\r\n \r\n with file_unzip.file_unzip() as _zip:\r\n _zip.save(_b_esta, _fname_m1('esta_year'))\r\n _zip.save(_b_esta.from_grid(_d_prob, nodata=-9999), _fname_m1('esta_prob'))\r\n \r\n return True\r\n\r\ndef main(opts):\r\n import logging\r\n from gio import config\r\n from gio import file_mag\r\n from gio import global_task\r\n import os\r\n \r\n _d_inp = config.get('conf', 'input')\r\n _d_ref = config.get('conf', 'refer', _d_inp)\r\n \r\n _f_mak = file_mag.get(os.path.join(_d_inp, 'tasks.txt'))\r\n _ts = global_task.load(_f_mak)\r\n\r\n from gio import multi_task\r\n _rs = multi_task.run(_task, [(_t, os.path.join(_d_inp, 'data'), os.path.join(_d_ref, 'data'), opts) for _t in multi_task.load(_ts, opts)], opts)\r\n print('processed', len([_r for _r in _rs if _r]), 'tiles')\r\n\r\ndef usage():\r\n _p = environ_mag.usage(True)\r\n\r\n _p.add_argument('-i', '--input', dest='input')\r\n _p.add_argument('-r', '--refer', dest='refer')\r\n _p.add_argument('--latest-tcc', dest='latest_tcc')\r\n _p.add_argument('-w', '--over-write', dest='over_write', type='bool')\r\n _p.add_argument('--min-tcc', dest='min_tcc', type=int, default=30)\r\n _p.add_argument('-m', '--min-patch', dest='min_patch', type=float, default=100 * 100)\r\n _p.add_argument('--test-tile', dest='test_tile')\r\n\r\n return _p\r\n\r\nif __name__ == '__main__':\r\n from gio import environ_mag\r\n environ_mag.init_path()\r\n environ_mag.run(main, [environ_mag.config(usage())])\r\n", "'''\r\nFile: detect_forest_change.py\r\nAuthor: Min Feng\r\nVersion: 0.1\r\nCreate: 2018-04-20 15:42:37\r\nDescription: detect forest changes from foest probility layers and tree cover layers\r\n'''\r\n\r\nimport logging\r\n\r\ndef _load_tcc(f_tcc, msk):\r\n from gio import geo_raster_ex as gx\r\n from gio import config\r\n import numpy as np\r\n\r\n _bnd = gx.read_block(f_tcc, msk)\r\n if _bnd is None:\r\n return None\r\n \r\n _dat = np.zeros(msk.data.shape, dtype=np.uint8)\r\n\r\n _m_tcc = config.getfloat('conf', 'min_tcc')\r\n _idx = _bnd.data >= _m_tcc\r\n _dat[_idx] = 100\r\n\r\n _idx = _bnd.data > 100\r\n _dat[_idx] = _bnd.data[_idx]\r\n\r\n return msk.from_grid(_dat, nodata=255)\r\n\r\ndef _task(tile, d_out, d_ref, opts):\r\n from gio import file_unzip\r\n from gio import config\r\n from gio import file_mag\r\n from gio import metadata\r\n from gio import geo_raster as ge\r\n from gio import geo_raster_ex as gx\r\n from gio import mod_filter\r\n import numpy as np\r\n import os\r\n import re\r\n\r\n _tag = tile.tag\r\n\r\n _ttt = config.get('conf', 'test_tile')\r\n if _ttt and _tag not in _ttt.replace(' ', '').split(','):\r\n return\r\n\r\n _m = re.match(r'(h\\d+)(v\\d+)', _tag)\r\n _h = _m.group(1)\r\n _v = _m.group(2)\r\n \r\n _d_out = os.path.join(d_out, _h, _v, _tag)\r\n _d_ref = os.path.join(d_ref, _h, _v, _tag)\r\n _f_met = os.path.join(_d_out, '%s_met.txt' % _tag)\r\n \r\n _fname = lambda t: os.path.join(_d_out, '%s_%s.tif' % (_tag, t))\r\n _fname_m1 = lambda t, a='_m1': _fname('%s_n0%s' % (t, a))\r\n\r\n # if not 
file_mag.get(_f_met).exists():\r\n # logging.info('skip non-existing result for %s' % _tag)\r\n # return\r\n\r\n if not file_mag.get(_fname_m1('esta_year')).exists():\r\n logging.info('skip non-existing result for %s' % _tag)\r\n return\r\n \r\n if (not _ttt) and file_mag.get(_fname_m1('age_year')).exists() and \\\r\n (not config.getboolean('conf', 'over_write', False)):\r\n logging.info('skip processed esta result for %s' % _tag)\r\n return\r\n \r\n _b_esta_year = ge.open(_fname_m1('esta_year')).get_band().cache()\r\n # _b_esta_prob = ge.open(_fname_m1('esta_prob')).get_band().cache()\r\n\r\n _latest_year = config.getint('conf', 'latest_year')\r\n \r\n _est = _b_esta_year.data\r\n _dat = np.zeros((_b_esta_year.height, _b_esta_year.width), dtype=np.uint8)\r\n _idx = _est < 100\r\n _dat[_idx] = (_latest_year - 1970 - _est[_idx])\r\n \r\n _b_age = _b_esta_year.from_grid(_dat, nodata=255)\r\n \r\n _f_lnd = 's3://geo-dataset/data/land/list.shp'\r\n _b_lnd = gx.read_block(_f_lnd, _b_esta_year)\r\n _b_age.data[_b_lnd.data != 1] = _b_age.nodata\r\n _b_age.color_table = ge.load_colortable(config.get('conf', 'color'))\r\n \r\n with file_unzip.file_unzip() as _zip:\r\n _zip.save(_b_age, _fname_m1('age_year'))\r\n \r\n return True\r\n\r\ndef main(opts):\r\n import logging\r\n from gio import config\r\n from gio import file_mag\r\n from gio import global_task\r\n import os\r\n \r\n _d_inp = config.get('conf', 'input')\r\n _d_ref = config.get('conf', 'refer', _d_inp)\r\n \r\n _f_mak = file_mag.get(os.path.join(_d_inp, 'tasks.txt'))\r\n _ts = global_task.load(_f_mak)\r\n\r\n from gio import multi_task\r\n _rs = multi_task.run(_task, [(_t, os.path.join(_d_inp, 'data'), os.path.join(_d_ref, 'data'), opts) for _t in multi_task.load(_ts, opts)], opts)\r\n print('processed', len([_r for _r in _rs if _r]), 'tiles')\r\n\r\ndef usage():\r\n _p = environ_mag.usage(True)\r\n\r\n _p.add_argument('-i', '--input', dest='input')\r\n _p.add_argument('-y', '--latest-year', dest='latest_year', type=int, required=True)\r\n _p.add_argument('-w', '--over-write', dest='over_write', type='bool')\r\n _p.add_argument('--test-tile', dest='test_tile')\r\n\r\n return _p\r\n\r\nif __name__ == '__main__':\r\n from gio import environ_mag\r\n environ_mag.init_path()\r\n environ_mag.run(main, [environ_mag.config(usage())])\r\n" ]
[ [ "numpy.zeros", "numpy.empty" ], [ "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
DonaldWhyte/high-performance-data-processing-in-python
[ "f7f8076ff67d53be09e1d2f9988976e31b92f8e9", "f7f8076ff67d53be09e1d2f9988976e31b92f8e9" ]
[ "code/rolling_tests.py", "code/oversample.py" ]
[ "import numpy as np\n\n\ndef _main():\n # Inputs\n n = 3\n x = np.arange(20, dtype=np.float64)\n\n # Slow average/std\n avg = np.zeros(len(x) - n + 1)\n std = np.zeros(len(x) - n + 1)\n for i in range(len(avg)):\n avg[i] = np.mean(x[i:i+n])\n std[i] = np.std(x[i:i+n])\n\n print('AVG')\n print('\\n'.join(str(x) for x in avg))\n print('STD:')\n print('\\n'.join(str(x) for x in std))\n\n # Fast std\n squares = np.square(x)\n sum_of_squares = np.convolve(squares, np.ones(n, dtype=int), 'valid')\n var_fast = (sum_of_squares / n) - np.square(avg)\n std_fast = np.sqrt(var_fast)\n\n print('STD FAST:')\n print('\\n'.join(str(x) for x in std_fast))\n\n\nif __name__ == '__main__':\n _main()\n", "\"\"\"\nTODO: explain\n\"\"\"\n\nimport argparse\nimport datetime\nimport csv\nfrom typing import List, Tuple\n\nimport pandas as pd\n\n\n_NON_MEASUREMENT_COLUMNS = {\n 'STATION', 'STATION_NAME', 'ELEVATION', 'LATITUDE', 'LONGITUDE', 'DATE'\n #'STATION', 'DATE', 'SOURCE', 'LATITUDE', 'LONGITUDE', 'ELEVATION', 'NAME',\n #'REPORT_TYPE', 'CALL_SIGN', 'QUALITY_CONTROL'\n}\n\ndef _main():\n args = _parse_args()\n step_size = {\n '1sec': 1,\n '5sec': 5,\n '10sec': 10,\n '30sec': 30,\n '1min': 60,\n '5min': 60 * 5,\n '10min': 60 * 10,\n '30min': 60 * 30\n }[args.granularity]\n\n df = pd.read_csv(args.input)\n df.sort_values(['STATION', 'DATE'], inplace=True)\n float_cols = list(df.select_dtypes(include=['float']).columns)\n rows = df.to_dict(orient='record')\n\n with open(args.output, 'wt') as f:\n writer = csv.DictWriter(f, fieldnames=rows[0].keys())\n writer.writeheader()\n for a, b in zip(rows[:-1], rows[1:]):\n writer.writerow(a)\n if _primary_key(a) == _primary_key(b):\n a_dt = _to_dt(a['DATE'])\n b_dt = _to_dt(b['DATE'])\n\n # Based on the input granularity/step size, determine how many\n # samples to add between point a and b,\n n_steps = int((b_dt - a_dt).total_seconds() / step_size)\n interpolated_values = {\n col: _interpolate(float(a[col]), float(b[col]), n_steps)\n for col in a.keys() if col in float_cols\n }\n for step in range(1, n_steps):\n row = {\n col: val for col, val in a.items()\n if col not in float_cols\n }\n row['DATE'] = (\n (a_dt + datetime.timedelta(seconds=(step * step_size)))\n .strftime(_DT_FORMAT))\n row.update({\n col: f'{interpolated_values[col][step]:.4f}'\n for col in a.keys()\n if col in float_cols\n })\n writer.writerow(row)\n\n # Need to write the final row of the input CSV after the interpolation\n # loop.\n writer.writerow(b)\n\n\ndef _parse_args() -> argparse.Namespace:\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '-i', '--input',\n required=True,\n help='path to input file to oversample')\n parser.add_argument(\n '-o', '--output',\n required=True,\n help='path of oversampled file to generate')\n parser.add_argument(\n '-g', '--granularity',\n required=True,\n choices=('1min', '10min'),\n help='granularity of oversampling. This determines how many extra '\n 'samples will be generated between two time points.')\n return parser.parse_args()\n\n\n\ndef _primary_key(row: dict) -> Tuple[str, str]:\n return row['STATION']\n\n\ndef _to_dt(val: str) -> datetime.datetime:\n return datetime.datetime.strptime(val, _DT_FORMAT)\n\n\n_DT_FORMAT = '%Y-%m-%dT%H:%M%S'\n\n\ndef _interpolate(start: float, end: float, n_steps: int) -> List[float]:\n step_size = (end - start) / n_steps\n return [start + step_size * step for step in range(n_steps)]\n\n\nif __name__ == '__main__':\n _main()\n" ]
[ [ "numpy.square", "numpy.sqrt", "numpy.arange", "numpy.ones", "numpy.std", "numpy.mean" ], [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
INK-USC/RiddleSense
[ "a3d57eaf084da9cf6b77692c608e2cd2870fbd97", "a3d57eaf084da9cf6b77692c608e2cd2870fbd97", "a3d57eaf084da9cf6b77692c608e2cd2870fbd97", "a3d57eaf084da9cf6b77692c608e2cd2870fbd97", "a3d57eaf084da9cf6b77692c608e2cd2870fbd97", "a3d57eaf084da9cf6b77692c608e2cd2870fbd97", "a3d57eaf084da9cf6b77692c608e2cd2870fbd97", "a3d57eaf084da9cf6b77692c608e2cd2870fbd97", "a3d57eaf084da9cf6b77692c608e2cd2870fbd97" ]
[ "methods/transformers/examples/deebert/src/modeling_highway_bert.py", "methods/t5/predict.py", "methods/transformers/templates/adding_a_new_model/modeling_tf_xxx.py", "methods/transformers/examples/lxmert/extracting_data.py", "methods/transformers/src/transformers/modeling_roberta.py", "methods/transformers/src/transformers/modeling_tf_openai.py", "methods/transformers/examples/seq2seq/finetune.py", "methods/transformers/src/transformers/convert_dialogpt_original_pytorch_checkpoint_to_pytorch.py", "methods/transformers/examples/distillation/scripts/extract_distilbert.py" ]
[ "import torch\r\nfrom torch import nn\r\nfrom torch.nn import CrossEntropyLoss, MSELoss\r\n\r\nfrom transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward\r\nfrom transformers.modeling_bert import (\r\n BERT_INPUTS_DOCSTRING,\r\n BERT_START_DOCSTRING,\r\n BertEmbeddings,\r\n BertLayer,\r\n BertPooler,\r\n BertPreTrainedModel,\r\n)\r\n\r\n\r\ndef entropy(x):\r\n \"\"\"Calculate entropy of a pre-softmax logit Tensor\"\"\"\r\n exp_x = torch.exp(x)\r\n A = torch.sum(exp_x, dim=1) # sum of exp(x_i)\r\n B = torch.sum(x * exp_x, dim=1) # sum of x_i * exp(x_i)\r\n return torch.log(A) - B / A\r\n\r\n\r\nclass DeeBertEncoder(nn.Module):\r\n def __init__(self, config):\r\n super().__init__()\r\n self.output_attentions = config.output_attentions\r\n self.output_hidden_states = config.output_hidden_states\r\n self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])\r\n self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])\r\n\r\n self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]\r\n\r\n def set_early_exit_entropy(self, x):\r\n if (type(x) is float) or (type(x) is int):\r\n for i in range(len(self.early_exit_entropy)):\r\n self.early_exit_entropy[i] = x\r\n else:\r\n self.early_exit_entropy = x\r\n\r\n def init_highway_pooler(self, pooler):\r\n loaded_model = pooler.state_dict()\r\n for highway in self.highway:\r\n for name, param in highway.pooler.state_dict().items():\r\n param.copy_(loaded_model[name])\r\n\r\n def forward(\r\n self,\r\n hidden_states,\r\n attention_mask=None,\r\n head_mask=None,\r\n encoder_hidden_states=None,\r\n encoder_attention_mask=None,\r\n ):\r\n all_hidden_states = ()\r\n all_attentions = ()\r\n all_highway_exits = ()\r\n for i, layer_module in enumerate(self.layer):\r\n if self.output_hidden_states:\r\n all_hidden_states = all_hidden_states + (hidden_states,)\r\n\r\n layer_outputs = layer_module(\r\n hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask\r\n )\r\n hidden_states = layer_outputs[0]\r\n\r\n if self.output_attentions:\r\n all_attentions = all_attentions + (layer_outputs[1],)\r\n\r\n current_outputs = (hidden_states,)\r\n if self.output_hidden_states:\r\n current_outputs = current_outputs + (all_hidden_states,)\r\n if self.output_attentions:\r\n current_outputs = current_outputs + (all_attentions,)\r\n\r\n highway_exit = self.highway[i](current_outputs)\r\n # logits, pooled_output\r\n\r\n if not self.training:\r\n highway_logits = highway_exit[0]\r\n highway_entropy = entropy(highway_logits)\r\n highway_exit = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy\r\n all_highway_exits = all_highway_exits + (highway_exit,)\r\n\r\n if highway_entropy < self.early_exit_entropy[i]:\r\n new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)\r\n raise HighwayException(new_output, i + 1)\r\n else:\r\n all_highway_exits = all_highway_exits + (highway_exit,)\r\n\r\n # Add last layer\r\n if self.output_hidden_states:\r\n all_hidden_states = all_hidden_states + (hidden_states,)\r\n\r\n outputs = (hidden_states,)\r\n if self.output_hidden_states:\r\n outputs = outputs + (all_hidden_states,)\r\n if self.output_attentions:\r\n outputs = outputs + (all_attentions,)\r\n\r\n outputs = outputs + (all_highway_exits,)\r\n return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits\r\n\r\n\r\n@add_start_docstrings(\r\n \"The Bert Model 
transformer with early exiting (DeeBERT). \",\r\n BERT_START_DOCSTRING,\r\n)\r\nclass DeeBertModel(BertPreTrainedModel):\r\n def __init__(self, config):\r\n super().__init__(config)\r\n self.config = config\r\n\r\n self.embeddings = BertEmbeddings(config)\r\n self.encoder = DeeBertEncoder(config)\r\n self.pooler = BertPooler(config)\r\n\r\n self.init_weights()\r\n\r\n def init_highway_pooler(self):\r\n self.encoder.init_highway_pooler(self.pooler)\r\n\r\n def get_input_embeddings(self):\r\n return self.embeddings.word_embeddings\r\n\r\n def set_input_embeddings(self, value):\r\n self.embeddings.word_embeddings = value\r\n\r\n def _prune_heads(self, heads_to_prune):\r\n \"\"\"Prunes heads of the model.\r\n heads_to_prune: dict of {layer_num: list of heads to prune in this layer}\r\n See base class PreTrainedModel\r\n \"\"\"\r\n for layer, heads in heads_to_prune.items():\r\n self.encoder.layer[layer].attention.prune_heads(heads)\r\n\r\n @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)\r\n def forward(\r\n self,\r\n input_ids=None,\r\n attention_mask=None,\r\n token_type_ids=None,\r\n position_ids=None,\r\n head_mask=None,\r\n inputs_embeds=None,\r\n encoder_hidden_states=None,\r\n encoder_attention_mask=None,\r\n ):\r\n r\"\"\"\r\n Return:\r\n :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:\r\n last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):\r\n Sequence of hidden-states at the output of the last layer of the model.\r\n pooler_output (:obj:`torch.FloatTensor`: of shape :obj:`(batch_size, hidden_size)`):\r\n Last layer hidden-state of the first token of the sequence (classification token)\r\n further processed by a Linear layer and a Tanh activation function. 
The Linear\r\n layer weights are trained from the next sentence prediction (classification)\r\n objective during pre-training.\r\n\r\n This output is usually *not* a good summary\r\n of the semantic content of the input, you're often better with averaging or pooling\r\n the sequence of hidden-states for the whole input sequence.\r\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):\r\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\r\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\r\n\r\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\r\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):\r\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape\r\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\r\n\r\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\r\n heads.\r\n highway_exits (:obj:`tuple(tuple(torch.Tensor))`:\r\n Tuple of each early exit's results (total length: number of layers)\r\n Each tuple is again, a tuple of length 2 - the first entry is logits and the second entry is hidden states.\r\n \"\"\"\r\n if input_ids is not None and inputs_embeds is not None:\r\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\r\n elif input_ids is not None:\r\n input_shape = input_ids.size()\r\n elif inputs_embeds is not None:\r\n input_shape = inputs_embeds.size()[:-1]\r\n else:\r\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\r\n\r\n device = input_ids.device if input_ids is not None else inputs_embeds.device\r\n\r\n if attention_mask is None:\r\n attention_mask = torch.ones(input_shape, device=device)\r\n if encoder_attention_mask is None:\r\n encoder_attention_mask = torch.ones(input_shape, device=device)\r\n if token_type_ids is None:\r\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\r\n\r\n # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]\r\n # ourselves in which case we just need to make it broadcastable to all heads.\r\n extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)\r\n\r\n # If a 2D ou 3D attention mask is provided for the cross-attention\r\n # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]\r\n if encoder_attention_mask.dim() == 3:\r\n encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]\r\n if encoder_attention_mask.dim() == 2:\r\n encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]\r\n\r\n encoder_extended_attention_mask = encoder_extended_attention_mask.to(\r\n dtype=next(self.parameters()).dtype\r\n ) # fp16 compatibility\r\n encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0\r\n\r\n # Prepare head mask if needed\r\n # 1.0 in head_mask indicate we keep the head\r\n # attention_probs has shape bsz x n_heads x N x N\r\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\r\n # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]\r\n head_mask = self.get_head_mask(head_mask, 
self.config.num_hidden_layers)\r\n\r\n embedding_output = self.embeddings(\r\n input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds\r\n )\r\n encoder_outputs = self.encoder(\r\n embedding_output,\r\n attention_mask=extended_attention_mask,\r\n head_mask=head_mask,\r\n encoder_hidden_states=encoder_hidden_states,\r\n encoder_attention_mask=encoder_extended_attention_mask,\r\n )\r\n sequence_output = encoder_outputs[0]\r\n pooled_output = self.pooler(sequence_output)\r\n\r\n outputs = (sequence_output, pooled_output,) + encoder_outputs[\r\n 1:\r\n ] # add hidden_states and attentions if they are here\r\n return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits\r\n\r\n\r\nclass HighwayException(Exception):\r\n def __init__(self, message, exit_layer):\r\n self.message = message\r\n self.exit_layer = exit_layer # start from 1!\r\n\r\n\r\nclass BertHighway(nn.Module):\r\n \"\"\"A module to provide a shortcut\r\n from (the output of one non-final BertLayer in BertEncoder) to (cross-entropy computation in BertForSequenceClassification)\r\n \"\"\"\r\n\r\n def __init__(self, config):\r\n super().__init__()\r\n self.pooler = BertPooler(config)\r\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\r\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\r\n\r\n def forward(self, encoder_outputs):\r\n # Pooler\r\n pooler_input = encoder_outputs[0]\r\n pooler_output = self.pooler(pooler_input)\r\n # \"return\" pooler_output\r\n\r\n # BertModel\r\n bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]\r\n # \"return\" bmodel_output\r\n\r\n # Dropout and classification\r\n pooled_output = bmodel_output[1]\r\n\r\n pooled_output = self.dropout(pooled_output)\r\n logits = self.classifier(pooled_output)\r\n\r\n return logits, pooled_output\r\n\r\n\r\n@add_start_docstrings(\r\n \"\"\"Bert Model (with early exiting - DeeBERT) with a classifier on top,\r\n also takes care of multi-layer training. 
\"\"\",\r\n BERT_START_DOCSTRING,\r\n)\r\nclass DeeBertForSequenceClassification(BertPreTrainedModel):\r\n def __init__(self, config):\r\n super().__init__(config)\r\n self.num_labels = config.num_labels\r\n self.num_layers = config.num_hidden_layers\r\n\r\n self.bert = DeeBertModel(config)\r\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\r\n self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)\r\n\r\n self.init_weights()\r\n\r\n @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)\r\n def forward(\r\n self,\r\n input_ids=None,\r\n attention_mask=None,\r\n token_type_ids=None,\r\n position_ids=None,\r\n head_mask=None,\r\n inputs_embeds=None,\r\n labels=None,\r\n output_layer=-1,\r\n train_highway=False,\r\n ):\r\n r\"\"\"\r\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\r\n Labels for computing the sequence classification/regression loss.\r\n Indices should be in :obj:`[0, ..., config.num_labels - 1]`.\r\n If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),\r\n If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).\r\n\r\n Returns:\r\n :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:\r\n loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`label` is provided):\r\n Classification (or regression if config.num_labels==1) loss.\r\n logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`):\r\n Classification (or regression if config.num_labels==1) scores (before SoftMax).\r\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):\r\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\r\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\r\n\r\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\r\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):\r\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape\r\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\r\n\r\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\r\n heads.\r\n highway_exits (:obj:`tuple(tuple(torch.Tensor))`:\r\n Tuple of each early exit's results (total length: number of layers)\r\n Each tuple is again, a tuple of length 2 - the first entry is logits and the second entry is hidden states.\r\n \"\"\"\r\n\r\n exit_layer = self.num_layers\r\n try:\r\n outputs = self.bert(\r\n input_ids,\r\n attention_mask=attention_mask,\r\n token_type_ids=token_type_ids,\r\n position_ids=position_ids,\r\n head_mask=head_mask,\r\n inputs_embeds=inputs_embeds,\r\n )\r\n # sequence_output, pooled_output, (hidden_states), (attentions), highway exits\r\n\r\n pooled_output = outputs[1]\r\n\r\n pooled_output = self.dropout(pooled_output)\r\n logits = self.classifier(pooled_output)\r\n outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here\r\n except HighwayException as e:\r\n outputs = e.message\r\n exit_layer = e.exit_layer\r\n logits = outputs[0]\r\n\r\n if not self.training:\r\n original_entropy = entropy(logits)\r\n highway_entropy = []\r\n 
highway_logits_all = []\r\n if labels is not None:\r\n if self.num_labels == 1:\r\n # We are doing regression\r\n loss_fct = MSELoss()\r\n loss = loss_fct(logits.view(-1), labels.view(-1))\r\n else:\r\n loss_fct = CrossEntropyLoss()\r\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\r\n\r\n # work with highway exits\r\n highway_losses = []\r\n for highway_exit in outputs[-1]:\r\n highway_logits = highway_exit[0]\r\n if not self.training:\r\n highway_logits_all.append(highway_logits)\r\n highway_entropy.append(highway_exit[2])\r\n if self.num_labels == 1:\r\n # We are doing regression\r\n loss_fct = MSELoss()\r\n highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))\r\n else:\r\n loss_fct = CrossEntropyLoss()\r\n highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))\r\n highway_losses.append(highway_loss)\r\n\r\n if train_highway:\r\n outputs = (sum(highway_losses[:-1]),) + outputs\r\n # exclude the final highway, of course\r\n else:\r\n outputs = (loss,) + outputs\r\n if not self.training:\r\n outputs = outputs + ((original_entropy, highway_entropy), exit_layer)\r\n if output_layer >= 0:\r\n outputs = (\r\n (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]\r\n ) # use the highway of the last layer\r\n\r\n return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)\r\n", "import torch\r\nimport csv\r\nimport argparse\r\nfrom trainer import *\r\nfrom tqdm import tqdm\r\nimport random\r\nimport numpy as np\r\nimport os\r\nimport re\r\nimport glob\r\nfrom transformers import (\r\n AdamW,\r\n T5ForConditionalGeneration,\r\n T5Tokenizer,\r\n get_linear_schedule_with_warmup,\r\n AutoTokenizer\r\n)\r\nfrom dataset import RiddleSenseProcessor\r\n\r\ndef set_seed(seed):\r\n random.seed(seed)\r\n np.random.seed(seed)\r\n torch.manual_seed(seed)\r\n if torch.cuda.is_available():\r\n torch.cuda.manual_seed_all(seed)\r\n\r\ndef extractValLoss(checkpoint_path):\r\n \"\"\"Eg checkpoint path format: path_to_dir/checkpoint_epoch=4-val_loss=0.450662.ckpt\"\"\"\r\n val_loss = float(re.search('val_loss=(.+?).ckpt', checkpoint_path).group(1))\r\n return val_loss\r\n\r\ndef extractStepOREpochNum(checkpoint_path):\r\n \"\"\"Eg checkpoint path format: path_to_dir/checkpoint_epoch=4.ckpt (or)\r\n path_to_dir/checkpoint_epoch=4-step=50.ckpt (or)\r\n \"\"\"\r\n if \"step\" in checkpoint_path:\r\n num = int(re.search('step=(.+?).ckpt', checkpoint_path).group(1))\r\n else:\r\n num = int(re.search('epoch=(.+?).ckpt', checkpoint_path).group(1))\r\n return num\r\n\r\ndef getBestModelCheckpointPath(checkpoint_dir):\r\n checkpoint_list = glob.glob(os.path.join(checkpoint_dir, \"checkpoint_*.ckpt\"))\r\n\r\n try:\r\n # Get the checkpoint with lowest validation loss\r\n sorted_list = sorted(checkpoint_list, key=lambda x: extractValLoss(x.split(\"/\")[-1]))\r\n except:\r\n # If validation loss is not present, get the checkpoint with highest step number or epoch number.\r\n sorted_list = sorted(checkpoint_list, key=lambda x: extractStepOREpochNum(x.split(\"/\")[-1]), reverse=True)\r\n\r\n return sorted_list[0]\r\n\r\ndef run():\r\n #torch.multiprocessing.freeze_support()\r\n set_seed(42)\r\n\r\n parser = argparse.ArgumentParser()\r\n\r\n parser.add_argument('--data_dir', type=str, default=\"../data_dir/riddle_sense\",\r\n help='Path for Data files')\r\n parser.add_argument('--output_dir', type=str, default=\"\",\r\n help='Path to save the checkpoints')\r\n parser.add_argument('--checkpoint_dir', type=str, default=\"\",\r\n 
help='Checkpoint directory')\r\n parser.add_argument('--tokenizer_name_or_path', type=str, default=\"\",\r\n help='Tokenizer name or Path')\r\n parser.add_argument('--max_seq_length', type=int, default=128,\r\n help='Maximum Sequence Length')\r\n parser.add_argument('--eval_batch_size', type=int, default=8,\r\n help='Batch size for Evaluation')\r\n\r\n args = parser.parse_known_args()[0]\r\n print(args)\r\n\r\n # Create a folder if output_dir doesn't exists:\r\n if not os.path.exists(args.output_dir):\r\n os.makedirs(args.output_dir)\r\n print(\"Creating output directory\")\r\n\r\n if args.checkpoint_dir == \"\":\r\n model_name = args.model_name_or_path #\"allenai/t5-t5-3b\" # you can specify the model size here\r\n tokenizer = AutoTokenizer.from_pretrained(model_name)\r\n model = T5ForConditionalGeneration.from_pretrained(model_name)\r\n else:\r\n best_checkpoint_path = getBestModelCheckpointPath(args.checkpoint_dir)\r\n print(\"Using checkpoint = \", str(best_checkpoint_path))\r\n tokenizer = T5Tokenizer.from_pretrained(args.tokenizer_name_or_path)\r\n model = T5FineTuner.load_from_checkpoint(best_checkpoint_path).model\r\n\r\n proc = RiddleSenseProcessor()\r\n def chunks(lst, n):\r\n for i in range(0, len(lst), n):\r\n yield lst[i : i + n]\r\n\r\n dev_csvfile = open(os.path.join(args.output_dir, 'dev.csv'),'w')\r\n dev_writer = csv.writer(dev_csvfile)\r\n dev_examples = proc.get_dev_examples(args.data_dir)\r\n\r\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\r\n print(device)\r\n model.to(device)\r\n\r\n for batch in tqdm(list(chunks(dev_examples, args.eval_batch_size))):\r\n batch_question = [b.question for b in batch]\r\n options = [['(%s) %s' % (i, option) for i, option in zip(['A','B','C','D','E'], b.endings)] for b in batch]\r\n options = [\" \".join(opts) for opts in options]\r\n\r\n inputs = []\r\n for question, option in zip(batch_question, options):\r\n inputs.append(\"%s \\\\n %s\" % (question, option))\r\n dct = tokenizer.batch_encode_plus(inputs, max_length=args.max_seq_length, return_tensors=\"pt\", padding=True, truncation=True)\r\n outs = model.generate(input_ids=dct['input_ids'].to(device),\r\n attention_mask=dct['attention_mask'].to(device))\r\n\r\n dec = [tokenizer.decode(x, skip_special_tokens=True, clean_up_tokenization_spaces=True) for x in outs]\r\n ids = [b.example_id for b in batch]\r\n\r\n for i, d in zip(ids, dec):\r\n dev_writer.writerow([i,d])\r\n\r\n test_csvfile = open(os.path.join(args.output_dir, 'test.csv'),'w')\r\n test_writer = csv.writer(test_csvfile)\r\n test_examples = proc.get_test_examples(args.data_dir, num_choices=5)\r\n\r\n for batch in tqdm(list(chunks(test_examples, args.eval_batch_size))):\r\n batch_question = [b.question for b in batch]\r\n options = [['(%s) %s' % (i, option) for i, option in zip(['A','B','C','D','E'], b.endings)] for b in batch]\r\n options = [\" \".join(opts) for opts in options]\r\n\r\n inputs = []\r\n for question, option in zip(batch_question, options):\r\n inputs.append(\"%s \\\\n %s\" % (question, option))\r\n\r\n dct = tokenizer.batch_encode_plus(inputs, max_length=args.max_seq_length, return_tensors=\"pt\", padding=True, truncation=True)\r\n outs = model.generate(input_ids=dct['input_ids'].to(device),\r\n attention_mask=dct['attention_mask'].to(device))\r\n\r\n dec = [tokenizer.decode(x, skip_special_tokens=True, clean_up_tokenization_spaces=True) for x in outs]\r\n ids = [b.example_id for b in batch]\r\n\r\n for i, d in zip(ids, dec):\r\n test_writer.writerow([i,d])\r\n\r\n\r\nif __name__ == 
'__main__':\r\n run()\r\n", "# coding=utf-8\r\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\r\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\"\"\" TF 2.0 XXX model. \"\"\"\r\n\r\n####################################################\r\n# In this template, replace all the XXX (various casings) with your model name\r\n####################################################\r\n\r\nimport tensorflow as tf\r\n\r\nfrom .configuration_xxx import XxxConfig\r\nfrom .file_utils import (\r\n MULTIPLE_CHOICE_DUMMY_INPUTS,\r\n add_code_sample_docstrings,\r\n add_start_docstrings,\r\n add_start_docstrings_to_model_forward,\r\n)\r\nfrom .modeling_tf_outputs import (\r\n TFBaseModelOutputWithPooling,\r\n TFMaskedLMOutput,\r\n TFMultipleChoiceModelOutput,\r\n TFQuestionAnsweringModelOutput,\r\n TFSequenceClassifierOutput,\r\n TFTokenClassifierOutput,\r\n)\r\nfrom .modeling_tf_utils import (\r\n TFMaskedLanguageModelingLoss,\r\n TFMultipleChoiceLoss,\r\n TFPreTrainedModel,\r\n TFQuestionAnsweringLoss,\r\n TFSequenceClassificationLoss,\r\n TFTokenClassificationLoss,\r\n get_initializer,\r\n keras_serializable,\r\n shape_list,\r\n)\r\nfrom .tokenization_utils import BatchEncoding\r\nfrom .utils import logging\r\n\r\n\r\nlogger = logging.get_logger(__name__)\r\n\r\n_CONFIG_FOR_DOC = \"XXXConfig\"\r\n_TOKENIZER_FOR_DOC = \"XxxTokenizer\"\r\n\r\n####################################################\r\n# This list contrains shortcut names for some of\r\n# the pretrained weights provided with the models\r\n####################################################\r\nTF_XXX_PRETRAINED_MODEL_ARCHIVE_LIST = [\r\n \"xxx-base-uncased\",\r\n \"xxx-large-uncased\",\r\n]\r\n\r\n\r\n####################################################\r\n# TF 2.0 Models are constructed using Keras imperative API by sub-classing\r\n# - tf.keras.layers.Layer for the layers and\r\n# - TFPreTrainedModel for the models (itself a sub-class of tf.keras.Model)\r\n####################################################\r\n\r\n####################################################\r\n# Here is an example of typical layer in a TF 2.0 model of the library\r\n# The classes are usually identical to the PyTorch ones and prefixed with 'TF'.\r\n#\r\n# Note that class __init__ parameters includes **kwargs (send to 'super').\r\n# This let us have a control on class scope and variable names:\r\n# More precisely, we set the names of the class attributes (lower level layers) to\r\n# to the equivalent attributes names in the PyTorch model so we can have equivalent\r\n# class and scope structure between PyTorch and TF 2.0 models and easily load one in the other.\r\n#\r\n# See the conversion methods in modeling_tf_pytorch_utils.py for more details\r\n####################################################\r\n\r\nTFXxxAttention = tf.keras.layers.Layer\r\n\r\nTFXxxIntermediate = tf.keras.layers.Layer\r\n\r\nTFXxxOutput = 
tf.keras.layers.Layer\r\n\r\n\r\nclass TFXxxLayer(tf.keras.layers.Layer):\r\n def __init__(self, config, **kwargs):\r\n super().__init__(**kwargs)\r\n self.attention = TFXxxAttention(config, name=\"attention\")\r\n self.intermediate = TFXxxIntermediate(config, name=\"intermediate\")\r\n self.transformer_output = TFXxxOutput(config, name=\"output\")\r\n\r\n def call(self, inputs, training=False):\r\n hidden_states, attention_mask, head_mask = inputs\r\n\r\n attention_outputs = self.attention([hidden_states, attention_mask, head_mask], training=training)\r\n attention_output = attention_outputs[0]\r\n intermediate_output = self.intermediate(attention_output)\r\n layer_output = self.transformer_output([intermediate_output, attention_output], training=training)\r\n outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them\r\n return outputs\r\n\r\n\r\n####################################################\r\n# The full model without a specific pretrained or finetuning head is\r\n# provided as a tf.keras.layers.Layer usually called \"TFXxxMainLayer\"\r\n####################################################\r\n@keras_serializable\r\nclass TFXxxMainLayer(tf.keras.layers.Layer):\r\n def __init__(self, config, **kwargs):\r\n super().__init__(**kwargs)\r\n\r\n def get_input_embeddings(self):\r\n return self.embeddings\r\n\r\n def set_input_embeddings(self, value):\r\n self.embeddings.word_embeddings = value\r\n self.embeddings.vocab_size = value.shape[0]\r\n\r\n def _prune_heads(self, heads_to_prune):\r\n raise NotImplementedError # Not implemented yet in the library for TF 2.0 models\r\n\r\n def call(\r\n self,\r\n inputs,\r\n attention_mask=None,\r\n token_type_ids=None,\r\n position_ids=None,\r\n head_mask=None,\r\n inputs_embeds=None,\r\n output_attentions=None,\r\n output_hidden_states=None,\r\n return_dict=None,\r\n training=False,\r\n ):\r\n if isinstance(inputs, (tuple, list)):\r\n input_ids = inputs[0]\r\n attention_mask = inputs[1] if len(inputs) > 1 else attention_mask\r\n token_type_ids = inputs[2] if len(inputs) > 2 else token_type_ids\r\n position_ids = inputs[3] if len(inputs) > 3 else position_ids\r\n head_mask = inputs[4] if len(inputs) > 4 else head_mask\r\n inputs_embeds = inputs[5] if len(inputs) > 5 else inputs_embeds\r\n output_attentions = inputs[6] if len(inputs) > 6 else output_attentions\r\n output_hidden_states = inputs[7] if len(inputs) > 7 else output_hidden_states\r\n return_dict = inputs[8] if len(inputs) > 8 else return_dict\r\n assert len(inputs) <= 9, \"Too many inputs.\"\r\n elif isinstance(inputs, (dict, BatchEncoding)):\r\n input_ids = inputs.get(\"input_ids\")\r\n attention_mask = inputs.get(\"attention_mask\", attention_mask)\r\n token_type_ids = inputs.get(\"token_type_ids\", token_type_ids)\r\n position_ids = inputs.get(\"position_ids\", position_ids)\r\n head_mask = inputs.get(\"head_mask\", head_mask)\r\n inputs_embeds = inputs.get(\"inputs_embeds\", inputs_embeds)\r\n output_attentions = inputs.get(\"output_attentions\", output_attentions)\r\n output_hidden_states = inputs.get(\"output_hidden_states\", output_hidden_states)\r\n return_dict = inputs.get(\"return_dict\", return_dict)\r\n assert len(inputs) <= 9, \"Too many inputs.\"\r\n else:\r\n input_ids = inputs\r\n\r\n output_attentions = output_attentions if output_attentions is not None else self.output_attentions\r\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.output_hidden_states\r\n return_dict = return_dict if return_dict is not 
None else self.return_dict\r\n\r\n if input_ids is not None and inputs_embeds is not None:\r\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\r\n elif input_ids is not None:\r\n input_shape = shape_list(input_ids)\r\n elif inputs_embeds is not None:\r\n input_shape = shape_list(inputs_embeds)[:-1]\r\n else:\r\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\r\n\r\n if attention_mask is None:\r\n attention_mask = tf.fill(input_shape, 1)\r\n if token_type_ids is None:\r\n token_type_ids = tf.fill(input_shape, 0)\r\n\r\n # We create a 3D attention mask from a 2D tensor mask.\r\n # Sizes are [batch_size, 1, 1, to_seq_length]\r\n # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]\r\n # this attention mask is more simple than the triangular masking of causal attention\r\n # used in OpenAI GPT, we just need to prepare the broadcast dimension here.\r\n extended_attention_mask = attention_mask[:, tf.newaxis, tf.newaxis, :]\r\n\r\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\r\n # masked positions, this operation will create a tensor which is 0.0 for\r\n # positions we want to attend and -10000.0 for masked positions.\r\n # Since we are adding it to the raw scores before the softmax, this is\r\n # effectively the same as removing these entirely.\r\n\r\n extended_attention_mask = tf.cast(extended_attention_mask, tf.float32)\r\n extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0\r\n\r\n # Prepare head mask if needed\r\n # 1.0 in head_mask indicate we keep the head\r\n # attention_probs has shape bsz x n_heads x N x N\r\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\r\n # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]\r\n if head_mask is not None:\r\n raise NotImplementedError\r\n else:\r\n head_mask = [None] * self.num_hidden_layers\r\n # head_mask = tf.constant([0] * self.num_hidden_layers)\r\n\r\n embedding_output = self.embeddings(input_ids, position_ids, token_type_ids, inputs_embeds, training=training)\r\n encoder_outputs = self.encoder(\r\n embedding_output,\r\n extended_attention_mask,\r\n head_mask,\r\n output_attentions,\r\n output_hidden_states,\r\n return_dict,\r\n training=training,\r\n )\r\n\r\n sequence_output = encoder_outputs[0]\r\n pooled_output = self.pooler(sequence_output)\r\n\r\n if not return_dict:\r\n return (\r\n sequence_output,\r\n pooled_output,\r\n ) + encoder_outputs[1:]\r\n\r\n return TFBaseModelOutputWithPooling(\r\n last_hidden_state=sequence_output,\r\n pooler_output=pooled_output,\r\n hidden_states=encoder_outputs.hidden_states,\r\n attentions=encoder_outputs.attentions,\r\n )\r\n\r\n\r\n####################################################\r\n# TFXxxPreTrainedModel is a sub-class of tf.keras.Model\r\n# which take care of loading and saving pretrained weights\r\n# and various common utilities.\r\n# Here you just need to specify a few (self-explanatory)\r\n# pointers for your model.\r\n####################################################\r\nclass TFXxxPreTrainedModel(TFPreTrainedModel):\r\n \"\"\"An abstract class to handle weights initialization and\r\n a simple interface for downloading and loading pretrained models.\r\n \"\"\"\r\n\r\n config_class = XxxConfig\r\n base_model_prefix = \"transformer\"\r\n\r\n\r\nXXX_START_DOCSTRING = r\"\"\"\r\n\r\n The XXX model was proposed in\r\n `XXX: Pre-training of Deep Bidirectional Transformers 
for Language Understanding\r\n <https://arxiv.org/abs/1810.04805>`__ by....\r\n\r\n This model inherits from :class:`~transformers.TFPreTrainedModel`. Check the superclass documentation for the\r\n generic methods the library implements for all its model (such as downloading or saving, resizing the input\r\n embeddings, pruning heads etc.)\r\n\r\n This model is also a `tf.keras.Model <https://www.tensorflow.org/api_docs/python/tf/keras/Model>`__ subclass.\r\n Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general\r\n usage and behavior.\r\n\r\n .. note::\r\n\r\n TF 2.0 models accepts two formats as inputs:\r\n\r\n - having all inputs as keyword arguments (like PyTorch models), or\r\n - having all inputs as a list, tuple or dict in the first positional arguments.\r\n\r\n This second option is useful when using :meth:`tf.keras.Model.fit` method which currently requires having\r\n all the tensors in the first argument of the model call function: :obj:`model(inputs)`.\r\n\r\n If you choose this second option, there are three possibilities you can use to gather all the input Tensors\r\n in the first positional argument :\r\n\r\n - a single Tensor with :obj:`input_ids` only and nothing else: :obj:`model(inputs_ids)`\r\n - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:\r\n :obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])`\r\n - a dictionary with one or several input Tensors associated to the input names given in the docstring:\r\n :obj:`model({\"input_ids\": input_ids, \"token_type_ids\": token_type_ids})`\r\n\r\n Parameters:\r\n config (:class:`~transformers.XxxConfig`): Model configuration class with all the parameters of the model.\r\n Initializing with a config file does not load the weights associated with the model, only the configuration.\r\n Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.\r\n\"\"\"\r\n\r\nXXX_INPUTS_DOCSTRING = r\"\"\"\r\n Args:\r\n input_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`):\r\n Indices of input sequence tokens in the vocabulary.\r\n\r\n Indices can be obtained using :class:`~transformers.BertTokenizer`.\r\n See :func:`transformers.PreTrainedTokenizer.__call__` and\r\n :func:`transformers.PreTrainedTokenizer.encode` for details.\r\n\r\n `What are input IDs? <../glossary.html#input-ids>`__\r\n attention_mask (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`):\r\n Mask to avoid performing attention on padding token indices.\r\n Mask values selected in ``[0, 1]``:\r\n\r\n - 1 for tokens that are **not masked**,\r\n - 0 for tokens that are **masked**.\r\n\r\n `What are attention masks? <../glossary.html#attention-mask>`__\r\n token_type_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`):\r\n Segment token indices to indicate first and second portions of the inputs.\r\n Indices are selected in ``[0, 1]``:\r\n\r\n - 0 corresponds to a `sentence A` token,\r\n - 1 corresponds to a `sentence B` token.\r\n\r\n `What are token type IDs? <../glossary.html#token-type-ids>`__\r\n position_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`):\r\n Indices of positions of each input sequence tokens in the position embeddings.\r\n Selected in the range ``[0, config.max_position_embeddings - 1]``.\r\n\r\n `What are position IDs? 
<../glossary.html#position-ids>`__\r\n head_mask (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):\r\n Mask to nullify selected heads of the self-attention modules.\r\n Mask values selected in ``[0, 1]``:\r\n\r\n - 1 indicates the head is **not masked**,\r\n - 0 indicates the head is **masked**.\r\n\r\n inputs_embeds (:obj:`tf.Tensor` of shape :obj:`({0}, hidden_size)`, `optional`):\r\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\r\n This is useful if you want more control over how to convert :obj:`input_ids` indices into associated\r\n vectors than the model's internal embedding lookup matrix.\r\n output_attentions (:obj:`bool`, `optional`):\r\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned\r\n tensors for more detail.\r\n output_hidden_states (:obj:`bool`, `optional`):\r\n Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for\r\n more detail.\r\n return_dict (:obj:`bool`, `optional`):\r\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\r\n training (:obj:`bool`, `optional`, defaults to :obj:`False`):\r\n Whether or not to use the model in training mode (some modules like dropout modules have different\r\n behaviors between training and evaluation).\r\n\"\"\"\r\n\r\n\r\n@add_start_docstrings(\r\n \"The bare XXX Model transformer outputting raw hidden-states without any specific head on top.\",\r\n XXX_START_DOCSTRING,\r\n)\r\nclass TFXxxModel(TFXxxPreTrainedModel):\r\n def __init__(self, config, *inputs, **kwargs):\r\n super().__init__(config, *inputs, **kwargs)\r\n self.transformer = TFXxxMainLayer(config, name=\"transformer\")\r\n\r\n @add_start_docstrings_to_model_forward(XXX_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\r\n @add_code_sample_docstrings(\r\n tokenizer_class=_TOKENIZER_FOR_DOC,\r\n checkpoint=\"xxx-base-cased\",\r\n output_type=TFBaseModelOutputWithPooling,\r\n config_class=_CONFIG_FOR_DOC,\r\n )\r\n def call(self, inputs, **kwargs):\r\n outputs = self.transformer(inputs, **kwargs)\r\n return outputs\r\n\r\n\r\nTFXxxMLMHead = tf.keras.layers.Layer\r\n\r\n\r\n@add_start_docstrings(\"\"\"Xxx Model with a `language modeling` head on top. 
\"\"\", XXX_START_DOCSTRING)\r\nclass TFXxxForMaskedLM(TFXxxPreTrainedModel, TFMaskedLanguageModelingLoss):\r\n def __init__(self, config, *inputs, **kwargs):\r\n super().__init__(config, *inputs, **kwargs)\r\n\r\n self.transformer = TFXxxMainLayer(config, name=\"transformer\")\r\n self.mlm = TFXxxMLMHead(config, self.transformer.embeddings, name=\"mlm\")\r\n\r\n @add_start_docstrings_to_model_forward(XXX_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\r\n @add_code_sample_docstrings(\r\n tokenizer_class=_TOKENIZER_FOR_DOC,\r\n checkpoint=\"xxx-base-cased\",\r\n output_type=TFMaskedLMOutput,\r\n config_class=_CONFIG_FOR_DOC,\r\n )\r\n def call(\r\n self,\r\n inputs=None,\r\n attention_mask=None,\r\n token_type_ids=None,\r\n position_ids=None,\r\n head_mask=None,\r\n inputs_embeds=None,\r\n output_attentions=None,\r\n output_hidden_states=None,\r\n return_dict=None,\r\n labels=None,\r\n training=False,\r\n ):\r\n r\"\"\"\r\n labels (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\r\n Labels for computing the masked language modeling loss.\r\n Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)\r\n Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels\r\n in ``[0, ..., config.vocab_size]``\r\n \"\"\"\r\n return_dict = return_dict if return_dict is not None else self.transformer.return_dict\r\n if isinstance(inputs, (tuple, list)):\r\n labels = inputs[9] if len(inputs) > 9 else labels\r\n if len(inputs) > 9:\r\n inputs = inputs[:9]\r\n elif isinstance(inputs, (dict, BatchEncoding)):\r\n labels = inputs.pop(\"labels\", labels)\r\n\r\n outputs = self.transformer(\r\n inputs,\r\n attention_mask=attention_mask,\r\n token_type_ids=token_type_ids,\r\n position_ids=position_ids,\r\n head_mask=head_mask,\r\n inputs_embeds=inputs_embeds,\r\n output_attentions=output_attentions,\r\n output_hidden_states=output_hidden_states,\r\n return_dict=return_dict,\r\n training=training,\r\n )\r\n\r\n sequence_output = outputs[0]\r\n prediction_scores = self.mlm(sequence_output, training=training)\r\n\r\n loss = None if labels is None else self.compute_loss(labels, prediction_scores)\r\n\r\n if not return_dict:\r\n output = (prediction_scores,) + outputs[2:]\r\n return ((loss,) + output) if loss is not None else output\r\n\r\n return TFMaskedLMOutput(\r\n loss=loss,\r\n logits=prediction_scores,\r\n hidden_states=outputs.hidden_states,\r\n attentions=outputs.attentions,\r\n )\r\n\r\n\r\n@add_start_docstrings(\r\n \"\"\"XXX Model transformer with a sequence classification/regression head on top (a linear layer on top of\r\n the pooled output) e.g. for GLUE tasks. 
\"\"\",\r\n XXX_START_DOCSTRING,\r\n)\r\nclass TFXxxForSequenceClassification(TFXxxPreTrainedModel, TFSequenceClassificationLoss):\r\n def __init__(self, config, *inputs, **kwargs):\r\n super().__init__(config, *inputs, **kwargs)\r\n self.num_labels = config.num_labels\r\n\r\n self.transformer = TFXxxMainLayer(config, name=\"transformer\")\r\n self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)\r\n self.classifier = tf.keras.layers.Dense(\r\n config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name=\"classifier\"\r\n )\r\n\r\n @add_start_docstrings_to_model_forward(XXX_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\r\n @add_code_sample_docstrings(\r\n tokenizer_class=_TOKENIZER_FOR_DOC,\r\n checkpoint=\"xxx-base-cased\",\r\n output_type=TFSequenceClassifierOutput,\r\n config_class=_CONFIG_FOR_DOC,\r\n )\r\n def call(\r\n self,\r\n inputs=None,\r\n attention_mask=None,\r\n token_type_ids=None,\r\n position_ids=None,\r\n head_mask=None,\r\n inputs_embeds=None,\r\n output_attentions=None,\r\n output_hidden_states=None,\r\n return_dict=None,\r\n labels=None,\r\n training=False,\r\n ):\r\n r\"\"\"\r\n labels (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`):\r\n Labels for computing the sequence classification/regression loss.\r\n Indices should be in :obj:`[0, ..., config.num_labels - 1]`.\r\n If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),\r\n If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).\r\n \"\"\"\r\n return_dict = return_dict if return_dict is not None else self.transformer.return_dict\r\n if isinstance(inputs, (tuple, list)):\r\n labels = inputs[9] if len(inputs) > 9 else labels\r\n if len(inputs) > 9:\r\n inputs = inputs[:9]\r\n elif isinstance(inputs, (dict, BatchEncoding)):\r\n labels = inputs.pop(\"labels\", labels)\r\n\r\n outputs = self.transformer(\r\n inputs,\r\n attention_mask=attention_mask,\r\n token_type_ids=token_type_ids,\r\n position_ids=position_ids,\r\n head_mask=head_mask,\r\n inputs_embeds=inputs_embeds,\r\n output_attentions=output_attentions,\r\n output_hidden_states=output_hidden_states,\r\n return_dict=return_dict,\r\n training=training,\r\n )\r\n\r\n pooled_output = outputs[1]\r\n\r\n pooled_output = self.dropout(pooled_output, training=training)\r\n logits = self.classifier(pooled_output)\r\n\r\n loss = None if labels is None else self.compute_loss(labels, logits)\r\n\r\n if not return_dict:\r\n output = (logits,) + outputs[2:]\r\n return ((loss,) + output) if loss is not None else output\r\n\r\n return TFSequenceClassifierOutput(\r\n loss=loss,\r\n logits=logits,\r\n hidden_states=outputs.hidden_states,\r\n attentions=outputs.attentions,\r\n )\r\n\r\n\r\n@add_start_docstrings(\r\n \"\"\"XXX Model with a multiple choice classification head on top (a linear layer on top of\r\n the pooled output and a softmax) e.g. for RocStories/SWAG tasks. 
\"\"\",\r\n XXX_START_DOCSTRING,\r\n)\r\nclass TFXxxForMultipleChoice(TFXxxPreTrainedModel, TFMultipleChoiceLoss):\r\n def __init__(self, config, *inputs, **kwargs):\r\n super().__init__(config, *inputs, **kwargs)\r\n\r\n self.transformer = TFXxxMainLayer(config, name=\"transformer\")\r\n self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)\r\n self.classifier = tf.keras.layers.Dense(\r\n 1, kernel_initializer=get_initializer(config.initializer_range), name=\"classifier\"\r\n )\r\n\r\n @property\r\n def dummy_inputs(self):\r\n \"\"\"Dummy inputs to build the network.\r\n\r\n Returns:\r\n tf.Tensor with dummy inputs\r\n \"\"\"\r\n return {\"input_ids\": tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS)}\r\n\r\n @add_start_docstrings_to_model_forward(XXX_INPUTS_DOCSTRING.format(\"batch_size, num_choices, sequence_length\"))\r\n @add_code_sample_docstrings(\r\n tokenizer_class=_TOKENIZER_FOR_DOC,\r\n checkpoint=\"xxx-base-cased\",\r\n output_type=TFMultipleChoiceModelOutput,\r\n config_class=_CONFIG_FOR_DOC,\r\n )\r\n def call(\r\n self,\r\n inputs,\r\n attention_mask=None,\r\n token_type_ids=None,\r\n position_ids=None,\r\n head_mask=None,\r\n inputs_embeds=None,\r\n output_attentions=None,\r\n output_hidden_states=None,\r\n return_dict=None,\r\n labels=None,\r\n training=False,\r\n ):\r\n r\"\"\"\r\n labels (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`):\r\n Labels for computing the multiple choice classification loss.\r\n Indices should be in ``[0, ..., num_choices]`` where :obj:`num_choices` is the size of the second dimension\r\n of the input tensors. (See :obj:`input_ids` above)\r\n heads.\r\n \"\"\"\r\n if isinstance(inputs, (tuple, list)):\r\n input_ids = inputs[0]\r\n attention_mask = inputs[1] if len(inputs) > 1 else attention_mask\r\n token_type_ids = inputs[2] if len(inputs) > 2 else token_type_ids\r\n position_ids = inputs[3] if len(inputs) > 3 else position_ids\r\n head_mask = inputs[4] if len(inputs) > 4 else head_mask\r\n inputs_embeds = inputs[5] if len(inputs) > 5 else inputs_embeds\r\n output_attentions = inputs[6] if len(inputs) > 6 else output_attentions\r\n output_hidden_states = inputs[7] if len(inputs) > 7 else output_hidden_states\r\n return_dict = inputs[8] if len(inputs) > 8 else return_dict\r\n labels = inputs[9] if len(inputs) > 9 else labels\r\n assert len(inputs) <= 10, \"Too many inputs.\"\r\n elif isinstance(inputs, (dict, BatchEncoding)):\r\n input_ids = inputs.get(\"input_ids\")\r\n attention_mask = inputs.get(\"attention_mask\", attention_mask)\r\n token_type_ids = inputs.get(\"token_type_ids\", token_type_ids)\r\n position_ids = inputs.get(\"position_ids\", position_ids)\r\n head_mask = inputs.get(\"head_mask\", head_mask)\r\n inputs_embeds = inputs.get(\"inputs_embeds\", inputs_embeds)\r\n output_attentions = inputs.get(\"output_attentions\", output_attentions)\r\n output_hidden_states = inputs.get(\"output_hidden_states\", output_hidden_states)\r\n return_dict = inputs.get(\"return_dict\", return_dict)\r\n labels = inputs.get(\"labels\", labels)\r\n assert len(inputs) <= 10, \"Too many inputs.\"\r\n else:\r\n input_ids = inputs\r\n return_dict = return_dict if return_dict is not None else self.transformer.return_dict\r\n\r\n if input_ids is not None:\r\n num_choices = shape_list(input_ids)[1]\r\n seq_length = shape_list(input_ids)[2]\r\n else:\r\n num_choices = shape_list(inputs_embeds)[1]\r\n seq_length = shape_list(inputs_embeds)[2]\r\n\r\n flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else 
None\r\n flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None\r\n flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None\r\n flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None\r\n flat_inputs_embeds = (\r\n tf.reshape(inputs_embeds, (-1, seq_length, shape_list(inputs_embeds)[3]))\r\n if inputs_embeds is not None\r\n else None\r\n )\r\n\r\n flat_inputs = [\r\n flat_input_ids,\r\n flat_attention_mask,\r\n flat_token_type_ids,\r\n flat_position_ids,\r\n head_mask,\r\n flat_inputs_embeds,\r\n output_attentions,\r\n output_hidden_states,\r\n return_dict,\r\n ]\r\n\r\n outputs = self.transformer(flat_inputs, training=training)\r\n\r\n pooled_output = outputs[1]\r\n\r\n pooled_output = self.dropout(pooled_output, training=training)\r\n logits = self.classifier(pooled_output)\r\n reshaped_logits = tf.reshape(logits, (-1, num_choices))\r\n\r\n loss = None if labels is None else self.compute_loss(labels, reshaped_logits)\r\n\r\n if not return_dict:\r\n output = (reshaped_logits,) + outputs[2:]\r\n return ((loss,) + output) if loss is not None else output\r\n\r\n return TFMultipleChoiceModelOutput(\r\n loss=loss,\r\n logits=reshaped_logits,\r\n hidden_states=outputs.hidden_states,\r\n attentions=outputs.attentions,\r\n )\r\n\r\n\r\n@add_start_docstrings(\r\n \"\"\"XXX Model with a token classification head on top (a linear layer on top of\r\n the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. \"\"\",\r\n XXX_START_DOCSTRING,\r\n)\r\nclass TFXxxForTokenClassification(TFXxxPreTrainedModel, TFTokenClassificationLoss):\r\n def __init__(self, config, *inputs, **kwargs):\r\n super().__init__(config, *inputs, **kwargs)\r\n self.num_labels = config.num_labels\r\n\r\n self.transformer = TFXxxMainLayer(config, name=\"transformer\")\r\n self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)\r\n self.classifier = tf.keras.layers.Dense(\r\n config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name=\"classifier\"\r\n )\r\n\r\n @add_start_docstrings_to_model_forward(XXX_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\r\n @add_code_sample_docstrings(\r\n tokenizer_class=_TOKENIZER_FOR_DOC,\r\n checkpoint=\"xxx-base-cased\",\r\n output_type=TFTokenClassifierOutput,\r\n config_class=_CONFIG_FOR_DOC,\r\n )\r\n def call(\r\n self,\r\n inputs=None,\r\n attention_mask=None,\r\n token_type_ids=None,\r\n position_ids=None,\r\n head_mask=None,\r\n inputs_embeds=None,\r\n output_attentions=None,\r\n output_hidden_states=None,\r\n return_dict=None,\r\n labels=None,\r\n training=False,\r\n ):\r\n r\"\"\"\r\n labels (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\r\n Labels for computing the token classification loss.\r\n Indices should be in ``[0, ..., config.num_labels - 1]``.\r\n \"\"\"\r\n return_dict = return_dict if return_dict is not None else self.transformer.return_dict\r\n if isinstance(inputs, (tuple, list)):\r\n labels = inputs[9] if len(inputs) > 9 else labels\r\n if len(inputs) > 9:\r\n inputs = inputs[:9]\r\n elif isinstance(inputs, (dict, BatchEncoding)):\r\n labels = inputs.pop(\"labels\", labels)\r\n\r\n outputs = self.transformer(\r\n inputs,\r\n attention_mask=attention_mask,\r\n token_type_ids=token_type_ids,\r\n position_ids=position_ids,\r\n head_mask=head_mask,\r\n inputs_embeds=inputs_embeds,\r\n output_attentions=output_attentions,\r\n 
output_hidden_states=output_hidden_states,\r\n return_dict=return_dict,\r\n training=training,\r\n )\r\n\r\n sequence_output = outputs[0]\r\n\r\n sequence_output = self.dropout(sequence_output, training=training)\r\n logits = self.classifier(sequence_output)\r\n\r\n loss = None if labels is None else self.compute_loss(labels, logits)\r\n\r\n if not return_dict:\r\n output = (logits,) + outputs[2:]\r\n return ((loss,) + output) if loss is not None else output\r\n\r\n return TFTokenClassifierOutput(\r\n loss=loss,\r\n logits=logits,\r\n hidden_states=outputs.hidden_states,\r\n attentions=outputs.attentions,\r\n )\r\n\r\n\r\n@add_start_docstrings(\r\n \"\"\"XXX Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\r\n layer on top of the hidden-states output to compute `span start logits` and `span end logits`). \"\"\",\r\n XXX_START_DOCSTRING,\r\n)\r\nclass TFXxxForQuestionAnswering(TFXxxPreTrainedModel, TFQuestionAnsweringLoss):\r\n def __init__(self, config, *inputs, **kwargs):\r\n super().__init__(config, *inputs, **kwargs)\r\n self.num_labels = config.num_labels\r\n\r\n self.transformer = TFXxxMainLayer(config, name=\"transformer\")\r\n self.qa_outputs = tf.keras.layers.Dense(\r\n config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name=\"qa_outputs\"\r\n )\r\n\r\n @add_start_docstrings_to_model_forward(XXX_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\r\n @add_code_sample_docstrings(\r\n tokenizer_class=_TOKENIZER_FOR_DOC,\r\n checkpoint=\"xxx-base-cased\",\r\n output_type=TFQuestionAnsweringModelOutput,\r\n config_class=_CONFIG_FOR_DOC,\r\n )\r\n def call(\r\n self,\r\n inputs=None,\r\n attention_mask=None,\r\n token_type_ids=None,\r\n position_ids=None,\r\n head_mask=None,\r\n inputs_embeds=None,\r\n output_attentions=None,\r\n output_hidden_states=None,\r\n return_dict=None,\r\n start_positions=None,\r\n end_positions=None,\r\n training=False,\r\n ):\r\n r\"\"\"\r\n start_positions (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`):\r\n Labels for position (index) of the start of the labelled span for computing the token classification loss.\r\n Positions are clamped to the length of the sequence (:obj:`sequence_length`).\r\n Position outside of the sequence are not taken into account for computing the loss.\r\n end_positions (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`):\r\n Labels for position (index) of the end of the labelled span for computing the token classification loss.\r\n Positions are clamped to the length of the sequence (:obj:`sequence_length`).\r\n Position outside of the sequence are not taken into account for computing the loss.\r\n \"\"\"\r\n return_dict = return_dict if return_dict is not None else self.transformer.return_dict\r\n if isinstance(inputs, (tuple, list)):\r\n start_positions = inputs[9] if len(inputs) > 9 else start_positions\r\n end_positions = inputs[10] if len(inputs) > 10 else end_positions\r\n if len(inputs) > 9:\r\n inputs = inputs[:9]\r\n elif isinstance(inputs, (dict, BatchEncoding)):\r\n start_positions = inputs.pop(\"start_positions\", start_positions)\r\n end_positions = inputs.pop(\"end_positions\", start_positions)\r\n\r\n outputs = self.transformer(\r\n inputs,\r\n attention_mask=attention_mask,\r\n token_type_ids=token_type_ids,\r\n position_ids=position_ids,\r\n head_mask=head_mask,\r\n inputs_embeds=inputs_embeds,\r\n output_attentions=output_attentions,\r\n output_hidden_states=output_hidden_states,\r\n 
return_dict=return_dict,\r\n training=training,\r\n )\r\n\r\n sequence_output = outputs[0]\r\n\r\n logits = self.qa_outputs(sequence_output)\r\n start_logits, end_logits = tf.split(logits, 2, axis=-1)\r\n start_logits = tf.squeeze(start_logits, axis=-1)\r\n end_logits = tf.squeeze(end_logits, axis=-1)\r\n\r\n loss = None\r\n if start_positions is not None and end_positions is not None:\r\n labels = {\"start_position\": start_positions}\r\n labels[\"end_position\"] = end_positions\r\n loss = self.compute_loss(labels, (start_logits, end_logits))\r\n\r\n if not return_dict:\r\n output = (start_logits, end_logits) + outputs[2:]\r\n return ((loss,) + output) if loss is not None else output\r\n\r\n return TFQuestionAnsweringModelOutput(\r\n loss=loss,\r\n start_logits=start_logits,\r\n end_logits=end_logits,\r\n hidden_states=outputs.hidden_states,\r\n attentions=outputs.attentions,\r\n )\r\n", "import getopt\r\nimport json\r\nimport os\r\n\r\n# import numpy as np\r\nimport sys\r\nfrom collections import OrderedDict\r\n\r\nimport datasets\r\nimport numpy as np\r\nimport torch\r\n\r\nfrom modeling_frcnn import GeneralizedRCNN\r\nfrom processing_image import Preprocess\r\nfrom utils import Config\r\n\r\n\r\n\"\"\"\r\nUSAGE:\r\n``python extracting_data.py -i <img_dir> -o <dataset_file>.datasets <batch_size>``\r\n\"\"\"\r\n\r\n\r\nTEST = False\r\nCONFIG = Config.from_pretrained(\"unc-nlp/frcnn-vg-finetuned\")\r\nDEFAULT_SCHEMA = datasets.Features(\r\n OrderedDict(\r\n {\r\n \"attr_ids\": datasets.Sequence(length=CONFIG.MAX_DETECTIONS, feature=datasets.Value(\"float32\")),\r\n \"attr_probs\": datasets.Sequence(length=CONFIG.MAX_DETECTIONS, feature=datasets.Value(\"float32\")),\r\n \"boxes\": datasets.Array2D((CONFIG.MAX_DETECTIONS, 4), dtype=\"float32\"),\r\n \"img_id\": datasets.Value(\"int32\"),\r\n \"obj_ids\": datasets.Sequence(length=CONFIG.MAX_DETECTIONS, feature=datasets.Value(\"float32\")),\r\n \"obj_probs\": datasets.Sequence(length=CONFIG.MAX_DETECTIONS, feature=datasets.Value(\"float32\")),\r\n \"roi_features\": datasets.Array2D((CONFIG.MAX_DETECTIONS, 2048), dtype=\"float32\"),\r\n \"sizes\": datasets.Sequence(length=2, feature=datasets.Value(\"float32\")),\r\n \"preds_per_image\": datasets.Value(dtype=\"int32\"),\r\n }\r\n )\r\n)\r\n\r\n\r\nclass Extract:\r\n def __init__(self, argv=sys.argv[1:]):\r\n inputdir = None\r\n outputfile = None\r\n subset_list = None\r\n batch_size = 1\r\n opts, args = getopt.getopt(argv, \"i:o:b:s\", [\"inputdir=\", \"outfile=\", \"batch_size=\", \"subset_list=\"])\r\n for opt, arg in opts:\r\n if opt in (\"-i\", \"--inputdir\"):\r\n inputdir = arg\r\n elif opt in (\"-o\", \"--outfile\"):\r\n outputfile = arg\r\n elif opt in (\"-b\", \"--batch_size\"):\r\n batch_size = int(arg)\r\n elif opt in (\"-s\", \"--subset_list\"):\r\n subset_list = arg\r\n\r\n assert inputdir is not None # and os.path.isdir(inputdir), f\"{inputdir}\"\r\n assert outputfile is not None and not os.path.isfile(outputfile), f\"{outputfile}\"\r\n if subset_list is not None:\r\n with open(os.path.realpath(subset_list)) as f:\r\n self.subset_list = set(map(lambda x: self._vqa_file_split()[0], tryload(f)))\r\n else:\r\n self.subset_list = None\r\n\r\n self.config = CONFIG\r\n if torch.cuda.is_available():\r\n self.config.model.device = \"cuda\"\r\n self.inputdir = os.path.realpath(inputdir)\r\n self.outputfile = os.path.realpath(outputfile)\r\n self.preprocess = Preprocess(self.config)\r\n self.model = GeneralizedRCNN.from_pretrained(\"unc-nlp/frcnn-vg-finetuned\", config=self.config)\r\n 
self.batch = batch_size if batch_size != 0 else 1\r\n self.schema = DEFAULT_SCHEMA\r\n\r\n def _vqa_file_split(self, file):\r\n img_id = int(file.split(\".\")[0].split(\"_\")[-1])\r\n filepath = os.path.join(self.inputdir, file)\r\n return (img_id, filepath)\r\n\r\n @property\r\n def file_generator(self):\r\n batch = []\r\n for i, file in enumerate(os.listdir(self.inputdir)):\r\n if self.subset_list is not None and i not in self.subset_list:\r\n continue\r\n batch.append(self._vqa_file_split(file))\r\n if len(batch) == self.batch:\r\n temp = batch\r\n batch = []\r\n yield list(map(list, zip(*temp)))\r\n\r\n for i in range(1):\r\n yield list(map(list, zip(*batch)))\r\n\r\n def __call__(self):\r\n # make writer\r\n if not TEST:\r\n writer = datasets.ArrowWriter(features=self.schema, path=self.outputfile)\r\n # do file generator\r\n for i, (img_ids, filepaths) in enumerate(self.file_generator):\r\n images, sizes, scales_yx = self.preprocess(filepaths)\r\n output_dict = self.model(\r\n images,\r\n sizes,\r\n scales_yx=scales_yx,\r\n padding=\"max_detections\",\r\n max_detections=self.config.MAX_DETECTIONS,\r\n pad_value=0,\r\n return_tensors=\"np\",\r\n location=\"cpu\",\r\n )\r\n output_dict[\"boxes\"] = output_dict.pop(\"normalized_boxes\")\r\n if not TEST:\r\n output_dict[\"img_id\"] = np.array(img_ids)\r\n batch = self.schema.encode_batch(output_dict)\r\n writer.write_batch(batch)\r\n if TEST:\r\n break\r\n # finalizer the writer\r\n if not TEST:\r\n num_examples, num_bytes = writer.finalize()\r\n print(f\"Success! You wrote {num_examples} entry(s) and {num_bytes >> 20} mb\")\r\n\r\n\r\ndef tryload(stream):\r\n try:\r\n data = json.load(stream)\r\n try:\r\n data = list(data.keys())\r\n except Exception:\r\n data = [d[\"img_id\"] for d in data]\r\n except Exception:\r\n try:\r\n data = eval(stream.read())\r\n except Exception:\r\n data = stream.read().split(\"\\n\")\r\n return data\r\n\r\n\r\nif __name__ == \"__main__\":\r\n extract = Extract(sys.argv[1:])\r\n extract()\r\n if not TEST:\r\n dataset = datasets.Dataset.from_file(extract.outputfile)\r\n # wala!\r\n # print(np.array(dataset[0:2][\"roi_features\"]).shape)\r\n", "# coding=utf-8\r\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\r\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\"\"\"PyTorch RoBERTa model. 
\"\"\"\r\n\r\nimport math\r\nimport warnings\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nfrom torch.nn import CrossEntropyLoss, MSELoss\r\n\r\nfrom .activations import ACT2FN, gelu\r\nfrom .configuration_roberta import RobertaConfig\r\nfrom .file_utils import (\r\n add_code_sample_docstrings,\r\n add_start_docstrings,\r\n add_start_docstrings_to_model_forward,\r\n replace_return_docstrings,\r\n)\r\nfrom .modeling_outputs import (\r\n BaseModelOutput,\r\n BaseModelOutputWithPooling,\r\n CausalLMOutput,\r\n MaskedLMOutput,\r\n MultipleChoiceModelOutput,\r\n QuestionAnsweringModelOutput,\r\n SequenceClassifierOutput,\r\n TokenClassifierOutput,\r\n)\r\nfrom .modeling_utils import (\r\n PreTrainedModel,\r\n apply_chunking_to_forward,\r\n find_pruneable_heads_and_indices,\r\n prune_linear_layer,\r\n)\r\nfrom .utils import logging\r\n\r\n\r\nlogger = logging.get_logger(__name__)\r\n\r\n_CONFIG_FOR_DOC = \"RobertaConfig\"\r\n_TOKENIZER_FOR_DOC = \"RobertaTokenizer\"\r\n\r\nROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = [\r\n \"roberta-base\",\r\n \"roberta-large\",\r\n \"roberta-large-mnli\",\r\n \"distilroberta-base\",\r\n \"roberta-base-openai-detector\",\r\n \"roberta-large-openai-detector\",\r\n # See all RoBERTa models at https://huggingface.co/models?filter=roberta\r\n]\r\n\r\n\r\nclass RobertaEmbeddings(nn.Module):\r\n \"\"\"\r\n Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.\r\n \"\"\"\r\n\r\n # Copied from transformers.modeling_bert.BertEmbeddings.__init__\r\n def __init__(self, config):\r\n super().__init__()\r\n self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)\r\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)\r\n self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)\r\n\r\n # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load\r\n # any TensorFlow checkpoint file\r\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\r\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\r\n\r\n # position_ids (1, len position emb) is contiguous in memory and exported when serialized\r\n self.register_buffer(\"position_ids\", torch.arange(config.max_position_embeddings).expand((1, -1)))\r\n\r\n # End copy\r\n self.padding_idx = config.pad_token_id\r\n self.position_embeddings = nn.Embedding(\r\n config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx\r\n )\r\n\r\n def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):\r\n if position_ids is None:\r\n if input_ids is not None:\r\n # Create the position ids from the input token ids. 
Any padded tokens remain padded.\r\n position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx).to(input_ids.device)\r\n else:\r\n position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)\r\n\r\n # Copied from transformers.modeling_bert.BertEmbeddings.forward\r\n if input_ids is not None:\r\n input_shape = input_ids.size()\r\n else:\r\n input_shape = inputs_embeds.size()[:-1]\r\n\r\n seq_length = input_shape[1]\r\n\r\n if position_ids is None:\r\n position_ids = self.position_ids[:, :seq_length]\r\n\r\n if token_type_ids is None:\r\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)\r\n\r\n if inputs_embeds is None:\r\n inputs_embeds = self.word_embeddings(input_ids)\r\n position_embeddings = self.position_embeddings(position_ids)\r\n token_type_embeddings = self.token_type_embeddings(token_type_ids)\r\n\r\n embeddings = inputs_embeds + position_embeddings + token_type_embeddings\r\n embeddings = self.LayerNorm(embeddings)\r\n embeddings = self.dropout(embeddings)\r\n return embeddings\r\n\r\n def create_position_ids_from_inputs_embeds(self, inputs_embeds):\r\n \"\"\"\r\n We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.\r\n\r\n Args:\r\n inputs_embeds: torch.Tensor\r\n\r\n Returns: torch.Tensor\r\n \"\"\"\r\n input_shape = inputs_embeds.size()[:-1]\r\n sequence_length = input_shape[1]\r\n\r\n position_ids = torch.arange(\r\n self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device\r\n )\r\n return position_ids.unsqueeze(0).expand(input_shape)\r\n\r\n\r\n# Copied from transformers.modeling_bert.BertSelfAttention with Bert->Roberta\r\nclass RobertaSelfAttention(nn.Module):\r\n def __init__(self, config):\r\n super().__init__()\r\n if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, \"embedding_size\"):\r\n raise ValueError(\r\n \"The hidden size (%d) is not a multiple of the number of attention \"\r\n \"heads (%d)\" % (config.hidden_size, config.num_attention_heads)\r\n )\r\n\r\n self.num_attention_heads = config.num_attention_heads\r\n self.attention_head_size = int(config.hidden_size / config.num_attention_heads)\r\n self.all_head_size = self.num_attention_heads * self.attention_head_size\r\n\r\n self.query = nn.Linear(config.hidden_size, self.all_head_size)\r\n self.key = nn.Linear(config.hidden_size, self.all_head_size)\r\n self.value = nn.Linear(config.hidden_size, self.all_head_size)\r\n\r\n self.dropout = nn.Dropout(config.attention_probs_dropout_prob)\r\n\r\n def transpose_for_scores(self, x):\r\n new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)\r\n x = x.view(*new_x_shape)\r\n return x.permute(0, 2, 1, 3)\r\n\r\n def forward(\r\n self,\r\n hidden_states,\r\n attention_mask=None,\r\n head_mask=None,\r\n encoder_hidden_states=None,\r\n encoder_attention_mask=None,\r\n output_attentions=False,\r\n ):\r\n mixed_query_layer = self.query(hidden_states)\r\n\r\n # If this is instantiated as a cross-attention module, the keys\r\n # and values come from an encoder; the attention mask needs to be\r\n # such that the encoder's padding tokens are not attended to.\r\n if encoder_hidden_states is not None:\r\n mixed_key_layer = self.key(encoder_hidden_states)\r\n mixed_value_layer = self.value(encoder_hidden_states)\r\n attention_mask = encoder_attention_mask\r\n else:\r\n mixed_key_layer = self.key(hidden_states)\r\n mixed_value_layer = 
self.value(hidden_states)\r\n\r\n query_layer = self.transpose_for_scores(mixed_query_layer)\r\n key_layer = self.transpose_for_scores(mixed_key_layer)\r\n value_layer = self.transpose_for_scores(mixed_value_layer)\r\n\r\n # Take the dot product between \"query\" and \"key\" to get the raw attention scores.\r\n attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))\r\n attention_scores = attention_scores / math.sqrt(self.attention_head_size)\r\n if attention_mask is not None:\r\n # Apply the attention mask is (precomputed for all layers in RobertaModel forward() function)\r\n attention_scores = attention_scores + attention_mask\r\n\r\n # Normalize the attention scores to probabilities.\r\n attention_probs = nn.Softmax(dim=-1)(attention_scores)\r\n\r\n # This is actually dropping out entire tokens to attend to, which might\r\n # seem a bit unusual, but is taken from the original Transformer paper.\r\n attention_probs = self.dropout(attention_probs)\r\n\r\n # Mask heads if we want to\r\n if head_mask is not None:\r\n attention_probs = attention_probs * head_mask\r\n\r\n context_layer = torch.matmul(attention_probs, value_layer)\r\n\r\n context_layer = context_layer.permute(0, 2, 1, 3).contiguous()\r\n new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)\r\n context_layer = context_layer.view(*new_context_layer_shape)\r\n\r\n outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)\r\n return outputs\r\n\r\n\r\n# Copied from transformers.modeling_bert.BertSelfOutput\r\nclass RobertaSelfOutput(nn.Module):\r\n def __init__(self, config):\r\n super().__init__()\r\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\r\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\r\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\r\n\r\n def forward(self, hidden_states, input_tensor):\r\n hidden_states = self.dense(hidden_states)\r\n hidden_states = self.dropout(hidden_states)\r\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\r\n return hidden_states\r\n\r\n\r\n# Copied from transformers.modeling_bert.BertAttention with Bert->Roberta\r\nclass RobertaAttention(nn.Module):\r\n def __init__(self, config):\r\n super().__init__()\r\n self.self = RobertaSelfAttention(config)\r\n self.output = RobertaSelfOutput(config)\r\n self.pruned_heads = set()\r\n\r\n def prune_heads(self, heads):\r\n if len(heads) == 0:\r\n return\r\n heads, index = find_pruneable_heads_and_indices(\r\n heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads\r\n )\r\n\r\n # Prune linear layers\r\n self.self.query = prune_linear_layer(self.self.query, index)\r\n self.self.key = prune_linear_layer(self.self.key, index)\r\n self.self.value = prune_linear_layer(self.self.value, index)\r\n self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)\r\n\r\n # Update hyper params and store pruned heads\r\n self.self.num_attention_heads = self.self.num_attention_heads - len(heads)\r\n self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads\r\n self.pruned_heads = self.pruned_heads.union(heads)\r\n\r\n def forward(\r\n self,\r\n hidden_states,\r\n attention_mask=None,\r\n head_mask=None,\r\n encoder_hidden_states=None,\r\n encoder_attention_mask=None,\r\n output_attentions=False,\r\n ):\r\n self_outputs = self.self(\r\n hidden_states,\r\n attention_mask,\r\n head_mask,\r\n encoder_hidden_states,\r\n encoder_attention_mask,\r\n 
output_attentions,\r\n )\r\n attention_output = self.output(self_outputs[0], hidden_states)\r\n outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them\r\n return outputs\r\n\r\n\r\n# Copied from transformers.modeling_bert.BertIntermediate\r\nclass RobertaIntermediate(nn.Module):\r\n def __init__(self, config):\r\n super().__init__()\r\n self.dense = nn.Linear(config.hidden_size, config.intermediate_size)\r\n if isinstance(config.hidden_act, str):\r\n self.intermediate_act_fn = ACT2FN[config.hidden_act]\r\n else:\r\n self.intermediate_act_fn = config.hidden_act\r\n\r\n def forward(self, hidden_states):\r\n hidden_states = self.dense(hidden_states)\r\n hidden_states = self.intermediate_act_fn(hidden_states)\r\n return hidden_states\r\n\r\n\r\n# Copied from transformers.modeling_bert.BertOutput\r\nclass RobertaOutput(nn.Module):\r\n def __init__(self, config):\r\n super().__init__()\r\n self.dense = nn.Linear(config.intermediate_size, config.hidden_size)\r\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\r\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\r\n\r\n def forward(self, hidden_states, input_tensor):\r\n hidden_states = self.dense(hidden_states)\r\n hidden_states = self.dropout(hidden_states)\r\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\r\n return hidden_states\r\n\r\n\r\n# Copied from transformers.modeling_bert.BertLayer with Bert->Roberta\r\nclass RobertaLayer(nn.Module):\r\n def __init__(self, config):\r\n super().__init__()\r\n self.chunk_size_feed_forward = config.chunk_size_feed_forward\r\n self.seq_len_dim = 1\r\n self.attention = RobertaAttention(config)\r\n self.is_decoder = config.is_decoder\r\n self.add_cross_attention = config.add_cross_attention\r\n if self.add_cross_attention:\r\n assert self.is_decoder, f\"{self} should be used as a decoder model if cross attention is added\"\r\n self.crossattention = RobertaAttention(config)\r\n self.intermediate = RobertaIntermediate(config)\r\n self.output = RobertaOutput(config)\r\n\r\n def forward(\r\n self,\r\n hidden_states,\r\n attention_mask=None,\r\n head_mask=None,\r\n encoder_hidden_states=None,\r\n encoder_attention_mask=None,\r\n output_attentions=False,\r\n ):\r\n self_attention_outputs = self.attention(\r\n hidden_states,\r\n attention_mask,\r\n head_mask,\r\n output_attentions=output_attentions,\r\n )\r\n attention_output = self_attention_outputs[0]\r\n outputs = self_attention_outputs[1:] # add self attentions if we output attention weights\r\n\r\n if self.is_decoder and encoder_hidden_states is not None:\r\n assert hasattr(\r\n self, \"crossattention\"\r\n ), f\"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`\"\r\n cross_attention_outputs = self.crossattention(\r\n attention_output,\r\n attention_mask,\r\n head_mask,\r\n encoder_hidden_states,\r\n encoder_attention_mask,\r\n output_attentions,\r\n )\r\n attention_output = cross_attention_outputs[0]\r\n outputs = outputs + cross_attention_outputs[1:] # add cross attentions if we output attention weights\r\n\r\n layer_output = apply_chunking_to_forward(\r\n self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output\r\n )\r\n outputs = (layer_output,) + outputs\r\n return outputs\r\n\r\n def feed_forward_chunk(self, attention_output):\r\n intermediate_output = self.intermediate(attention_output)\r\n layer_output = self.output(intermediate_output, 
attention_output)\r\n return layer_output\r\n\r\n\r\n# Copied from transformers.modeling_bert.BertEncoder with Bert->Roberta\r\nclass RobertaEncoder(nn.Module):\r\n def __init__(self, config):\r\n super().__init__()\r\n self.config = config\r\n self.layer = nn.ModuleList([RobertaLayer(config) for _ in range(config.num_hidden_layers)])\r\n\r\n def forward(\r\n self,\r\n hidden_states,\r\n attention_mask=None,\r\n head_mask=None,\r\n encoder_hidden_states=None,\r\n encoder_attention_mask=None,\r\n output_attentions=False,\r\n output_hidden_states=False,\r\n return_dict=False,\r\n ):\r\n all_hidden_states = () if output_hidden_states else None\r\n all_attentions = () if output_attentions else None\r\n for i, layer_module in enumerate(self.layer):\r\n if output_hidden_states:\r\n all_hidden_states = all_hidden_states + (hidden_states,)\r\n\r\n layer_head_mask = head_mask[i] if head_mask is not None else None\r\n\r\n if getattr(self.config, \"gradient_checkpointing\", False):\r\n\r\n def create_custom_forward(module):\r\n def custom_forward(*inputs):\r\n return module(*inputs, output_attentions)\r\n\r\n return custom_forward\r\n\r\n layer_outputs = torch.utils.checkpoint.checkpoint(\r\n create_custom_forward(layer_module),\r\n hidden_states,\r\n attention_mask,\r\n layer_head_mask,\r\n encoder_hidden_states,\r\n encoder_attention_mask,\r\n )\r\n else:\r\n layer_outputs = layer_module(\r\n hidden_states,\r\n attention_mask,\r\n layer_head_mask,\r\n encoder_hidden_states,\r\n encoder_attention_mask,\r\n output_attentions,\r\n )\r\n hidden_states = layer_outputs[0]\r\n if output_attentions:\r\n all_attentions = all_attentions + (layer_outputs[1],)\r\n\r\n if output_hidden_states:\r\n all_hidden_states = all_hidden_states + (hidden_states,)\r\n\r\n if not return_dict:\r\n return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)\r\n return BaseModelOutput(\r\n last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions\r\n )\r\n\r\n\r\n# Copied from transformers.modeling_bert.BertPooler\r\nclass RobertaPooler(nn.Module):\r\n def __init__(self, config):\r\n super().__init__()\r\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\r\n self.activation = nn.Tanh()\r\n\r\n def forward(self, hidden_states):\r\n # We \"pool\" the model by simply taking the hidden state corresponding\r\n # to the first token.\r\n first_token_tensor = hidden_states[:, 0]\r\n pooled_output = self.dense(first_token_tensor)\r\n pooled_output = self.activation(pooled_output)\r\n return pooled_output\r\n\r\n\r\nclass RobertaPreTrainedModel(PreTrainedModel):\r\n \"\"\"\r\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\r\n models.\r\n \"\"\"\r\n\r\n config_class = RobertaConfig\r\n base_model_prefix = \"roberta\"\r\n\r\n # Copied from transformers.modeling_bert.BertPreTrainedModel._init_weights\r\n def _init_weights(self, module):\r\n \"\"\" Initialize the weights \"\"\"\r\n if isinstance(module, (nn.Linear, nn.Embedding)):\r\n # Slightly different from the TF version which uses truncated_normal for initialization\r\n # cf https://github.com/pytorch/pytorch/pull/5617\r\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\r\n elif isinstance(module, nn.LayerNorm):\r\n module.bias.data.zero_()\r\n module.weight.data.fill_(1.0)\r\n if isinstance(module, nn.Linear) and module.bias is not None:\r\n module.bias.data.zero_()\r\n\r\n\r\nROBERTA_START_DOCSTRING = 
r\"\"\"\r\n\r\n This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic\r\n methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,\r\n pruning heads etc.)\r\n\r\n This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__\r\n subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to\r\n general usage and behavior.\r\n\r\n Parameters:\r\n config (:class:`~transformers.RobertaConfig`): Model configuration class with all the parameters of the\r\n model. Initializing with a config file does not load the weights associated with the model, only the\r\n configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model\r\n weights.\r\n\"\"\"\r\n\r\nROBERTA_INPUTS_DOCSTRING = r\"\"\"\r\n Args:\r\n input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):\r\n Indices of input sequence tokens in the vocabulary.\r\n\r\n Indices can be obtained using :class:`~transformers.RobertaTokenizer`. See\r\n :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for\r\n details.\r\n\r\n `What are input IDs? <../glossary.html#input-ids>`__\r\n attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):\r\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\r\n\r\n - 1 for tokens that are **not masked**,\r\n - 0 for tokens that are **masked**.\r\n\r\n `What are attention masks? <../glossary.html#attention-mask>`__\r\n token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):\r\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,\r\n 1]``:\r\n\r\n - 0 corresponds to a `sentence A` token,\r\n - 1 corresponds to a `sentence B` token.\r\n\r\n `What are token type IDs? <../glossary.html#token-type-ids>`_\r\n position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):\r\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,\r\n config.max_position_embeddings - 1]``.\r\n\r\n `What are position IDs? <../glossary.html#position-ids>`_\r\n head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):\r\n Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:\r\n\r\n - 1 indicates the head is **not masked**,\r\n - 0 indicates the head is **masked**.\r\n\r\n inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):\r\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\r\n This is useful if you want more control over how to convert :obj:`input_ids` indices into associated\r\n vectors than the model's internal embedding lookup matrix.\r\n output_attentions (:obj:`bool`, `optional`):\r\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned\r\n tensors for more detail.\r\n output_hidden_states (:obj:`bool`, `optional`):\r\n Whether or not to return the hidden states of all layers. 
See ``hidden_states`` under returned tensors for\r\n more detail.\r\n return_dict (:obj:`bool`, `optional`):\r\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\r\n\"\"\"\r\n\r\n\r\n@add_start_docstrings(\r\n \"The bare RoBERTa Model transformer outputting raw hidden-states without any specific head on top.\",\r\n ROBERTA_START_DOCSTRING,\r\n)\r\nclass RobertaModel(RobertaPreTrainedModel):\r\n \"\"\"\r\n\r\n The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of\r\n cross-attention is added between the self-attention layers, following the architecture described in `Attention is\r\n all you need`_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz\r\n Kaiser and Illia Polosukhin.\r\n\r\n To behave as an decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration\r\n set to :obj:`True`. To be used in a Seq2Seq model, the model needs to initialized with both :obj:`is_decoder`\r\n argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an\r\n input to the forward pass.\r\n\r\n .. _`Attention is all you need`: https://arxiv.org/abs/1706.03762\r\n\r\n \"\"\"\r\n\r\n authorized_missing_keys = [r\"position_ids\"]\r\n\r\n # Copied from transformers.modeling_bert.BertModel.__init__ with Bert->Roberta\r\n def __init__(self, config, add_pooling_layer=True):\r\n super().__init__(config)\r\n self.config = config\r\n\r\n self.embeddings = RobertaEmbeddings(config)\r\n self.encoder = RobertaEncoder(config)\r\n\r\n self.pooler = RobertaPooler(config) if add_pooling_layer else None\r\n\r\n self.init_weights()\r\n\r\n def get_input_embeddings(self):\r\n return self.embeddings.word_embeddings\r\n\r\n def set_input_embeddings(self, value):\r\n self.embeddings.word_embeddings = value\r\n\r\n def _prune_heads(self, heads_to_prune):\r\n \"\"\"\r\n Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base\r\n class PreTrainedModel\r\n \"\"\"\r\n for layer, heads in heads_to_prune.items():\r\n self.encoder.layer[layer].attention.prune_heads(heads)\r\n\r\n @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format(\"(batch_size, sequence_length)\"))\r\n @add_code_sample_docstrings(\r\n tokenizer_class=_TOKENIZER_FOR_DOC,\r\n checkpoint=\"roberta-base\",\r\n output_type=BaseModelOutputWithPooling,\r\n config_class=_CONFIG_FOR_DOC,\r\n )\r\n # Copied from transformers.modeling_bert.BertModel.forward\r\n def forward(\r\n self,\r\n input_ids=None,\r\n attention_mask=None,\r\n token_type_ids=None,\r\n position_ids=None,\r\n head_mask=None,\r\n inputs_embeds=None,\r\n encoder_hidden_states=None,\r\n encoder_attention_mask=None,\r\n output_attentions=None,\r\n output_hidden_states=None,\r\n return_dict=None,\r\n ):\r\n r\"\"\"\r\n encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\r\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if\r\n the model is configured as a decoder.\r\n encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\r\n Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in\r\n the cross-attention if the model is configured as a decoder. 
Mask values selected in ``[0, 1]``: ``1`` for\r\n tokens that are NOT MASKED, ``0`` for MASKED tokens.\r\n \"\"\"\r\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\r\n output_hidden_states = (\r\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\r\n )\r\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\r\n\r\n if input_ids is not None and inputs_embeds is not None:\r\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\r\n elif input_ids is not None:\r\n input_shape = input_ids.size()\r\n elif inputs_embeds is not None:\r\n input_shape = inputs_embeds.size()[:-1]\r\n else:\r\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\r\n\r\n device = input_ids.device if input_ids is not None else inputs_embeds.device\r\n\r\n if attention_mask is None:\r\n attention_mask = torch.ones(input_shape, device=device)\r\n if token_type_ids is None:\r\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\r\n\r\n # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]\r\n # ourselves in which case we just need to make it broadcastable to all heads.\r\n extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)\r\n\r\n # If a 2D or 3D attention mask is provided for the cross-attention\r\n # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]\r\n if self.config.is_decoder and encoder_hidden_states is not None:\r\n encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()\r\n encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)\r\n if encoder_attention_mask is None:\r\n encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)\r\n encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)\r\n else:\r\n encoder_extended_attention_mask = None\r\n\r\n # Prepare head mask if needed\r\n # 1.0 in head_mask indicate we keep the head\r\n # attention_probs has shape bsz x n_heads x N x N\r\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\r\n # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]\r\n head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)\r\n\r\n embedding_output = self.embeddings(\r\n input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds\r\n )\r\n encoder_outputs = self.encoder(\r\n embedding_output,\r\n attention_mask=extended_attention_mask,\r\n head_mask=head_mask,\r\n encoder_hidden_states=encoder_hidden_states,\r\n encoder_attention_mask=encoder_extended_attention_mask,\r\n output_attentions=output_attentions,\r\n output_hidden_states=output_hidden_states,\r\n return_dict=return_dict,\r\n )\r\n sequence_output = encoder_outputs[0]\r\n pooled_output = self.pooler(sequence_output) if self.pooler is not None else None\r\n\r\n if not return_dict:\r\n return (sequence_output, pooled_output) + encoder_outputs[1:]\r\n\r\n return BaseModelOutputWithPooling(\r\n last_hidden_state=sequence_output,\r\n pooler_output=pooled_output,\r\n hidden_states=encoder_outputs.hidden_states,\r\n attentions=encoder_outputs.attentions,\r\n )\r\n\r\n\r\n@add_start_docstrings(\r\n \"\"\"RoBERTa Model with a `language modeling` head on top for CLM 
fine-tuning. \"\"\", ROBERTA_START_DOCSTRING\r\n)\r\nclass RobertaForCausalLM(RobertaPreTrainedModel):\r\n authorized_missing_keys = [r\"position_ids\", r\"predictions.decoder.bias\"]\r\n authorized_unexpected_keys = [r\"pooler\"]\r\n\r\n def __init__(self, config):\r\n super().__init__(config)\r\n\r\n if not config.is_decoder:\r\n logger.warning(\"If you want to use `RobertaLMHeadModel` as a standalone, add `is_decoder=True.`\")\r\n\r\n self.roberta = RobertaModel(config, add_pooling_layer=False)\r\n self.lm_head = RobertaLMHead(config)\r\n\r\n self.init_weights()\r\n\r\n def get_output_embeddings(self):\r\n return self.lm_head.decoder\r\n\r\n @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\r\n @replace_return_docstrings(output_type=CausalLMOutput, config_class=_CONFIG_FOR_DOC)\r\n def forward(\r\n self,\r\n input_ids=None,\r\n attention_mask=None,\r\n token_type_ids=None,\r\n position_ids=None,\r\n head_mask=None,\r\n inputs_embeds=None,\r\n encoder_hidden_states=None,\r\n encoder_attention_mask=None,\r\n labels=None,\r\n output_attentions=None,\r\n output_hidden_states=None,\r\n return_dict=None,\r\n ):\r\n r\"\"\"\r\n encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\r\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if\r\n the model is configured as a decoder.\r\n encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\r\n Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in\r\n the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:\r\n\r\n - 1 for tokens that are **not masked**,\r\n - 0 for tokens that are **masked**.\r\n\r\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\r\n Labels for computing the left-to-right language modeling loss (next word prediction). 
Indices should be in\r\n ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are\r\n ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``\r\n\r\n Returns:\r\n\r\n Example::\r\n\r\n >>> from transformers import RobertaTokenizer, RobertaForCausalLM, RobertaConfig\r\n >>> import torch\r\n\r\n >>> tokenizer = RobertaTokenizer.from_pretrained('roberta-base')\r\n >>> config = RobertaConfig.from_pretrained(\"roberta-base\", return_dict=True)\r\n >>> config.is_decoder = True\r\n >>> model = RobertaForCausalLM.from_pretrained('roberta-base', config=config)\r\n\r\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"pt\")\r\n >>> outputs = model(**inputs)\r\n\r\n >>> prediction_logits = outputs.logits\r\n \"\"\"\r\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\r\n\r\n outputs = self.roberta(\r\n input_ids,\r\n attention_mask=attention_mask,\r\n token_type_ids=token_type_ids,\r\n position_ids=position_ids,\r\n head_mask=head_mask,\r\n inputs_embeds=inputs_embeds,\r\n encoder_hidden_states=encoder_hidden_states,\r\n encoder_attention_mask=encoder_attention_mask,\r\n output_attentions=output_attentions,\r\n output_hidden_states=output_hidden_states,\r\n return_dict=return_dict,\r\n )\r\n\r\n sequence_output = outputs[0]\r\n prediction_scores = self.lm_head(sequence_output)\r\n\r\n lm_loss = None\r\n if labels is not None:\r\n # we are doing next-token prediction; shift prediction scores and input ids by one\r\n shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()\r\n labels = labels[:, 1:].contiguous()\r\n loss_fct = CrossEntropyLoss()\r\n lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\r\n\r\n if not return_dict:\r\n output = (prediction_scores,) + outputs[2:]\r\n return ((lm_loss,) + output) if lm_loss is not None else output\r\n\r\n return CausalLMOutput(\r\n loss=lm_loss,\r\n logits=prediction_scores,\r\n hidden_states=outputs.hidden_states,\r\n attentions=outputs.attentions,\r\n )\r\n\r\n def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):\r\n input_shape = input_ids.shape\r\n\r\n # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly\r\n if attention_mask is None:\r\n attention_mask = input_ids.new_ones(input_shape)\r\n\r\n return {\"input_ids\": input_ids, \"attention_mask\": attention_mask}\r\n\r\n\r\n@add_start_docstrings(\"\"\"RoBERTa Model with a `language modeling` head on top. 
\"\"\", ROBERTA_START_DOCSTRING)\r\nclass RobertaForMaskedLM(RobertaPreTrainedModel):\r\n authorized_missing_keys = [r\"position_ids\", r\"predictions.decoder.bias\"]\r\n authorized_unexpected_keys = [r\"pooler\"]\r\n\r\n def __init__(self, config):\r\n super().__init__(config)\r\n\r\n if config.is_decoder:\r\n logger.warning(\r\n \"If you want to use `RobertaForMaskedLM` make sure `config.is_decoder=False` for \"\r\n \"bi-directional self-attention.\"\r\n )\r\n\r\n self.roberta = RobertaModel(config, add_pooling_layer=False)\r\n self.lm_head = RobertaLMHead(config)\r\n\r\n self.init_weights()\r\n\r\n def get_output_embeddings(self):\r\n return self.lm_head.decoder\r\n\r\n @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\r\n @add_code_sample_docstrings(\r\n tokenizer_class=_TOKENIZER_FOR_DOC,\r\n checkpoint=\"roberta-base\",\r\n output_type=MaskedLMOutput,\r\n config_class=_CONFIG_FOR_DOC,\r\n mask=\"<mask>\",\r\n )\r\n def forward(\r\n self,\r\n input_ids=None,\r\n attention_mask=None,\r\n token_type_ids=None,\r\n position_ids=None,\r\n head_mask=None,\r\n inputs_embeds=None,\r\n encoder_hidden_states=None,\r\n encoder_attention_mask=None,\r\n labels=None,\r\n output_attentions=None,\r\n output_hidden_states=None,\r\n return_dict=None,\r\n **kwargs\r\n ):\r\n r\"\"\"\r\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\r\n Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,\r\n config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored\r\n (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``\r\n kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):\r\n Used to hide legacy arguments that have been deprecated.\r\n \"\"\"\r\n if \"masked_lm_labels\" in kwargs:\r\n warnings.warn(\r\n \"The `masked_lm_labels` argument is deprecated and will be removed in a future version, use `labels` instead.\",\r\n FutureWarning,\r\n )\r\n labels = kwargs.pop(\"masked_lm_labels\")\r\n assert kwargs == {}, f\"Unexpected keyword arguments: {list(kwargs.keys())}.\"\r\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\r\n\r\n outputs = self.roberta(\r\n input_ids,\r\n attention_mask=attention_mask,\r\n token_type_ids=token_type_ids,\r\n position_ids=position_ids,\r\n head_mask=head_mask,\r\n inputs_embeds=inputs_embeds,\r\n encoder_hidden_states=encoder_hidden_states,\r\n encoder_attention_mask=encoder_attention_mask,\r\n output_attentions=output_attentions,\r\n output_hidden_states=output_hidden_states,\r\n return_dict=return_dict,\r\n )\r\n sequence_output = outputs[0]\r\n prediction_scores = self.lm_head(sequence_output)\r\n\r\n masked_lm_loss = None\r\n if labels is not None:\r\n loss_fct = CrossEntropyLoss()\r\n masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\r\n\r\n if not return_dict:\r\n output = (prediction_scores,) + outputs[2:]\r\n return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output\r\n\r\n return MaskedLMOutput(\r\n loss=masked_lm_loss,\r\n logits=prediction_scores,\r\n hidden_states=outputs.hidden_states,\r\n attentions=outputs.attentions,\r\n )\r\n\r\n\r\nclass RobertaLMHead(nn.Module):\r\n \"\"\"Roberta Head for masked language modeling.\"\"\"\r\n\r\n def __init__(self, config):\r\n super().__init__()\r\n self.dense = nn.Linear(config.hidden_size, 
config.hidden_size)\r\n self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\r\n\r\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\r\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\r\n\r\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\r\n self.decoder.bias = self.bias\r\n\r\n def forward(self, features, **kwargs):\r\n x = self.dense(features)\r\n x = gelu(x)\r\n x = self.layer_norm(x)\r\n\r\n # project back to size of vocabulary with bias\r\n x = self.decoder(x)\r\n\r\n return x\r\n\r\n\r\n@add_start_docstrings(\r\n \"\"\"\r\n RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the\r\n pooled output) e.g. for GLUE tasks.\r\n \"\"\",\r\n ROBERTA_START_DOCSTRING,\r\n)\r\nclass RobertaForSequenceClassification(RobertaPreTrainedModel):\r\n authorized_missing_keys = [r\"position_ids\"]\r\n\r\n def __init__(self, config):\r\n super().__init__(config)\r\n self.num_labels = config.num_labels\r\n\r\n self.roberta = RobertaModel(config, add_pooling_layer=False)\r\n self.classifier = RobertaClassificationHead(config)\r\n\r\n self.init_weights()\r\n\r\n @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\r\n @add_code_sample_docstrings(\r\n tokenizer_class=_TOKENIZER_FOR_DOC,\r\n checkpoint=\"roberta-base\",\r\n output_type=SequenceClassifierOutput,\r\n config_class=_CONFIG_FOR_DOC,\r\n )\r\n def forward(\r\n self,\r\n input_ids=None,\r\n attention_mask=None,\r\n token_type_ids=None,\r\n position_ids=None,\r\n head_mask=None,\r\n inputs_embeds=None,\r\n labels=None,\r\n output_attentions=None,\r\n output_hidden_states=None,\r\n return_dict=None,\r\n ):\r\n r\"\"\"\r\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\r\n Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,\r\n config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),\r\n If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).\r\n \"\"\"\r\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\r\n\r\n outputs = self.roberta(\r\n input_ids,\r\n attention_mask=attention_mask,\r\n token_type_ids=token_type_ids,\r\n position_ids=position_ids,\r\n head_mask=head_mask,\r\n inputs_embeds=inputs_embeds,\r\n output_attentions=output_attentions,\r\n output_hidden_states=output_hidden_states,\r\n return_dict=return_dict,\r\n )\r\n sequence_output = outputs[0]\r\n logits = self.classifier(sequence_output)\r\n\r\n loss = None\r\n if labels is not None:\r\n if self.num_labels == 1:\r\n # We are doing regression\r\n loss_fct = MSELoss()\r\n loss = loss_fct(logits.view(-1), labels.view(-1))\r\n else:\r\n loss_fct = CrossEntropyLoss()\r\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\r\n\r\n if not return_dict:\r\n output = (logits,) + outputs[2:]\r\n return ((loss,) + output) if loss is not None else output\r\n\r\n return SequenceClassifierOutput(\r\n loss=loss,\r\n logits=logits,\r\n hidden_states=outputs.hidden_states,\r\n attentions=outputs.attentions,\r\n )\r\n\r\n\r\n@add_start_docstrings(\r\n \"\"\"\r\n Roberta Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a\r\n softmax) e.g. 
for RocStories/SWAG tasks.\r\n \"\"\",\r\n ROBERTA_START_DOCSTRING,\r\n)\r\nclass RobertaForMultipleChoice(RobertaPreTrainedModel):\r\n authorized_missing_keys = [r\"position_ids\"]\r\n\r\n def __init__(self, config):\r\n super().__init__(config)\r\n\r\n self.roberta = RobertaModel(config)\r\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\r\n self.classifier = nn.Linear(config.hidden_size, 1)\r\n\r\n self.init_weights()\r\n\r\n @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format(\"batch_size, num_choices, sequence_length\"))\r\n @add_code_sample_docstrings(\r\n tokenizer_class=_TOKENIZER_FOR_DOC,\r\n checkpoint=\"roberta-base\",\r\n output_type=MultipleChoiceModelOutput,\r\n config_class=_CONFIG_FOR_DOC,\r\n )\r\n def forward(\r\n self,\r\n input_ids=None,\r\n token_type_ids=None,\r\n attention_mask=None,\r\n labels=None,\r\n position_ids=None,\r\n head_mask=None,\r\n inputs_embeds=None,\r\n output_attentions=None,\r\n output_hidden_states=None,\r\n return_dict=None,\r\n ):\r\n r\"\"\"\r\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\r\n Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,\r\n num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See\r\n :obj:`input_ids` above)\r\n \"\"\"\r\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\r\n num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]\r\n\r\n flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None\r\n flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None\r\n flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None\r\n flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None\r\n flat_inputs_embeds = (\r\n inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))\r\n if inputs_embeds is not None\r\n else None\r\n )\r\n\r\n outputs = self.roberta(\r\n flat_input_ids,\r\n position_ids=flat_position_ids,\r\n token_type_ids=flat_token_type_ids,\r\n attention_mask=flat_attention_mask,\r\n head_mask=head_mask,\r\n inputs_embeds=flat_inputs_embeds,\r\n output_attentions=output_attentions,\r\n output_hidden_states=output_hidden_states,\r\n return_dict=return_dict,\r\n )\r\n pooled_output = outputs[1]\r\n\r\n pooled_output = self.dropout(pooled_output)\r\n logits = self.classifier(pooled_output)\r\n reshaped_logits = logits.view(-1, num_choices)\r\n\r\n loss = None\r\n if labels is not None:\r\n loss_fct = CrossEntropyLoss()\r\n loss = loss_fct(reshaped_logits, labels)\r\n\r\n if not return_dict:\r\n output = (reshaped_logits,) + outputs[2:]\r\n return ((loss,) + output) if loss is not None else output\r\n\r\n return MultipleChoiceModelOutput(\r\n loss=loss,\r\n logits=reshaped_logits,\r\n hidden_states=outputs.hidden_states,\r\n attentions=outputs.attentions,\r\n )\r\n\r\n\r\n@add_start_docstrings(\r\n \"\"\"\r\n Roberta Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. 
for\r\n Named-Entity-Recognition (NER) tasks.\r\n \"\"\",\r\n ROBERTA_START_DOCSTRING,\r\n)\r\nclass RobertaForTokenClassification(RobertaPreTrainedModel):\r\n authorized_unexpected_keys = [r\"pooler\"]\r\n authorized_missing_keys = [r\"position_ids\"]\r\n\r\n def __init__(self, config):\r\n super().__init__(config)\r\n self.num_labels = config.num_labels\r\n\r\n self.roberta = RobertaModel(config, add_pooling_layer=False)\r\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\r\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\r\n\r\n self.init_weights()\r\n\r\n @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\r\n @add_code_sample_docstrings(\r\n tokenizer_class=_TOKENIZER_FOR_DOC,\r\n checkpoint=\"roberta-base\",\r\n output_type=TokenClassifierOutput,\r\n config_class=_CONFIG_FOR_DOC,\r\n )\r\n def forward(\r\n self,\r\n input_ids=None,\r\n attention_mask=None,\r\n token_type_ids=None,\r\n position_ids=None,\r\n head_mask=None,\r\n inputs_embeds=None,\r\n labels=None,\r\n output_attentions=None,\r\n output_hidden_states=None,\r\n return_dict=None,\r\n ):\r\n r\"\"\"\r\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\r\n Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -\r\n 1]``.\r\n \"\"\"\r\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\r\n\r\n outputs = self.roberta(\r\n input_ids,\r\n attention_mask=attention_mask,\r\n token_type_ids=token_type_ids,\r\n position_ids=position_ids,\r\n head_mask=head_mask,\r\n inputs_embeds=inputs_embeds,\r\n output_attentions=output_attentions,\r\n output_hidden_states=output_hidden_states,\r\n return_dict=return_dict,\r\n )\r\n\r\n sequence_output = outputs[0]\r\n\r\n sequence_output = self.dropout(sequence_output)\r\n logits = self.classifier(sequence_output)\r\n\r\n loss = None\r\n if labels is not None:\r\n loss_fct = CrossEntropyLoss()\r\n # Only keep active parts of the loss\r\n if attention_mask is not None:\r\n active_loss = attention_mask.view(-1) == 1\r\n active_logits = logits.view(-1, self.num_labels)\r\n active_labels = torch.where(\r\n active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)\r\n )\r\n loss = loss_fct(active_logits, active_labels)\r\n else:\r\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\r\n\r\n if not return_dict:\r\n output = (logits,) + outputs[2:]\r\n return ((loss,) + output) if loss is not None else output\r\n\r\n return TokenClassifierOutput(\r\n loss=loss,\r\n logits=logits,\r\n hidden_states=outputs.hidden_states,\r\n attentions=outputs.attentions,\r\n )\r\n\r\n\r\nclass RobertaClassificationHead(nn.Module):\r\n \"\"\"Head for sentence-level classification tasks.\"\"\"\r\n\r\n def __init__(self, config):\r\n super().__init__()\r\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\r\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\r\n self.out_proj = nn.Linear(config.hidden_size, config.num_labels)\r\n\r\n def forward(self, features, **kwargs):\r\n x = features[:, 0, :] # take <s> token (equiv. 
to [CLS])\r\n x = self.dropout(x)\r\n x = self.dense(x)\r\n x = torch.tanh(x)\r\n x = self.dropout(x)\r\n x = self.out_proj(x)\r\n return x\r\n\r\n\r\n@add_start_docstrings(\r\n \"\"\"\r\n Roberta Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\r\n layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\r\n \"\"\",\r\n ROBERTA_START_DOCSTRING,\r\n)\r\nclass RobertaForQuestionAnswering(RobertaPreTrainedModel):\r\n authorized_unexpected_keys = [r\"pooler\"]\r\n authorized_missing_keys = [r\"position_ids\"]\r\n\r\n def __init__(self, config):\r\n super().__init__(config)\r\n self.num_labels = config.num_labels\r\n\r\n self.roberta = RobertaModel(config, add_pooling_layer=False)\r\n self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)\r\n\r\n self.init_weights()\r\n\r\n @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\r\n @add_code_sample_docstrings(\r\n tokenizer_class=_TOKENIZER_FOR_DOC,\r\n checkpoint=\"roberta-base\",\r\n output_type=QuestionAnsweringModelOutput,\r\n config_class=_CONFIG_FOR_DOC,\r\n )\r\n def forward(\r\n self,\r\n input_ids=None,\r\n attention_mask=None,\r\n token_type_ids=None,\r\n position_ids=None,\r\n head_mask=None,\r\n inputs_embeds=None,\r\n start_positions=None,\r\n end_positions=None,\r\n output_attentions=None,\r\n output_hidden_states=None,\r\n return_dict=None,\r\n ):\r\n r\"\"\"\r\n start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\r\n Labels for position (index) of the start of the labelled span for computing the token classification loss.\r\n Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the\r\n sequence are not taken into account for computing the loss.\r\n end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\r\n Labels for position (index) of the end of the labelled span for computing the token classification loss.\r\n Positions are clamped to the length of the sequence (:obj:`sequence_length`). 
Position outside of the\r\n sequence are not taken into account for computing the loss.\r\n \"\"\"\r\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\r\n\r\n outputs = self.roberta(\r\n input_ids,\r\n attention_mask=attention_mask,\r\n token_type_ids=token_type_ids,\r\n position_ids=position_ids,\r\n head_mask=head_mask,\r\n inputs_embeds=inputs_embeds,\r\n output_attentions=output_attentions,\r\n output_hidden_states=output_hidden_states,\r\n return_dict=return_dict,\r\n )\r\n\r\n sequence_output = outputs[0]\r\n\r\n logits = self.qa_outputs(sequence_output)\r\n start_logits, end_logits = logits.split(1, dim=-1)\r\n start_logits = start_logits.squeeze(-1)\r\n end_logits = end_logits.squeeze(-1)\r\n\r\n total_loss = None\r\n if start_positions is not None and end_positions is not None:\r\n # If we are on multi-GPU, split add a dimension\r\n if len(start_positions.size()) > 1:\r\n start_positions = start_positions.squeeze(-1)\r\n if len(end_positions.size()) > 1:\r\n end_positions = end_positions.squeeze(-1)\r\n # sometimes the start/end positions are outside our model inputs, we ignore these terms\r\n ignored_index = start_logits.size(1)\r\n start_positions.clamp_(0, ignored_index)\r\n end_positions.clamp_(0, ignored_index)\r\n\r\n loss_fct = CrossEntropyLoss(ignore_index=ignored_index)\r\n start_loss = loss_fct(start_logits, start_positions)\r\n end_loss = loss_fct(end_logits, end_positions)\r\n total_loss = (start_loss + end_loss) / 2\r\n\r\n if not return_dict:\r\n output = (start_logits, end_logits) + outputs[2:]\r\n return ((total_loss,) + output) if total_loss is not None else output\r\n\r\n return QuestionAnsweringModelOutput(\r\n loss=total_loss,\r\n start_logits=start_logits,\r\n end_logits=end_logits,\r\n hidden_states=outputs.hidden_states,\r\n attentions=outputs.attentions,\r\n )\r\n\r\n\r\ndef create_position_ids_from_input_ids(input_ids, padding_idx):\r\n \"\"\"\r\n Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols\r\n are ignored. This is modified from fairseq's `utils.make_positions`.\r\n\r\n Args:\r\n x: torch.Tensor x:\r\n\r\n Returns: torch.Tensor\r\n \"\"\"\r\n # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.\r\n mask = input_ids.ne(padding_idx).int()\r\n incremental_indices = torch.cumsum(mask, dim=1).type_as(mask) * mask\r\n return incremental_indices.long() + padding_idx\r\n", "# coding=utf-8\r\n# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.\r\n# Copyright (c) 2018, NVIDIA CORPORATION. 
All rights reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\"\"\" TF 2.0 OpenAI GPT model.\"\"\"\r\n\r\n\r\nfrom dataclasses import dataclass\r\nfrom typing import Optional, Tuple\r\n\r\nimport tensorflow as tf\r\n\r\nfrom .activations_tf import get_tf_activation\r\nfrom .configuration_openai import OpenAIGPTConfig\r\nfrom .file_utils import (\r\n ModelOutput,\r\n add_code_sample_docstrings,\r\n add_start_docstrings,\r\n add_start_docstrings_to_model_forward,\r\n replace_return_docstrings,\r\n)\r\nfrom .modeling_tf_outputs import TFBaseModelOutput, TFCausalLMOutput\r\nfrom .modeling_tf_utils import (\r\n TFCausalLanguageModelingLoss,\r\n TFConv1D,\r\n TFPreTrainedModel,\r\n TFSequenceSummary,\r\n TFSharedEmbeddings,\r\n get_initializer,\r\n keras_serializable,\r\n shape_list,\r\n)\r\nfrom .tokenization_utils import BatchEncoding\r\nfrom .utils import logging\r\n\r\n\r\nlogger = logging.get_logger(__name__)\r\n\r\n_CONFIG_FOR_DOC = \"OpenAIGPTConfig\"\r\n_TOKENIZER_FOR_DOC = \"OpenAIGPTTokenizer\"\r\n\r\nTF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST = [\r\n \"openai-gpt\",\r\n # See all OpenAI GPT models at https://huggingface.co/models?filter=openai-gpt\r\n]\r\n\r\n\r\nclass TFAttention(tf.keras.layers.Layer):\r\n def __init__(self, nx, n_ctx, config, scale=False, **kwargs):\r\n super().__init__(**kwargs)\r\n\r\n n_state = nx # in Attention: n_state=768 (nx=n_embd)\r\n # [switch nx => n_state from Block to Attention to keep identical to TF implem]\r\n assert (\r\n n_state % config.n_head == 0\r\n ), f\"Hidden dimension {n_state} not dividable by number of heads {config.n_head}\"\r\n self.n_ctx = n_ctx\r\n self.n_head = config.n_head\r\n self.split_size = n_state\r\n self.scale = scale\r\n self.output_attentions = config.output_attentions\r\n\r\n self.c_attn = TFConv1D(n_state * 3, nx, initializer_range=config.initializer_range, name=\"c_attn\")\r\n self.c_proj = TFConv1D(n_state, nx, initializer_range=config.initializer_range, name=\"c_proj\")\r\n self.attn_dropout = tf.keras.layers.Dropout(config.attn_pdrop)\r\n self.resid_dropout = tf.keras.layers.Dropout(config.resid_pdrop)\r\n self.pruned_heads = set()\r\n\r\n def prune_heads(self, heads):\r\n pass\r\n\r\n @staticmethod\r\n def causal_attention_mask(nd, ns, dtype):\r\n \"\"\"\r\n 1's in the lower triangle, counting from the lower right corner. 
Same as tf.matrix_band_part(tf.ones([nd, ns]),\r\n -1, ns-nd), but doesn't produce garbage on TPUs.\r\n \"\"\"\r\n i = tf.range(nd)[:, None]\r\n j = tf.range(ns)\r\n m = i >= j - ns + nd\r\n return tf.cast(m, dtype)\r\n\r\n def _attn(self, q, k, v, attention_mask, head_mask, output_attentions, training=False):\r\n # q, k, v have shape [batch, heads, sequence, features]\r\n w = tf.matmul(q, k, transpose_b=True)\r\n if self.scale:\r\n dk = tf.cast(shape_list(k)[-1], tf.float32) # scale attention_scores\r\n w = w / tf.math.sqrt(dk)\r\n\r\n # w has shape [batch, heads, dst_sequence, src_sequence], where information flows from src to dst.\r\n _, _, nd, ns = shape_list(w)\r\n b = self.causal_attention_mask(nd, ns, dtype=w.dtype)\r\n b = tf.reshape(b, [1, 1, nd, ns])\r\n w = w * b - 1e4 * (1 - b)\r\n\r\n if attention_mask is not None:\r\n # Apply the attention mask\r\n w = w + attention_mask\r\n\r\n w = tf.nn.softmax(w, axis=-1)\r\n w = self.attn_dropout(w, training=training)\r\n\r\n # Mask heads if we want to\r\n if head_mask is not None:\r\n w = w * head_mask\r\n\r\n outputs = [tf.matmul(w, v)]\r\n if output_attentions:\r\n outputs.append(w)\r\n return outputs\r\n\r\n def merge_heads(self, x):\r\n x = tf.transpose(x, [0, 2, 1, 3])\r\n x_shape = shape_list(x)\r\n new_x_shape = x_shape[:-2] + [x_shape[-2] * x_shape[-1]]\r\n return tf.reshape(x, new_x_shape)\r\n\r\n def split_heads(self, x):\r\n x_shape = shape_list(x)\r\n new_x_shape = x_shape[:-1] + [self.n_head, x_shape[-1] // self.n_head]\r\n x = tf.reshape(x, new_x_shape)\r\n return tf.transpose(x, (0, 2, 1, 3)) # (batch, head, seq_length, head_features)\r\n\r\n def call(self, x, attention_mask, head_mask, output_attentions, training=False):\r\n x = self.c_attn(x)\r\n query, key, value = tf.split(x, 3, axis=2)\r\n query = self.split_heads(query)\r\n key = self.split_heads(key)\r\n value = self.split_heads(value)\r\n\r\n attn_outputs = self._attn(query, key, value, attention_mask, head_mask, output_attentions, training=training)\r\n a = attn_outputs[0]\r\n\r\n a = self.merge_heads(a)\r\n a = self.c_proj(a)\r\n a = self.resid_dropout(a, training=training)\r\n\r\n outputs = [a] + attn_outputs[1:]\r\n return outputs # a, (attentions)\r\n\r\n\r\nclass TFMLP(tf.keras.layers.Layer):\r\n def __init__(self, n_state, config, **kwargs):\r\n super().__init__(**kwargs)\r\n nx = config.n_embd\r\n self.c_fc = TFConv1D(n_state, nx, initializer_range=config.initializer_range, name=\"c_fc\")\r\n self.c_proj = TFConv1D(nx, n_state, initializer_range=config.initializer_range, name=\"c_proj\")\r\n self.act = get_tf_activation(\"gelu\")\r\n self.dropout = tf.keras.layers.Dropout(config.resid_pdrop)\r\n\r\n def call(self, x, training=False):\r\n h = self.act(self.c_fc(x))\r\n h2 = self.c_proj(h)\r\n h2 = self.dropout(h2, training=training)\r\n return h2\r\n\r\n\r\nclass TFBlock(tf.keras.layers.Layer):\r\n def __init__(self, n_ctx, config, scale=False, **kwargs):\r\n super().__init__(**kwargs)\r\n nx = config.n_embd\r\n self.attn = TFAttention(nx, n_ctx, config, scale, name=\"attn\")\r\n self.ln_1 = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name=\"ln_1\")\r\n self.mlp = TFMLP(4 * nx, config, name=\"mlp\")\r\n self.ln_2 = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name=\"ln_2\")\r\n\r\n def call(self, x, attention_mask, head_mask, output_attentions, training=False):\r\n output_attn = self.attn(x, attention_mask, head_mask, output_attentions, training=training)\r\n a = output_attn[0] # output_attn: a, 
(attentions)\r\n\r\n n = self.ln_1(x + a)\r\n m = self.mlp(n, training=training)\r\n h = self.ln_2(n + m)\r\n\r\n outputs = [h] + output_attn[1:]\r\n return outputs # x, (attentions)\r\n\r\n\r\n@keras_serializable\r\nclass TFOpenAIGPTMainLayer(tf.keras.layers.Layer):\r\n config_class = OpenAIGPTConfig\r\n\r\n def __init__(self, config, *inputs, **kwargs):\r\n super().__init__(*inputs, **kwargs)\r\n self.output_hidden_states = config.output_hidden_states\r\n self.output_attentions = config.output_attentions\r\n self.return_dict = config.use_return_dict\r\n self.num_hidden_layers = config.n_layer\r\n self.vocab_size = config.vocab_size\r\n self.n_embd = config.n_embd\r\n\r\n self.tokens_embed = TFSharedEmbeddings(\r\n config.vocab_size, config.n_embd, initializer_range=config.initializer_range, name=\"tokens_embed\"\r\n )\r\n self.positions_embed = tf.keras.layers.Embedding(\r\n config.n_positions,\r\n config.n_embd,\r\n embeddings_initializer=get_initializer(config.initializer_range),\r\n name=\"positions_embed\",\r\n )\r\n self.drop = tf.keras.layers.Dropout(config.embd_pdrop)\r\n self.h = [TFBlock(config.n_ctx, config, scale=True, name=\"h_._{}\".format(i)) for i in range(config.n_layer)]\r\n\r\n def get_input_embeddings(self):\r\n return self.tokens_embed\r\n\r\n def set_input_embeddings(self, value):\r\n self.tokens_embed.weight = value\r\n self.tokens_embed.vocab_size = value.shape[0]\r\n\r\n def _prune_heads(self, heads_to_prune):\r\n \"\"\"\r\n Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}\r\n \"\"\"\r\n raise NotImplementedError\r\n\r\n def call(\r\n self,\r\n inputs,\r\n attention_mask=None,\r\n token_type_ids=None,\r\n position_ids=None,\r\n head_mask=None,\r\n inputs_embeds=None,\r\n output_attentions=None,\r\n output_hidden_states=None,\r\n return_dict=None,\r\n training=False,\r\n ):\r\n if isinstance(inputs, (tuple, list)):\r\n input_ids = inputs[0]\r\n attention_mask = inputs[1] if len(inputs) > 1 else attention_mask\r\n token_type_ids = inputs[2] if len(inputs) > 2 else token_type_ids\r\n position_ids = inputs[3] if len(inputs) > 3 else position_ids\r\n head_mask = inputs[4] if len(inputs) > 4 else head_mask\r\n inputs_embeds = inputs[5] if len(inputs) > 5 else inputs_embeds\r\n output_attentions = inputs[6] if len(inputs) > 6 else output_attentions\r\n output_hidden_states = inputs[7] if len(inputs) > 7 else output_hidden_states\r\n return_dict = inputs[8] if len(inputs) > 8 else return_dict\r\n assert len(inputs) <= 9, \"Too many inputs.\"\r\n elif isinstance(inputs, (dict, BatchEncoding)):\r\n input_ids = inputs.get(\"input_ids\")\r\n attention_mask = inputs.get(\"attention_mask\", attention_mask)\r\n token_type_ids = inputs.get(\"token_type_ids\", token_type_ids)\r\n position_ids = inputs.get(\"position_ids\", position_ids)\r\n head_mask = inputs.get(\"head_mask\", head_mask)\r\n inputs_embeds = inputs.get(\"inputs_embeds\", inputs_embeds)\r\n output_attentions = inputs.get(\"output_attentions\", output_attentions)\r\n output_hidden_states = inputs.get(\"output_hidden_states\", output_hidden_states)\r\n return_dict = inputs.get(\"return_dict\", return_dict)\r\n assert len(inputs) <= 9, \"Too many inputs.\"\r\n else:\r\n input_ids = inputs\r\n\r\n output_attentions = output_attentions if output_attentions is not None else self.output_attentions\r\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.output_hidden_states\r\n return_dict = return_dict if return_dict is not None else 
self.return_dict\r\n\r\n if input_ids is not None and inputs_embeds is not None:\r\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\r\n elif input_ids is not None:\r\n input_shape = shape_list(input_ids)\r\n input_ids = tf.reshape(input_ids, [-1, input_shape[-1]])\r\n elif inputs_embeds is not None:\r\n input_shape = shape_list(inputs_embeds)[:-1]\r\n else:\r\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\r\n\r\n if position_ids is None:\r\n position_ids = tf.range(input_shape[-1], dtype=tf.int32)[tf.newaxis, :]\r\n\r\n if attention_mask is not None:\r\n # We create a 3D attention mask from a 2D tensor mask.\r\n # Sizes are [batch_size, 1, 1, to_seq_length]\r\n # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]\r\n # this attention mask is more simple than the triangular masking of causal attention\r\n # used in OpenAI GPT, we just need to prepare the broadcast dimension here.\r\n attention_mask = attention_mask[:, tf.newaxis, tf.newaxis, :]\r\n\r\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\r\n # masked positions, this operation will create a tensor which is 0.0 for\r\n # positions we want to attend and -10000.0 for masked positions.\r\n # Since we are adding it to the raw scores before the softmax, this is\r\n # effectively the same as removing these entirely.\r\n\r\n attention_mask = tf.cast(attention_mask, tf.float32)\r\n attention_mask = (1.0 - attention_mask) * -10000.0\r\n else:\r\n attention_mask = None\r\n\r\n # Prepare head mask if needed\r\n # 1.0 in head_mask indicate we keep the head\r\n # attention_probs has shape bsz x n_heads x N x N\r\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\r\n # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]\r\n if head_mask is not None:\r\n raise NotImplementedError\r\n else:\r\n head_mask = [None] * self.num_hidden_layers\r\n # head_mask = tf.constant([0] * self.num_hidden_layers)\r\n\r\n position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]])\r\n\r\n if inputs_embeds is None:\r\n inputs_embeds = self.tokens_embed(input_ids, mode=\"embedding\")\r\n position_embeds = self.positions_embed(position_ids)\r\n if token_type_ids is not None:\r\n token_type_ids = tf.reshape(token_type_ids, [-1, shape_list(token_type_ids)[-1]])\r\n token_type_embeds = self.tokens_embed(token_type_ids, mode=\"embedding\")\r\n else:\r\n token_type_embeds = 0\r\n hidden_states = inputs_embeds + position_embeds + token_type_embeds\r\n hidden_states = self.drop(hidden_states, training=training)\r\n\r\n output_shape = input_shape + [shape_list(hidden_states)[-1]]\r\n\r\n all_attentions = () if output_attentions else None\r\n all_hidden_states = () if output_hidden_states else None\r\n for i, block in enumerate(self.h):\r\n if output_hidden_states:\r\n all_hidden_states = all_hidden_states + (tf.reshape(hidden_states, output_shape),)\r\n\r\n outputs = block(hidden_states, attention_mask, head_mask[i], output_attentions, training=training)\r\n hidden_states = outputs[0]\r\n if output_attentions:\r\n all_attentions = all_attentions + (outputs[1],)\r\n\r\n hidden_states = tf.reshape(hidden_states, output_shape)\r\n # Add last hidden state\r\n if output_hidden_states:\r\n all_hidden_states = all_hidden_states + (hidden_states,)\r\n\r\n if output_attentions:\r\n # let the number of heads free (-1) so we can extract attention even after head 
pruning\r\n attention_output_shape = input_shape[:-1] + [-1] + shape_list(all_attentions[0])[-2:]\r\n all_attentions = tuple(tf.reshape(t, attention_output_shape) for t in all_attentions)\r\n\r\n if not return_dict:\r\n return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)\r\n\r\n return TFBaseModelOutput(\r\n last_hidden_state=hidden_states,\r\n hidden_states=all_hidden_states,\r\n attentions=all_attentions,\r\n )\r\n\r\n\r\nclass TFOpenAIGPTPreTrainedModel(TFPreTrainedModel):\r\n \"\"\"\r\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\r\n models.\r\n \"\"\"\r\n\r\n config_class = OpenAIGPTConfig\r\n base_model_prefix = \"transformer\"\r\n\r\n\r\n@dataclass\r\nclass TFOpenAIGPTDoubleHeadsModelOutput(ModelOutput):\r\n \"\"\"\r\n Base class for outputs of models predicting if two sentences are consecutive or not.\r\n\r\n Args:\r\n logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, num_choices, sequence_length, config.vocab_size)`):\r\n Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\r\n mc_logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, num_choices)`):\r\n Prediction scores of the multiple choice classification head (scores for each choice before SoftMax).\r\n hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):\r\n Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of\r\n shape :obj:`(batch_size, sequence_length, hidden_size)`.\r\n\r\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\r\n attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):\r\n Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,\r\n sequence_length)`.\r\n\r\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\r\n heads.\r\n \"\"\"\r\n\r\n logits: tf.Tensor = None\r\n mc_logits: tf.Tensor = None\r\n hidden_states: Optional[Tuple[tf.Tensor]] = None\r\n attentions: Optional[Tuple[tf.Tensor]] = None\r\n\r\n\r\nOPENAI_GPT_START_DOCSTRING = r\"\"\"\r\n\r\n This model inherits from :class:`~transformers.TFPreTrainedModel`. Check the superclass documentation for the\r\n generic methods the library implements for all its model (such as downloading or saving, resizing the input\r\n embeddings, pruning heads etc.)\r\n\r\n This model is also a `tf.keras.Model <https://www.tensorflow.org/api_docs/python/tf/keras/Model>`__ subclass. Use\r\n it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage\r\n and behavior.\r\n\r\n .. 
note::\r\n\r\n TF 2.0 models accepts two formats as inputs:\r\n\r\n - having all inputs as keyword arguments (like PyTorch models), or\r\n - having all inputs as a list, tuple or dict in the first positional arguments.\r\n\r\n This second option is useful when using :meth:`tf.keras.Model.fit` method which currently requires having all\r\n the tensors in the first argument of the model call function: :obj:`model(inputs)`.\r\n\r\n If you choose this second option, there are three possibilities you can use to gather all the input Tensors in\r\n the first positional argument :\r\n\r\n - a single Tensor with :obj:`input_ids` only and nothing else: :obj:`model(inputs_ids)`\r\n - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:\r\n :obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])`\r\n - a dictionary with one or several input Tensors associated to the input names given in the docstring:\r\n :obj:`model({\"input_ids\": input_ids, \"token_type_ids\": token_type_ids})`\r\n\r\n\r\n Parameters:\r\n config (:class:`~transformers.OpenAIGPTConfig`): Model configuration class with all the parameters of the model.\r\n Initializing with a config file does not load the weights associated with the model, only the\r\n configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model\r\n weights.\r\n\"\"\"\r\n\r\nOPENAI_GPT_INPUTS_DOCSTRING = r\"\"\"\r\n Args:\r\n input_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`):\r\n Indices of input sequence tokens in the vocabulary.\r\n\r\n Indices can be obtained using :class:`~transformers.OpenAIGPTTokenizer`. See\r\n :func:`transformers.PreTrainedTokenizer.__call__` and :func:`transformers.PreTrainedTokenizer.encode` for\r\n details.\r\n\r\n `What are input IDs? <../glossary.html#input-ids>`__\r\n attention_mask (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`, `optional`):\r\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\r\n\r\n - 1 for tokens that are **not masked**,\r\n - 0 for tokens that are **masked**.\r\n\r\n `What are attention masks? <../glossary.html#attention-mask>`__\r\n token_type_ids (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`, `optional`):\r\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,\r\n 1]``:\r\n\r\n - 0 corresponds to a `sentence A` token,\r\n - 1 corresponds to a `sentence B` token.\r\n\r\n `What are token type IDs? <../glossary.html#token-type-ids>`__\r\n position_ids (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`, `optional`):\r\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,\r\n config.max_position_embeddings - 1]``.\r\n\r\n `What are position IDs? <../glossary.html#position-ids>`__\r\n head_mask (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):\r\n Mask to nullify selected heads of the self-attention modules. 
Mask values selected in ``[0, 1]``:\r\n\r\n - 1 indicates the head is **not masked**,\r\n - 0 indicates the head is **masked**.\r\n\r\n inputs_embeds (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\r\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\r\n This is useful if you want more control over how to convert :obj:`input_ids` indices into associated\r\n vectors than the model's internal embedding lookup matrix.\r\n output_attentions (:obj:`bool`, `optional`):\r\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned\r\n tensors for more detail.\r\n output_hidden_states (:obj:`bool`, `optional`):\r\n Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for\r\n more detail.\r\n return_dict (:obj:`bool`, `optional`):\r\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\r\n training (:obj:`bool`, `optional`, defaults to :obj:`False`):\r\n Whether or not to use the model in training mode (some modules like dropout modules have different\r\n behaviors between training and evaluation).\r\n\"\"\"\r\n\r\n\r\n@add_start_docstrings(\r\n \"The bare OpenAI GPT transformer model outputting raw hidden-states without any specific head on top.\",\r\n OPENAI_GPT_START_DOCSTRING,\r\n)\r\nclass TFOpenAIGPTModel(TFOpenAIGPTPreTrainedModel):\r\n def __init__(self, config, *inputs, **kwargs):\r\n super().__init__(config, *inputs, **kwargs)\r\n self.transformer = TFOpenAIGPTMainLayer(config, name=\"transformer\")\r\n\r\n @add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING)\r\n @add_code_sample_docstrings(\r\n tokenizer_class=_TOKENIZER_FOR_DOC,\r\n checkpoint=\"openai-gpt\",\r\n output_type=TFBaseModelOutput,\r\n config_class=_CONFIG_FOR_DOC,\r\n )\r\n def call(self, inputs, **kwargs):\r\n outputs = self.transformer(inputs, **kwargs)\r\n return outputs\r\n\r\n\r\n@add_start_docstrings(\r\n \"\"\"\r\n OpenAI GPT Model transformer with a language modeling head on top (linear layer with weights tied to the input\r\n embeddings).\r\n \"\"\",\r\n OPENAI_GPT_START_DOCSTRING,\r\n)\r\nclass TFOpenAIGPTLMHeadModel(TFOpenAIGPTPreTrainedModel, TFCausalLanguageModelingLoss):\r\n def __init__(self, config, *inputs, **kwargs):\r\n super().__init__(config, *inputs, **kwargs)\r\n self.transformer = TFOpenAIGPTMainLayer(config, name=\"transformer\")\r\n\r\n def get_output_embeddings(self):\r\n return self.transformer.tokens_embed\r\n\r\n @add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING)\r\n @add_code_sample_docstrings(\r\n tokenizer_class=_TOKENIZER_FOR_DOC,\r\n checkpoint=\"openai-gpt\",\r\n output_type=TFCausalLMOutput,\r\n config_class=_CONFIG_FOR_DOC,\r\n )\r\n def call(\r\n self,\r\n inputs,\r\n attention_mask=None,\r\n token_type_ids=None,\r\n position_ids=None,\r\n head_mask=None,\r\n inputs_embeds=None,\r\n output_attentions=None,\r\n output_hidden_states=None,\r\n return_dict=None,\r\n labels=None,\r\n training=False,\r\n ):\r\n r\"\"\"\r\n labels (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\r\n Labels for computing the cross entropy classification loss. 
Indices should be in ``[0, ...,\r\n config.vocab_size - 1]``.\r\n \"\"\"\r\n return_dict = return_dict if return_dict is not None else self.transformer.return_dict\r\n if isinstance(inputs, (tuple, list)):\r\n labels = inputs[9] if len(inputs) > 9 else labels\r\n if len(inputs) > 9:\r\n inputs = inputs[:9]\r\n elif isinstance(inputs, (dict, BatchEncoding)):\r\n labels = inputs.pop(\"labels\", labels)\r\n\r\n transformer_outputs = self.transformer(\r\n inputs,\r\n attention_mask=attention_mask,\r\n token_type_ids=token_type_ids,\r\n position_ids=position_ids,\r\n head_mask=head_mask,\r\n inputs_embeds=inputs_embeds,\r\n output_attentions=output_attentions,\r\n output_hidden_states=output_hidden_states,\r\n return_dict=return_dict,\r\n training=training,\r\n )\r\n hidden_states = transformer_outputs[0]\r\n\r\n logits = self.transformer.tokens_embed(hidden_states, mode=\"linear\")\r\n\r\n loss = None\r\n if labels is not None:\r\n # shift labels to the left and cut last logit token\r\n logits = logits[:, :-1]\r\n labels = labels[:, 1:]\r\n loss = self.compute_loss(labels, logits)\r\n\r\n if not return_dict:\r\n output = (logits,) + transformer_outputs[1:]\r\n return ((loss,) + output) if loss is not None else output\r\n\r\n return TFCausalLMOutput(\r\n loss=loss,\r\n logits=logits,\r\n hidden_states=transformer_outputs.hidden_states,\r\n attentions=transformer_outputs.attentions,\r\n )\r\n\r\n\r\n@add_start_docstrings(\r\n \"\"\"\r\n OpenAI GPT Model transformer with a language modeling and a multiple-choice classification head on top e.g. for\r\n RocStories/SWAG tasks. The two heads are two linear layers. The language modeling head has its weights tied to the\r\n input embeddings, the classification head takes as input the input of a specified classification token index in the\r\n input sequence).\r\n \"\"\",\r\n OPENAI_GPT_START_DOCSTRING,\r\n)\r\nclass TFOpenAIGPTDoubleHeadsModel(TFOpenAIGPTPreTrainedModel):\r\n def __init__(self, config, *inputs, **kwargs):\r\n super().__init__(config, *inputs, **kwargs)\r\n config.num_labels = 1\r\n self.transformer = TFOpenAIGPTMainLayer(config, name=\"transformer\")\r\n self.multiple_choice_head = TFSequenceSummary(\r\n config, initializer_range=config.initializer_range, name=\"multiple_choice_head\"\r\n )\r\n\r\n def get_output_embeddings(self):\r\n return self.transformer.tokens_embed\r\n\r\n @add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING)\r\n @replace_return_docstrings(output_type=TFOpenAIGPTDoubleHeadsModelOutput, config_class=_CONFIG_FOR_DOC)\r\n def call(\r\n self,\r\n inputs,\r\n attention_mask=None,\r\n token_type_ids=None,\r\n position_ids=None,\r\n head_mask=None,\r\n inputs_embeds=None,\r\n mc_token_ids=None,\r\n output_attentions=None,\r\n output_hidden_states=None,\r\n return_dict=None,\r\n training=False,\r\n ):\r\n r\"\"\"\r\n mc_token_ids (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, num_choices)`, `optional`, default to index of the last token of the input):\r\n Index of the classification token in each input sequence. 
Selected in the range ``[0, input_ids.size(-1) -\r\n 1]``.\r\n\r\n Return:\r\n\r\n Examples::\r\n\r\n >>> import tensorflow as tf\r\n >>> from transformers import OpenAIGPTTokenizer, TFOpenAIGPTDoubleHeadsModel\r\n\r\n >>> tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')\r\n >>> model = TFOpenAIGPTDoubleHeadsModel.from_pretrained('openai-gpt')\r\n\r\n >>> # Add a [CLS] to the vocabulary (we should train it also!)\r\n >>> tokenizer.add_special_tokens({'cls_token': '[CLS]'})\r\n >>> model.resize_token_embeddings(len(tokenizer)) # Update the model embeddings with the new vocabulary size\r\n >>> print(tokenizer.cls_token_id, len(tokenizer)) # The newly token the last token of the vocabulary\r\n\r\n >>> choices = [\"Hello, my dog is cute [CLS]\", \"Hello, my cat is cute [CLS]\"]\r\n >>> encoding = tokenizer(choices, return_tensors=\"tf\")\r\n >>> inputs = {k: tf.expand_dims(v, 0) for k, v in encoding.items()}\r\n >>> inputs[\"mc_token_ids\"]= tf.constant([inputs[\"input_ids\"].shape[-1] - 1, inputs[\"input_ids\"].shape[-1] - 1])[None, :] # Batch size 1\r\n >>> outputs = model(inputs)\r\n >>> lm_prediction_scores, mc_prediction_scores = outputs[:2]\r\n \"\"\"\r\n\r\n if isinstance(inputs, (tuple, list)):\r\n input_ids = inputs[0]\r\n attention_mask = inputs[1] if len(inputs) > 1 else attention_mask\r\n token_type_ids = inputs[2] if len(inputs) > 2 else token_type_ids\r\n position_ids = inputs[3] if len(inputs) > 3 else position_ids\r\n head_mask = inputs[4] if len(inputs) > 4 else head_mask\r\n inputs_embeds = inputs[5] if len(inputs) > 5 else inputs_embeds\r\n mc_token_ids = inputs[6] if len(inputs) > 6 else mc_token_ids\r\n output_attentions = inputs[7] if len(inputs) > 7 else output_attentions\r\n output_hidden_states = inputs[8] if len(inputs) > 8 else output_hidden_states\r\n return_dict = inputs[9] if len(inputs) > 9 else return_dict\r\n assert len(inputs) <= 10, \"Too many inputs.\"\r\n elif isinstance(inputs, (dict, BatchEncoding)):\r\n input_ids = inputs.get(\"input_ids\")\r\n attention_mask = inputs.get(\"attention_mask\", attention_mask)\r\n token_type_ids = inputs.get(\"token_type_ids\", token_type_ids)\r\n position_ids = inputs.get(\"position_ids\", position_ids)\r\n head_mask = inputs.get(\"head_mask\", head_mask)\r\n inputs_embeds = inputs.get(\"inputs_embeds\", inputs_embeds)\r\n mc_token_ids = inputs.get(\"mc_token_ids\", mc_token_ids)\r\n output_attentions = inputs.get(\"output_attentions\", output_attentions)\r\n output_hidden_states = inputs.get(\"output_hidden_states\", output_hidden_states)\r\n return_dict = inputs.get(\"return_dict\", return_dict)\r\n assert len(inputs) <= 10, \"Too many inputs.\"\r\n else:\r\n input_ids = inputs\r\n return_dict = return_dict if return_dict is not None else self.transformer.return_dict\r\n\r\n if input_ids is not None:\r\n input_shapes = shape_list(input_ids)\r\n else:\r\n input_shapes = shape_list(inputs_embeds)[:-1]\r\n\r\n seq_length = input_shapes[-1]\r\n flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None\r\n flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None\r\n flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None\r\n flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None\r\n transformer_outputs = self.transformer(\r\n flat_input_ids,\r\n flat_attention_mask,\r\n flat_token_type_ids,\r\n flat_position_ids,\r\n head_mask,\r\n 
inputs_embeds,\r\n output_attentions,\r\n output_hidden_states,\r\n return_dict=return_dict,\r\n training=training,\r\n )\r\n hidden_states = transformer_outputs[0]\r\n hidden_states = tf.reshape(hidden_states, input_shapes + shape_list(hidden_states)[-1:])\r\n lm_logits = self.transformer.tokens_embed(hidden_states, mode=\"linear\")\r\n mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids, training=training)\r\n mc_logits = tf.squeeze(mc_logits, axis=-1)\r\n\r\n if not return_dict:\r\n return (lm_logits, mc_logits) + transformer_outputs[1:]\r\n\r\n return TFOpenAIGPTDoubleHeadsModelOutput(\r\n logits=lm_logits,\r\n mc_logits=mc_logits,\r\n hidden_states=transformer_outputs.hidden_states,\r\n attentions=transformer_outputs.attentions,\r\n )\r\n", "#!/usr/bin/env python\r\n\r\nimport argparse\r\nimport glob\r\nimport logging\r\nimport os\r\nimport sys\r\nimport time\r\nfrom collections import defaultdict\r\nfrom pathlib import Path\r\nfrom typing import Dict, List, Tuple\r\n\r\nimport numpy as np\r\nimport pytorch_lightning as pl\r\nimport torch\r\nfrom torch.utils.data import DataLoader\r\n\r\nfrom callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback\r\nfrom transformers import MBartTokenizer, T5ForConditionalGeneration\r\nfrom transformers.modeling_bart import shift_tokens_right\r\nfrom utils import (\r\n ROUGE_KEYS,\r\n LegacySeq2SeqDataset,\r\n Seq2SeqDataset,\r\n assert_all_frozen,\r\n calculate_bleu,\r\n calculate_rouge,\r\n check_output_dir,\r\n flatten_list,\r\n freeze_embeds,\r\n freeze_params,\r\n get_git_info,\r\n label_smoothed_nll_loss,\r\n lmap,\r\n pickle_save,\r\n save_git_info,\r\n save_json,\r\n use_task_specific_params,\r\n)\r\n\r\n\r\n# need the parent dir module\r\nsys.path.insert(2, str(Path(__file__).resolve().parents[1]))\r\nfrom lightning_base import BaseTransformer, add_generic_args, generic_train # noqa\r\n\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\n\r\nclass SummarizationModule(BaseTransformer):\r\n mode = \"summarization\"\r\n loss_names = [\"loss\"]\r\n metric_names = ROUGE_KEYS\r\n default_val_metric = \"rouge2\"\r\n\r\n def __init__(self, hparams, **kwargs):\r\n if hparams.sortish_sampler and hparams.gpus > 1:\r\n hparams.replace_sampler_ddp = False\r\n elif hparams.max_tokens_per_batch is not None:\r\n if hparams.gpus > 1:\r\n raise NotImplementedError(\"Dynamic Batch size does not work for multi-gpu training\")\r\n if hparams.sortish_sampler:\r\n raise ValueError(\"--sortish_sampler and --max_tokens_per_batch may not be used simultaneously\")\r\n\r\n super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)\r\n use_task_specific_params(self.model, \"summarization\")\r\n save_git_info(self.hparams.output_dir)\r\n self.metrics_save_path = Path(self.output_dir) / \"metrics.json\"\r\n self.hparams_save_path = Path(self.output_dir) / \"hparams.pkl\"\r\n pickle_save(self.hparams, self.hparams_save_path)\r\n self.step_count = 0\r\n self.metrics = defaultdict(list)\r\n self.model_type = self.config.model_type\r\n self.vocab_size = self.config.tgt_vocab_size if self.model_type == \"fsmt\" else self.config.vocab_size\r\n\r\n self.dataset_kwargs: dict = dict(\r\n data_dir=self.hparams.data_dir,\r\n max_source_length=self.hparams.max_source_length,\r\n prefix=self.model.config.prefix or \"\",\r\n )\r\n n_observations_per_split = {\r\n \"train\": self.hparams.n_train,\r\n \"val\": self.hparams.n_val,\r\n \"test\": self.hparams.n_test,\r\n }\r\n self.n_obs = {k: v if v >= 0 else None for k, v in 
n_observations_per_split.items()}\r\n\r\n self.target_lens = {\r\n \"train\": self.hparams.max_target_length,\r\n \"val\": self.hparams.val_max_target_length,\r\n \"test\": self.hparams.test_max_target_length,\r\n }\r\n assert self.target_lens[\"train\"] <= self.target_lens[\"val\"], f\"target_lens: {self.target_lens}\"\r\n assert self.target_lens[\"train\"] <= self.target_lens[\"test\"], f\"target_lens: {self.target_lens}\"\r\n if self.hparams.freeze_embeds:\r\n freeze_embeds(self.model)\r\n if self.hparams.freeze_encoder:\r\n freeze_params(self.model.get_encoder())\r\n assert_all_frozen(self.model.get_encoder())\r\n\r\n self.hparams.git_sha = get_git_info()[\"repo_sha\"]\r\n self.num_workers = hparams.num_workers\r\n self.decoder_start_token_id = None # default to config\r\n if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):\r\n self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]\r\n self.model.config.decoder_start_token_id = self.decoder_start_token_id\r\n self.dataset_class = (\r\n Seq2SeqDataset if hasattr(self.tokenizer, \"prepare_seq2seq_batch\") else LegacySeq2SeqDataset\r\n )\r\n self.already_saved_batch = False\r\n self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams\r\n if self.hparams.eval_max_gen_length is not None:\r\n self.eval_max_length = self.hparams.eval_max_gen_length\r\n else:\r\n self.eval_max_length = self.model.config.max_length\r\n self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric\r\n\r\n def save_readable_batch(self, batch: Dict[str, torch.Tensor]) -> Dict[str, List[str]]:\r\n \"\"\"A debugging utility\"\"\"\r\n readable_batch = {\r\n k: self.tokenizer.batch_decode(v.tolist()) if \"mask\" not in k else v.shape for k, v in batch.items()\r\n }\r\n save_json(readable_batch, Path(self.output_dir) / \"text_batch.json\")\r\n save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / \"tok_batch.json\")\r\n\r\n self.already_saved_batch = True\r\n return readable_batch\r\n\r\n def forward(self, input_ids, **kwargs):\r\n return self.model(input_ids, **kwargs)\r\n\r\n def ids_to_clean_text(self, generated_ids: List[int]):\r\n gen_text = self.tokenizer.batch_decode(\r\n generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True\r\n )\r\n return lmap(str.strip, gen_text)\r\n\r\n def _step(self, batch: dict) -> Tuple:\r\n pad_token_id = self.tokenizer.pad_token_id\r\n src_ids, src_mask = batch[\"input_ids\"], batch[\"attention_mask\"]\r\n tgt_ids = batch[\"labels\"]\r\n if isinstance(self.model, T5ForConditionalGeneration):\r\n decoder_input_ids = self.model._shift_right(tgt_ids)\r\n else:\r\n decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)\r\n if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero\r\n batch[\"decoder_input_ids\"] = decoder_input_ids\r\n self.save_readable_batch(batch)\r\n\r\n outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)\r\n lm_logits = outputs[0]\r\n if self.hparams.label_smoothing == 0:\r\n # Same behavior as modeling_bart.py, besides ignoring pad_token_id\r\n ce_loss_fct = torch.nn.CrossEntropyLoss(ignore_index=pad_token_id)\r\n\r\n assert lm_logits.shape[-1] == self.vocab_size\r\n loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))\r\n else:\r\n lprobs = torch.nn.functional.log_softmax(lm_logits, 
dim=-1)\r\n loss, nll_loss = label_smoothed_nll_loss(\r\n lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id\r\n )\r\n return (loss,)\r\n\r\n @property\r\n def pad(self) -> int:\r\n return self.tokenizer.pad_token_id\r\n\r\n def training_step(self, batch, batch_idx) -> Dict:\r\n loss_tensors = self._step(batch)\r\n\r\n logs = {name: loss for name, loss in zip(self.loss_names, loss_tensors)}\r\n # tokens per batch\r\n logs[\"tpb\"] = batch[\"input_ids\"].ne(self.pad).sum() + batch[\"labels\"].ne(self.pad).sum()\r\n logs[\"bs\"] = batch[\"input_ids\"].shape[0]\r\n logs[\"src_pad_tok\"] = batch[\"input_ids\"].eq(self.pad).sum()\r\n logs[\"src_pad_frac\"] = batch[\"input_ids\"].eq(self.pad).float().mean()\r\n # TODO(SS): make a wandb summary metric for this\r\n return {\"loss\": loss_tensors[0], \"log\": logs}\r\n\r\n def validation_step(self, batch, batch_idx) -> Dict:\r\n return self._generative_step(batch)\r\n\r\n def validation_epoch_end(self, outputs, prefix=\"val\") -> Dict:\r\n self.step_count += 1\r\n losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}\r\n loss = losses[\"loss\"]\r\n generative_metrics = {\r\n k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + [\"gen_time\", \"gen_len\"]\r\n }\r\n metric_val = (\r\n generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]\r\n )\r\n metric_tensor: torch.FloatTensor = torch.tensor(metric_val).type_as(loss)\r\n generative_metrics.update({k: v.item() for k, v in losses.items()})\r\n losses.update(generative_metrics)\r\n all_metrics = {f\"{prefix}_avg_{k}\": x for k, x in losses.items()}\r\n all_metrics[\"step_count\"] = self.step_count\r\n self.metrics[prefix].append(all_metrics) # callback writes this to self.metrics_save_path\r\n preds = flatten_list([x[\"preds\"] for x in outputs])\r\n return {\r\n \"log\": all_metrics,\r\n \"preds\": preds,\r\n f\"{prefix}_loss\": loss,\r\n f\"{prefix}_{self.val_metric}\": metric_tensor,\r\n }\r\n\r\n def calc_generative_metrics(self, preds, target) -> Dict:\r\n return calculate_rouge(preds, target)\r\n\r\n def _generative_step(self, batch: dict) -> dict:\r\n t0 = time.time()\r\n\r\n # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')\r\n generated_ids = self.model.generate(\r\n batch[\"input_ids\"],\r\n attention_mask=batch[\"attention_mask\"],\r\n use_cache=True,\r\n decoder_start_token_id=self.decoder_start_token_id,\r\n num_beams=self.eval_beams,\r\n max_length=self.eval_max_length,\r\n )\r\n gen_time = (time.time() - t0) / batch[\"input_ids\"].shape[0]\r\n preds: List[str] = self.ids_to_clean_text(generated_ids)\r\n target: List[str] = self.ids_to_clean_text(batch[\"labels\"])\r\n loss_tensors = self._step(batch)\r\n base_metrics = {name: loss for name, loss in zip(self.loss_names, loss_tensors)}\r\n rouge: Dict = self.calc_generative_metrics(preds, target)\r\n summ_len = np.mean(lmap(len, generated_ids))\r\n base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)\r\n return base_metrics\r\n\r\n def test_step(self, batch, batch_idx):\r\n return self._generative_step(batch)\r\n\r\n def test_epoch_end(self, outputs):\r\n return self.validation_epoch_end(outputs, prefix=\"test\")\r\n\r\n def get_dataset(self, type_path) -> Seq2SeqDataset:\r\n n_obs = self.n_obs[type_path]\r\n max_target_length = self.target_lens[type_path]\r\n dataset = self.dataset_class(\r\n self.tokenizer,\r\n 
type_path=type_path,\r\n n_obs=n_obs,\r\n max_target_length=max_target_length,\r\n **self.dataset_kwargs,\r\n )\r\n return dataset\r\n\r\n def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:\r\n dataset = self.get_dataset(type_path)\r\n\r\n if self.hparams.sortish_sampler and type_path != \"test\" and type_path != \"val\":\r\n sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)\r\n return DataLoader(\r\n dataset,\r\n batch_size=batch_size,\r\n collate_fn=dataset.collate_fn,\r\n shuffle=False,\r\n num_workers=self.num_workers,\r\n sampler=sampler,\r\n )\r\n\r\n elif self.hparams.max_tokens_per_batch is not None and type_path != \"test\" and type_path != \"val\":\r\n batch_sampler = dataset.make_dynamic_sampler(\r\n self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1\r\n )\r\n return DataLoader(\r\n dataset,\r\n batch_sampler=batch_sampler,\r\n collate_fn=dataset.collate_fn,\r\n # shuffle=False,\r\n num_workers=self.num_workers,\r\n # batch_size=None,\r\n )\r\n else:\r\n return DataLoader(\r\n dataset,\r\n batch_size=batch_size,\r\n collate_fn=dataset.collate_fn,\r\n shuffle=shuffle,\r\n num_workers=self.num_workers,\r\n sampler=None,\r\n )\r\n\r\n def train_dataloader(self) -> DataLoader:\r\n dataloader = self.get_dataloader(\"train\", batch_size=self.hparams.train_batch_size, shuffle=True)\r\n return dataloader\r\n\r\n def val_dataloader(self) -> DataLoader:\r\n return self.get_dataloader(\"val\", batch_size=self.hparams.eval_batch_size)\r\n\r\n def test_dataloader(self) -> DataLoader:\r\n return self.get_dataloader(\"test\", batch_size=self.hparams.eval_batch_size)\r\n\r\n @staticmethod\r\n def add_model_specific_args(parser, root_dir):\r\n BaseTransformer.add_model_specific_args(parser, root_dir)\r\n add_generic_args(parser, root_dir)\r\n parser.add_argument(\r\n \"--max_source_length\",\r\n default=1024,\r\n type=int,\r\n help=\"The maximum total input sequence length after tokenization. Sequences longer \"\r\n \"than this will be truncated, sequences shorter will be padded.\",\r\n )\r\n parser.add_argument(\r\n \"--max_target_length\",\r\n default=56,\r\n type=int,\r\n help=\"The maximum total input sequence length after tokenization. Sequences longer \"\r\n \"than this will be truncated, sequences shorter will be padded.\",\r\n )\r\n parser.add_argument(\r\n \"--val_max_target_length\",\r\n default=142, # these defaults are optimized for CNNDM. For xsum, see README.md.\r\n type=int,\r\n help=\"The maximum total input sequence length after tokenization. Sequences longer \"\r\n \"than this will be truncated, sequences shorter will be padded.\",\r\n )\r\n parser.add_argument(\r\n \"--test_max_target_length\",\r\n default=142,\r\n type=int,\r\n help=\"The maximum total input sequence length after tokenization. 
Sequences longer \"\r\n \"than this will be truncated, sequences shorter will be padded.\",\r\n )\r\n parser.add_argument(\"--freeze_encoder\", action=\"store_true\")\r\n parser.add_argument(\"--freeze_embeds\", action=\"store_true\")\r\n parser.add_argument(\"--sortish_sampler\", action=\"store_true\", default=False)\r\n parser.add_argument(\"--overwrite_output_dir\", action=\"store_true\", default=False)\r\n parser.add_argument(\"--max_tokens_per_batch\", type=int, default=None)\r\n parser.add_argument(\"--logger_name\", type=str, choices=[\"default\", \"wandb\", \"wandb_shared\"], default=\"default\")\r\n parser.add_argument(\"--n_train\", type=int, default=-1, required=False, help=\"# examples. -1 means use all.\")\r\n parser.add_argument(\"--n_val\", type=int, default=500, required=False, help=\"# examples. -1 means use all.\")\r\n parser.add_argument(\"--n_test\", type=int, default=-1, required=False, help=\"# examples. -1 means use all.\")\r\n parser.add_argument(\r\n \"--task\", type=str, default=\"summarization\", required=False, help=\"# examples. -1 means use all.\"\r\n )\r\n parser.add_argument(\"--label_smoothing\", type=float, default=0.0, required=False)\r\n parser.add_argument(\"--src_lang\", type=str, default=\"\", required=False)\r\n parser.add_argument(\"--tgt_lang\", type=str, default=\"\", required=False)\r\n parser.add_argument(\"--eval_beams\", type=int, default=None, required=False)\r\n parser.add_argument(\r\n \"--val_metric\", type=str, default=None, required=False, choices=[\"bleu\", \"rouge2\", \"loss\", None]\r\n )\r\n parser.add_argument(\"--eval_max_gen_length\", type=int, default=None, help=\"never generate more than n tokens\")\r\n parser.add_argument(\"--save_top_k\", type=int, default=1, required=False, help=\"How many checkpoints to save\")\r\n parser.add_argument(\r\n \"--early_stopping_patience\",\r\n type=int,\r\n default=-1,\r\n required=False,\r\n help=\"-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. 
So val_check_interval will effect it.\",\r\n )\r\n return parser\r\n\r\n\r\nclass TranslationModule(SummarizationModule):\r\n mode = \"translation\"\r\n loss_names = [\"loss\"]\r\n metric_names = [\"bleu\"]\r\n default_val_metric = \"bleu\"\r\n\r\n def __init__(self, hparams, **kwargs):\r\n super().__init__(hparams, **kwargs)\r\n self.dataset_kwargs[\"src_lang\"] = hparams.src_lang\r\n self.dataset_kwargs[\"tgt_lang\"] = hparams.tgt_lang\r\n\r\n def calc_generative_metrics(self, preds, target) -> dict:\r\n return calculate_bleu(preds, target)\r\n\r\n\r\ndef main(args, model=None) -> SummarizationModule:\r\n Path(args.output_dir).mkdir(exist_ok=True)\r\n check_output_dir(args, expected_items=3)\r\n\r\n if model is None:\r\n if \"summarization\" in args.task:\r\n model: SummarizationModule = SummarizationModule(args)\r\n else:\r\n model: SummarizationModule = TranslationModule(args)\r\n dataset = Path(args.data_dir).name\r\n if (\r\n args.logger_name == \"default\"\r\n or args.fast_dev_run\r\n or str(args.output_dir).startswith(\"/tmp\")\r\n or str(args.output_dir).startswith(\"/var\")\r\n ):\r\n logger = True # don't pollute wandb logs unnecessarily\r\n elif args.logger_name == \"wandb\":\r\n from pytorch_lightning.loggers import WandbLogger\r\n\r\n project = os.environ.get(\"WANDB_PROJECT\", dataset)\r\n logger = WandbLogger(name=model.output_dir.name, project=project)\r\n\r\n elif args.logger_name == \"wandb_shared\":\r\n from pytorch_lightning.loggers import WandbLogger\r\n\r\n logger = WandbLogger(name=model.output_dir.name, project=f\"hf_{dataset}\")\r\n\r\n if args.early_stopping_patience >= 0:\r\n es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)\r\n else:\r\n es_callback = False\r\n\r\n lower_is_better = args.val_metric == \"loss\"\r\n trainer: pl.Trainer = generic_train(\r\n model,\r\n args,\r\n logging_callback=Seq2SeqLoggingCallback(),\r\n checkpoint_callback=get_checkpoint_callback(\r\n args.output_dir, model.val_metric, args.save_top_k, lower_is_better\r\n ),\r\n early_stopping_callback=es_callback,\r\n logger=logger,\r\n )\r\n pickle_save(model.hparams, model.output_dir / \"hparams.pkl\")\r\n if not args.do_predict:\r\n return model\r\n\r\n model.hparams.test_checkpoint = \"\"\r\n checkpoints = list(sorted(glob.glob(os.path.join(args.output_dir, \"*.ckpt\"), recursive=True)))\r\n if checkpoints:\r\n model.hparams.test_checkpoint = checkpoints[-1]\r\n trainer.resume_from_checkpoint = checkpoints[-1]\r\n trainer.logger.log_hyperparams(model.hparams)\r\n\r\n # test() without a model tests using the best checkpoint automatically\r\n trainer.test()\r\n return model\r\n\r\n\r\nif __name__ == \"__main__\":\r\n parser = argparse.ArgumentParser()\r\n parser = pl.Trainer.add_argparse_args(parser)\r\n parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())\r\n\r\n args = parser.parse_args()\r\n\r\n main(args)\r\n", "import argparse\r\nimport os\r\n\r\nimport torch\r\n\r\nfrom transformers.file_utils import WEIGHTS_NAME\r\n\r\n\r\nDIALOGPT_MODELS = [\"small\", \"medium\", \"large\"]\r\n\r\nOLD_KEY = \"lm_head.decoder.weight\"\r\nNEW_KEY = \"lm_head.weight\"\r\n\r\n\r\ndef convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):\r\n d = torch.load(checkpoint_path)\r\n d[NEW_KEY] = d.pop(OLD_KEY)\r\n os.makedirs(pytorch_dump_folder_path, exist_ok=True)\r\n torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n parser = argparse.ArgumentParser()\r\n 
parser.add_argument(\"--dialogpt_path\", default=\".\", type=str)\r\n args = parser.parse_args()\r\n for MODEL in DIALOGPT_MODELS:\r\n checkpoint_path = os.path.join(args.dialogpt_path, f\"{MODEL}_ft.pkl\")\r\n pytorch_dump_folder_path = f\"./DialoGPT-{MODEL}\"\r\n convert_dialogpt_checkpoint(\r\n checkpoint_path,\r\n pytorch_dump_folder_path,\r\n )\r\n", "# coding=utf-8\r\n# Copyright 2019-present, the HuggingFace Inc. team.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\"\"\"\r\nPreprocessing script before training DistilBERT.\r\nSpecific to BERT -> DistilBERT.\r\n\"\"\"\r\nimport argparse\r\n\r\nimport torch\r\n\r\nfrom transformers import BertForMaskedLM\r\n\r\n\r\nif __name__ == \"__main__\":\r\n parser = argparse.ArgumentParser(\r\n description=\"Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned Distillation\"\r\n )\r\n parser.add_argument(\"--model_type\", default=\"bert\", choices=[\"bert\"])\r\n parser.add_argument(\"--model_name\", default=\"bert-base-uncased\", type=str)\r\n parser.add_argument(\"--dump_checkpoint\", default=\"serialization_dir/tf_bert-base-uncased_0247911.pth\", type=str)\r\n parser.add_argument(\"--vocab_transform\", action=\"store_true\")\r\n args = parser.parse_args()\r\n\r\n if args.model_type == \"bert\":\r\n model = BertForMaskedLM.from_pretrained(args.model_name)\r\n prefix = \"bert\"\r\n else:\r\n raise ValueError('args.model_type should be \"bert\".')\r\n\r\n state_dict = model.state_dict()\r\n compressed_sd = {}\r\n\r\n for w in [\"word_embeddings\", \"position_embeddings\"]:\r\n compressed_sd[f\"distilbert.embeddings.{w}.weight\"] = state_dict[f\"{prefix}.embeddings.{w}.weight\"]\r\n for w in [\"weight\", \"bias\"]:\r\n compressed_sd[f\"distilbert.embeddings.LayerNorm.{w}\"] = state_dict[f\"{prefix}.embeddings.LayerNorm.{w}\"]\r\n\r\n std_idx = 0\r\n for teacher_idx in [0, 2, 4, 7, 9, 11]:\r\n for w in [\"weight\", \"bias\"]:\r\n compressed_sd[f\"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}\"] = state_dict[\r\n f\"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}\"\r\n ]\r\n compressed_sd[f\"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}\"] = state_dict[\r\n f\"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}\"\r\n ]\r\n compressed_sd[f\"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}\"] = state_dict[\r\n f\"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}\"\r\n ]\r\n\r\n compressed_sd[f\"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}\"] = state_dict[\r\n f\"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}\"\r\n ]\r\n compressed_sd[f\"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}\"] = state_dict[\r\n f\"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}\"\r\n ]\r\n\r\n compressed_sd[f\"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}\"] = state_dict[\r\n f\"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}\"\r\n ]\r\n 
compressed_sd[f\"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}\"] = state_dict[\r\n f\"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}\"\r\n ]\r\n compressed_sd[f\"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}\"] = state_dict[\r\n f\"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}\"\r\n ]\r\n std_idx += 1\r\n\r\n compressed_sd[\"vocab_projector.weight\"] = state_dict[\"cls.predictions.decoder.weight\"]\r\n compressed_sd[\"vocab_projector.bias\"] = state_dict[\"cls.predictions.bias\"]\r\n if args.vocab_transform:\r\n for w in [\"weight\", \"bias\"]:\r\n compressed_sd[f\"vocab_transform.{w}\"] = state_dict[f\"cls.predictions.transform.dense.{w}\"]\r\n compressed_sd[f\"vocab_layer_norm.{w}\"] = state_dict[f\"cls.predictions.transform.LayerNorm.{w}\"]\r\n\r\n print(f\"N layers selected for distillation: {std_idx}\")\r\n print(f\"Number of params transfered for distillation: {len(compressed_sd.keys())}\")\r\n\r\n print(f\"Save transfered checkpoint to {args.dump_checkpoint}.\")\r\n torch.save(compressed_sd, args.dump_checkpoint)\r\n" ]
[ [ "torch.nn.Dropout", "torch.nn.CrossEntropyLoss", "torch.ones", "torch.zeros", "torch.sum", "torch.exp", "torch.nn.Linear", "torch.log", "torch.nn.MSELoss" ], [ "torch.manual_seed", "torch.cuda.is_available", "numpy.random.seed", "torch.cuda.manual_seed_all" ], [ "tensorflow.fill", "tensorflow.constant", "tensorflow.cast", "tensorflow.reshape", "tensorflow.squeeze", "tensorflow.keras.layers.Dropout", "tensorflow.split" ], [ "numpy.array", "torch.cuda.is_available" ], [ "torch.nn.Softmax", "torch.nn.Dropout", "torch.nn.CrossEntropyLoss", "torch.ones", "torch.zeros", "torch.nn.Embedding", "torch.nn.LayerNorm", "torch.nn.Tanh", "torch.nn.Linear", "torch.matmul", "torch.tanh", "torch.tensor", "torch.arange", "torch.cumsum", "torch.nn.MSELoss" ], [ "tensorflow.keras.layers.LayerNormalization", "tensorflow.matmul", "tensorflow.nn.softmax", "tensorflow.transpose", "tensorflow.math.sqrt", "tensorflow.range", "tensorflow.cast", "tensorflow.reshape", "tensorflow.squeeze", "tensorflow.keras.layers.Dropout", "tensorflow.split" ], [ "torch.nn.CrossEntropyLoss", "torch.nn.functional.log_softmax", "torch.utils.data.DataLoader", "torch.tensor", "torch.stack", "numpy.array" ], [ "torch.load" ], [ "torch.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
pailabteam/pailab
[ "3995b25f105827ae631e6120f380748d7d284c9f", "3995b25f105827ae631e6120f380748d7d284c9f" ]
[ "pailab/tools/tree.py", "pailab/analysis/tools_jupyter.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"This module contains all functions and classes for the MLTree. The MLTree buils a tree-like\nstructure of the objects in a given repository. This allows the user to access objects in a\ncomfortable way allowing for autocompletion (i.e. in Jupyter notebooks).\n\nTo use it one can simply call the :py:meth:`pailab.tools.tree.MLTree.add_tree` method to \nadd such a tree to the current repository::\n\n >>from pailab.tools.tree import MLTree\n >>MLTree.add_tree(ml_repo)\n\nAfter the tree has been added, one can simply use the tree. Here, using autocompletion makes the basic work wih repo objects quite simply.\nEach tree node provides useful functions that can be applied:\n\n- ``load`` loads the object of the given tree node or the child tree nodes of the current node. a\n After calling load the respective nodes have a new attribute ``obj`` that contains the respective loaded object. To load all objects belonging to the models subtree like \n parameters, evaluations or measures one can call::\n\n >> ml_repo.tree.models.load()\n\n- ``history`` lists the history of all objects of the respective subtree, where history excepts certain parameters such as a range of versions or \n which repo object information to include. To list th history of all training data just use::\n\n >> ml_repo.tree.training_data.history()\n\n- ``modifications`` lists all objects of the respective subtree that have been modified and no yet been committed.\n\nThere are also node dependent function (depending on what object the node represents).\n\"\"\"\nimport logging\nfrom numpy import load\nfrom deepdiff import DeepDiff\nfrom pailab.ml_repo.repo import MLObjectType, MLRepo\nfrom pailab.ml_repo.repo_objects import RepoInfoKey, DataSet # pylint: disable=E0401\nfrom pailab.ml_repo.repo_store import RepoStore # pylint: disable=E0401\nimport pailab.ml_repo.repo_store as repo_store\nimport pailab.ml_repo.repo_objects as repo_objects\nlogger = logging.getLogger(__name__)\n\n#region collections and items\n\n\n\nclass _RepoObjectItem:\n\n def __init__(self, name, ml_repo, repo_obj = None):\n self._name = name\n self._repo = ml_repo\n if repo_obj is not None:\n self.obj = repo_obj\n \n def _set(self, path, items):\n if len(path) > 0:\n if len(path) == 1:\n setattr(self, path[0], items[0])\n return\n if hasattr(self, path[0]):\n getattr(self, path[0])._set(path[1:], items[1:])\n else:\n setattr(self, path[0], items[0])\n items[0]._set(path[1:], items[1:])\n\n def load(self, version=repo_store.LAST_VERSION, full_object=False,\n modifier_versions=None, containing_str=None):\n \"\"\"Loads the object into the tree and stores it in obj member.\n \n Args:\n version (str, optional): The version of the object to be loaded. Defaults to repo_store.LAST_VERSION.\n full_object (bool, optional): If True, also the bigobject-members of the object will be loaded and stored. Defaults to False.\n modifier_versions (dict of str to str, optional): The version of the object that has been created with the objects \n and their respective versions defined in the dict will be loaded. Defaults to None.\n containing_str (str, optional): The object will only be loaded if the given string is contained in the objects \n name (intended for internal use). 
Defaults to None.\n \"\"\"\n if containing_str is None or containing_str in self._name:\n if self._repo is not None:\n self.obj = self._repo.get(self._name, version, full_object, modifier_versions, throw_error_not_exist = False)\n for v in self.__dict__.values():\n if hasattr(v,'load'):\n v.load(version, full_object, modifier_versions, containing_str)\n\n def modifications(self, commit=False, commit_message=''):\n result = {}\n if self._name is not None:\n try:\n if self._repo is not None:\n obj_orig = self._repo.get(\n self.obj.repo_info[RepoInfoKey.NAME], version=self.obj.repo_info[RepoInfoKey.VERSION])\n diff = DeepDiff(obj_orig, self.obj,\n ignore_order=True)\n except AttributeError:\n return None\n if len(diff) == 0:\n return None\n else:\n if commit and (self._repo is not None):\n version = self._repo.add(\n self.obj, message=commit_message)\n self.obj = self._repo.get(self._name, version=version)\n result = {self._name: diff}\n for v in self.__dict__.values():\n if hasattr(v, 'modifications'):\n tmp = v.modifications(commit, commit_message)\n if tmp is not None:\n result.update(tmp)\n return result\n\n def history(self, version = (repo_store.FIRST_VERSION,repo_store.LAST_VERSION), \n repo_info = [RepoInfoKey.NAME, RepoInfoKey.AUTHOR, RepoInfoKey.COMMIT_DATE, RepoInfoKey.COMMIT_MESSAGE], \n obj_data = []):\n history = []\n if self._repo is not None:\n history = self._repo.get(self._name, version = version, throw_error_not_exist=False)\n if not isinstance(history, list):\n history = [history]\n result = {}\n tmp = []\n for h in history:\n r = {}\n for r_info in repo_info:\n r[str(r_info)] = h.repo_info[r_info]\n for o_info in obj_data:\n r[o_info] = obj_data.__dict__[o_info]\n tmp.append(r)\n result[self._name] = tmp\n for v in self.__dict__.values():\n if isinstance(v, _RepoObjectItem):\n tmp2 = v.history(version, repo_info, obj_data)\n if tmp2 is not None:\n result.update(tmp2)\n if len(result) > 0:\n return result\n \n\n def __call__(self, containing_str=None):\n # if len(self.__dict__) == 1:\n if containing_str is not None:\n result = []\n if containing_str in self._name:\n result.append(self._name)\n for v in self.__dict__.values():\n if isinstance(v, _RepoObjectItem):\n d = v(containing_str)\n if isinstance(d, str):\n result.append(d)\n else:\n result.extend(d)\n return [x for x in result if containing_str in x]\n else:\n return self._name\n return result\n\n\nclass _RawDataItem(_RepoObjectItem):\n def __init__(self, name, ml_repo, repo_obj = None):\n super(_RawDataItem,self).__init__(name, ml_repo, repo_obj)\n\n def append(self, x_data, y_data = None):\n \"\"\"Append data to a RawData object\n\n It appends data to the given RawData object and updates all training and test DataSets which implicitely changed by this update.\n\n Args:\n name (string): name of RawData object\n x_data (numpy matrix): the x_data to append\n y_data (numpy matrix, optional): Defaults to None. The y_data to append\n \n Raises:\n Exception: If the data is not consistent to the RawData (e.g. 
different number of x-coordinates) it throws an exception.\n \"\"\"\n logger.info('Start appending ' + str(x_data.shape[0]) + ' datapoints to RawData' + self._name)\n raw_data = self._repo.get(self._name)\n if len(raw_data.x_coord_names) != x_data.shape[1]:\n raise Exception('Number of columns of x_data of RawData object is not equal to number of columns of additional x_data.')\n if raw_data.y_coord_names is None and y_data is not None:\n raise Exception('RawData object does not contain y_data but y_data is given')\n if raw_data.y_coord_names is not None:\n if y_data is None:\n raise Exception('RawData object has y_data but no y_data is given')\n if y_data.shape[1] != len(raw_data.y_coord_names ):\n raise Exception('Number of columns of y_data of RawData object is not equal to number of columns of additional y_data.')\n numpy_dict = {'x_data' : x_data}\n if raw_data.y_coord_names is not None:\n numpy_dict['y_data'] = y_data\n raw_data.n_data += x_data.shape[0]\n old_version = raw_data.repo_info[RepoInfoKey.VERSION]\n new_version = self._repo.add(raw_data)\n self._repo._numpy_repo.append(self._name, old_version, new_version, numpy_dict)\n # now find all datasets which are affected by the updated data\n changed_data_sets = []\n training_data = self._repo.get_training_data(full_object = False)\n if isinstance(training_data, DataSet):\n if training_data.raw_data == self._name and training_data.raw_data_version == repo_store.RepoStore.LAST_VERSION:\n if training_data.end_index is None or training_data.end_index < 0:\n training_data.raw_data_version = new_version\n changed_data_sets.append(training_data)\n test_data = self._repo.get_names(MLObjectType.TEST_DATA)\n for d in test_data:\n data = self._repo.get(d)\n if isinstance(data, DataSet):\n if data.raw_data == self._name and data.raw_data_version == repo_store.RepoStore.LAST_VERSION:\n if data.end_index is None or data.end_index < 0:\n data.raw_data_version = new_version\n changed_data_sets.append(data)\n self._repo.add(changed_data_sets, 'RawData ' + self._name + ' updated, add DataSets depending om the updated RawData.')\n if hasattr(self, 'obj'):#update current object\n self.obj = self._repo.get(self._name, version=new_version)\n logger.info('Finished appending data to RawData' + self._name)\n\nclass _RawDataCollection(_RepoObjectItem):\n @staticmethod\n def __get_name_from_path(path):\n return path.split('/')[-1]\n\n def __init__(self, repo):\n super(_RawDataCollection, self).__init__('raw_data', repo)\n names = repo.get_names(MLObjectType.RAW_DATA)\n for n in names:\n setattr(self, _RawDataCollection.__get_name_from_path(n), _RawDataItem(n, repo))\n \n def add(self, name, data, input_variables = None, target_variables = None):\n \"\"\"Add raw data to the repository\n\n Arguments:\n data_name {name of data} -- the name of the data added\n data {pandas DataFrame} -- the data as pandas datatable\n input_variables {str or iterable of str} -- column name or iterable of column names defining the input variables of the given data\n target_variables {str or iterable of str} -- column name or iterable of column names defining the target variables of the given data\n \n Keyword Arguments:\n input_variables {list of strings} -- list of column names defining the input variables for the machine learning (default: {None}). If None, all variables are used as input\n target_variables {list of strings} -- list of column names defining the target variables for the machine learning (default: {None}). 
If None, no target data is added from the table.\n \"\"\"\n path = 'raw_data/' + name\n\n if input_variables is None:\n input_variables = list(data)\n if not target_variables is None:\n [input_variables.remove(x) for x in target_variables]\n else:\n if isinstance(input_variables, str):\n input_variables = [input_variables]\n # check whether the input_variables are included in the data\n if not [item for item in input_variables if item in list(data)] == list(input_variables):\n raise Exception('RawData does not include at least one column included in input_variables')\n \n if target_variables is not None:\n if isinstance(target_variables, str):\n target_variables = [target_variables]\n # check if target variables are in list\n if not [item for item in target_variables if item in list(data)] == list(target_variables):\n raise Exception('RawData does not include at least one column included in target_variables')\n raw_data = repo_objects.RawData(data.loc[:, input_variables].values, input_variables, data.loc[:, target_variables].values, \n target_variables, repo_info = {RepoInfoKey.NAME: path})\n else:\n raw_data = repo_objects.RawData(data.loc[:, input_variables].values, input_variables, repo_info = {RepoInfoKey.NAME: path})\n v = self._repo.add(raw_data, 'data ' + path + ' added to repository' , category = MLObjectType.RAW_DATA)\n obj = self._repo.get(path, version=v, full_object = False)\n setattr(self, name, _RawDataItem(path, self._repo, obj))\n\n def add_from_numpy_file(self, name, filename_X, x_names, filename_Y=None, y_names = None):\n path = name\n X = load(filename_X)\n Y = None\n if filename_Y is not None:\n Y = load(filename_Y)\n raw_data = repo_objects.RawData(X, x_names, Y, y_names, repo_info = {RepoInfoKey.NAME: path})\n v = self._repo.add(raw_data, 'data ' + path + ' added to repository' , category = MLObjectType.RAW_DATA)\n obj = self._repo.get(path, version=v, full_object = False)\n setattr(self, name, _RawDataItem(path, self._repo, obj))\n\nclass _TrainingDataCollection(_RepoObjectItem):\n\n @staticmethod\n def __get_name_from_path(path):\n return path.split('/')[-1]\n \n def __init__(self, repo):\n super(_TrainingDataCollection, self).__init__('training_data', None)\n self.__repo = repo # we store ml_repo in __repo to circumvent that obj is loaded from eneric base class\n names = repo.get_names(MLObjectType.TRAINING_DATA)\n for n in names:\n setattr(self, _TrainingDataCollection.__get_name_from_path(n), _RepoObjectItem(n, repo))\n \n def add(self, name, raw_data, start_index=0, \n end_index=None, raw_data_version='last'):\n #path = 'training_data/' + name\n data_set = repo_objects.DataSet(raw_data, start_index, end_index, \n raw_data_version, repo_info = {RepoInfoKey.NAME: name, RepoInfoKey.CATEGORY: MLObjectType.TRAINING_DATA})\n v = self.__repo.add(data_set)\n tmp = self.__repo.get(name, version=v)\n item = _RepoObjectItem(name, self.__repo, tmp)\n setattr(self, name, item)\n\nclass _TestDataCollection(_RepoObjectItem):\n @staticmethod\n def __get_name_from_path(path):\n return path.split('/')[-1]\n \n def __init__(self, repo):\n super(_TestDataCollection, self).__init__('test_data', None)\n self.__repo = repo # we store ml_repo in __repo to circumvent that obj is loaded from eneric base class\n names = repo.get_names(MLObjectType.TEST_DATA)\n for n in names:\n setattr(self, _TestDataCollection.__get_name_from_path(n), _RepoObjectItem(n,repo))\n \n def add(self, name, raw_data, start_index=0, \n end_index=None, raw_data_version='last'):\n data_set = 
repo_objects.DataSet(raw_data, start_index, end_index, \n raw_data_version, repo_info = {RepoInfoKey.NAME: name, RepoInfoKey.CATEGORY: MLObjectType.TEST_DATA})\n v = self.__repo.add(data_set)\n tmp = self.__repo.get(name, version=v)\n item = _RepoObjectItem(name, self.__repo, tmp)\n setattr(self, name, item)\n\nclass _MeasureItem(_RepoObjectItem):\n def __init__(self, name, ml_repo, repo_obj = None):\n super(_MeasureItem, self).__init__(name, ml_repo, repo_obj) \n\nclass _JobItem(_RepoObjectItem):\n def __init__(self, name, ml_repo, repo_obj = None):\n super(_JobItem, self).__init__(name, ml_repo, repo_obj) \n\nclass _MeasureCollection(_RepoObjectItem):\n def __init__(self, name, ml_repo):\n super(_MeasureCollection, self).__init__('measures', None)\n names = ml_repo.get_names(MLObjectType.MEASURE)\n for n in names:\n path = n.split('/')[2:]\n items = [None] * len(path)\n for i in range(len(items)-1):\n items[i] = _RepoObjectItem(path[i], None)\n items[-1] = _MeasureItem(n, ml_repo)\n self._set(path, items)\n #items[-2] = MeasuresOnDataItem\n\nclass _EvalCollection(_RepoObjectItem):\n def __init__(self, name, ml_repo):\n super(_EvalCollection, self).__init__('eval', None)\n names = ml_repo.get_names(MLObjectType.EVAL_DATA)\n for n in names:\n path = n.split('/')[2:]\n items = [None] * len(path)\n for i in range(len(items)-1):\n items[i] = _RepoObjectItem(path[i], None)\n items[-1] = _MeasureItem(n, ml_repo)\n self._set(path, items)\n\nclass _TestCollection(_RepoObjectItem):\n def __init__(self, name, ml_repo):\n super(_TestCollection, self).__init__('tests', None)\n names = ml_repo.get_names(MLObjectType.TEST)\n for n in names:\n path = n.split('/')[2:]\n items = [None] * len(path)\n for i in range(len(items)-1):\n items[i] = _RepoObjectItem(path[i], None)\n items[-1] = _RepoObjectItem(n, ml_repo)\n self._set(path, items)\n\nclass _JobCollection(_RepoObjectItem):\n def __init__(self, name, ml_repo, model_name):\n super(_JobCollection, self).__init__('jobs', None)\n names = ml_repo.get_names(MLObjectType.JOB)\n for n in names:\n if model_name in n:\n path = n.split('/')\n path = path[path.index('jobs')+1:]\n items = [None] * len(path)\n for i in range(len(items)-1):\n items[i] = _RepoObjectItem(path[i], None)\n items[-1] = _JobItem(n, ml_repo)\n self._set(path, items)\n\nclass _ModelItem(_RepoObjectItem):\n def __init__(self, name, ml_repo, repo_obj = None):\n super(_ModelItem,self).__init__(name, ml_repo, repo_obj)\n self.model = _RepoObjectItem(name + '/model', ml_repo)\n self.eval = _EvalCollection(name + '/eval', ml_repo)\n self.model_param = _RepoObjectItem(name + '/model_param', ml_repo)\n self.tests = _TestCollection(name + '/tests', ml_repo)\n self.measures = _MeasureCollection(name+ '/measure', ml_repo)\n self.jobs = _JobCollection(name+'/jobs', ml_repo, name)\n if ml_repo._object_exists(name+'/training_stat'):\n self.training_statistic = _RepoObjectItem(name+'/training_stat', ml_repo)\n if ml_repo._object_exists(name+'/training_param'):\n self.training_param = _RepoObjectItem(name + '/training_param', ml_repo)\n\n\n def set_label(self, label_name, version = repo_store.RepoStore.LAST_VERSION, message=''):\n self._repo.set_label(label_name, self._name+ '/model', version, message)\n\nclass _LabelCollection(_RepoObjectItem):\n def __init__(self, repo):\n super(_LabelCollection,self).__init__('labels', None)\n names = repo.get_names(MLObjectType.LABEL)\n for n in names:\n #label = ml_repo.get()\n setattr(self, n, _RepoObjectItem(n, repo))\n \nclass _ModelCollection(_RepoObjectItem):\n 
@staticmethod\n def __get_name_from_path(name):\n return name\n\n def __init__(self, repo):\n super(_ModelCollection,self).__init__('models', None)\n names = repo.get_names(MLObjectType.MODEL)\n for n in names:\n setattr(self, _ModelCollection.__get_name_from_path(n), _ModelItem(n, repo))\n self.labels = _LabelCollection(repo)\n \n def add(self, name):\n setattr(self, name, _ModelItem(name,self._repo))\n\n\n\nclass _CacheDataCollection(_RepoObjectItem):\n\n @staticmethod\n def __get_name_from_path(path):\n return path.split('/')[-1]\n \n def __init__(self, repo):\n super(_CacheDataCollection, self).__init__('cache', None)\n self.__repo = repo # we store ml_repo in __repo to circumvent that obj is loaded from eneric base class\n names = repo.get_names(MLObjectType.CACHED_VALUE)\n for n in names:\n setattr(self, _CacheDataCollection.__get_name_from_path(n), _RepoObjectItem(n, repo))\n#endregion\n\n\n\nclass MLTree:\n\n @staticmethod\n def add_tree(ml_repo):\n \"\"\"Adds an MLTree to a repository.\n\n Args:\n ml_repo (MLRepo): the repository the tre is added\n \"\"\"\n setattr(ml_repo, 'tree', MLTree(ml_repo))\n ml_repo._add_triggers.append(ml_repo.tree.reload)\n \n def __create(self):\n self.raw_data = _RawDataCollection(self.__ml_repo)\n self.training_data = _TrainingDataCollection(self.__ml_repo)\n self.test_data = _TestDataCollection(self.__ml_repo)\n self.models = _ModelCollection(self.__ml_repo)\n self.cache = _CacheDataCollection(self.__ml_repo)\n\n def __init__(self, ml_repo):\n self.__ml_repo = ml_repo\n self.__create()\n\n def reload(self, **kwargs):\n \"\"\"Method to reload the tree after objects have been added or deleted from the repository.\n \"\"\"\n self.__create() # todo make this more efficient by just updating collections and items which are affected by this\n\n def modifications(self):\n \"\"\"Return a dictionary of all objects that were modified but no yet \n commited to the repository.\n \n Returns:\n dict: dictionary mapping object ids to dictionary of the modified attributes \n \"\"\"\n result = {}\n tmp = self.raw_data.modifications()\n if tmp is not None:\n result.update(tmp)\n tmp = self.training_data.modifications()\n if tmp is not None:\n result.update(tmp)\n tmp = self.test_data.modifications()\n if tmp is not None:\n result.update(stmp)\n tmp = self.models.modifications()\n if tmp is not None:\n result.update(tmp)\n if len(result) == 0:\n return None\n return result\n\n \n", "import numpy as np\nimport copy\nimport logging\nfrom IPython.display import display, clear_output\nfrom collections import defaultdict\nimport pailab.analysis.plot as paiplot\nimport pailab.analysis.plot_helper as plt_helper\nimport ipywidgets as widgets\n\nfrom pailab import MLObjectType, RepoInfoKey, FIRST_VERSION, LAST_VERSION\nfrom pailab.ml_repo.repo import NamingConventions\nimport pailab.tools.checker as checker\nimport pailab.tools.tools as tools\nimport pailab.tools.interpretation as interpretation\nimport pandas as pd\n\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\n\nlogger = logging.getLogger(__name__)\n\n# set option so that long lines have a linebreak\npd.set_option('display.max_colwidth', -1)\n# set widget use to True so that plotlys FigureWidget is used\npaiplot.use_within_widget = True\n\nif paiplot.has_plotly:\n import plotly.graph_objs as go\n\nbeakerX = False\nif beakerX:\n from beakerx import TableDisplay\n # from beakerx.object import beakerx\nelse:\n def TableDisplay(dt):\n display(dt)\n\n\nclass _MLRepoModel:\n\n class _DataModel:\n def 
__init__(self, ml_repo):\n self._training_data = {}\n self._test_data = {}\n for k in ml_repo.get_names(MLObjectType.TRAINING_DATA):\n tmp = ml_repo.get(k)\n self._training_data[k] = tmp.n_data\n self._x_coord_names = tmp.x_coord_names\n self._y_coord_names = tmp.y_coord_names\n for k in ml_repo.get_names(MLObjectType.TEST_DATA):\n tmp = ml_repo.get(k)\n self._test_data[k] = tmp.n_data\n\n def get_data_names(self):\n result = [k for k in self._test_data.keys()]\n result.extend([k for k in self._training_data.keys()])\n return result\n\n def get_num_data(self, data):\n result = []\n for d in data:\n if d in self._test_data.keys():\n result.append(self._test_data[d])\n elif d in self._training_data.keys():\n result.append(self._training_data[d])\n else:\n raise Exception('Cannot find data ' + d)\n return result\n\n class _ModelModel:\n def __init__(self, ml_repo):\n self.labels = {} # dictionary label->model and version\n # dictionary (model,version)->labelname or None\n self.model_to_label = defaultdict(lambda: None)\n self._setup_labels(ml_repo)\n self._model_info_table = self._setup_model_info_table(ml_repo)\n self._model_names = ml_repo.get_names(\n MLObjectType.CALIBRATED_MODEL)\n\n def _setup_labels(self, ml_repo):\n label_names = ml_repo.get_names(MLObjectType.LABEL)\n if label_names is None:\n return\n if isinstance(label_names, str):\n label_names = [label_names]\n for l in label_names:\n label = ml_repo.get(l)\n self.labels[l] = {'model': label.name,\n 'version': label.version}\n self.model_to_label[(label.name, label.version,)] = l\n\n def _setup_model_info_table(self, ml_repo):\n model_rows = []\n model_names = ml_repo.get_names(MLObjectType.CALIBRATED_MODEL)\n for model_name in model_names:\n models = ml_repo.get(model_name, version=(\n FIRST_VERSION, LAST_VERSION), full_object=False)\n if not isinstance(models, list):\n models = [models]\n for model in models:\n tmp = copy.deepcopy(model.repo_info.get_dictionary())\n tmp['model'] = tmp['name']\n del tmp['big_objects']\n del tmp['modifiers']\n del tmp['modification_info']\n tmp['label'] = self.model_to_label[(\n tmp['model'], tmp['version'],)]\n tmp['widget_key'] = tmp['commit_date'][0:16] + ' | ' + \\\n tmp['author'] + ' | ' + \\\n str(tmp['label']) + ' | ' + tmp['version']\n model_rows.append(tmp)\n model_info_table = pd.DataFrame(model_rows)\n model_info_table.set_index(['model', 'version'], inplace=True)\n return model_info_table\n\n def get_models(self):\n return self._model_names\n\n def get_info_table(self):\n return self._model_info_table\n\n def setup_error_measure_table(self, ml_repo, data_sets, measures):\n tmp = []\n for measure in measures:\n for data in data_sets:\n tmp.append(pd.DataFrame(\n tools.get_model_measure_list(ml_repo, measure, data)))\n tmp[-1].set_index(['model', 'version'], inplace=True)\n result = self.get_info_table()\n tmp.insert(0, result)\n return pd.concat(tmp, axis=1)\n\n class _ConsistencyModel:\n def __init__(self, ml_repo):\n self.tests = checker.Tests.run(ml_repo)\n self.model = checker.Model.run(ml_repo)\n self.data = checker.Data.run(ml_repo)\n\n def __init__(self):\n pass\n\n def set_repo(self, ml_repo):\n self.ml_repo = ml_repo\n self._setup()\n\n def _setup(self):\n self.object_types = {}\n for k in MLObjectType:\n self.object_types[k.value] = self.ml_repo.get_names(k)\n self.data = _MLRepoModel._DataModel(self.ml_repo)\n self.model = _MLRepoModel._ModelModel(self.ml_repo)\n self.consistency = _MLRepoModel._ConsistencyModel(self.ml_repo)\n self._setup_measures()\n 
self._setup_labels()\n # now set label information into\n\n def _setup_labels(self): # todo: das hier muss weg\n self.labels = {}\n label_names = self.ml_repo.get_names(MLObjectType.LABEL)\n if label_names is None:\n return\n if isinstance(label_names, str):\n label_names = [label_names]\n for l in label_names:\n label = self.ml_repo.get(l)\n self.labels[l] = {'model': label.name, 'version': label.version}\n\n def _setup_measures(self):\n measure_names = self.ml_repo.get_names(\n MLObjectType.MEASURE_CONFIGURATION)\n if len(measure_names) == 0:\n self.measures = []\n else:\n measure_config = self.ml_repo.get(measure_names[0])\n self.measures = [x for x in measure_config.measures.keys()]\n\n def get_model_statistics(self):\n model_stats = {}\n models = self.ml_repo.get_names(MLObjectType.CALIBRATED_MODEL)\n if isinstance(models, str):\n models = [models]\n for m in models:\n model = self.ml_repo.get(m)\n model_stats[model.repo_info.name] = {\n 'last commit': model.repo_info.commit_date,\n '#total commits': self.model.get_info_table().shape[0]\n }\n return model_stats\n\n def get_versions(self, name):\n return self.ml_repo.get_history(name, obj_member_fields=[])\n\n # def get_model_parameter(self, model_name)\n\n\nwidget_repo = _MLRepoModel()\n\n# region helpers\n\n\ndef _add_title_and_border(name):\n def _get_widget(get_widget):\n def wrapper(self):\n return widgets.VBox(children=[\n # , layout = widgets.Layout(width = '100%')),\n widgets.HTML(\n value='<h3 style=\"Color: white; background-color:#d1d1e0; text-align: center\"> ' + name + '</h3>'),\n get_widget(self),\n # , layout = widgets.Layout(width = '100%'))\n widgets.HTML(\n value='<h3 style=\"Color: white; background-color:#d1d1e0; text-align: center\"> </h3>')\n ], layout=widgets.Layout(padding='0px 0px 0px 0px', overflow_x='auto') # , overflow_y='auto', )\n ) # layout=widgets.Layout(border='solid 1px'))\n return wrapper\n return _get_widget\n\n\ndef _highlight_max(data, color='red'):\n '''\n highlight the maximum in a Series or DataFrame\n '''\n attr = 'color: {}'.format(color)\n # remove % and cast to float\n # data = data.replace('%','', regex=True).astype(float)\n if data.ndim == 1: # Series from .apply(axis=0) or axis=1\n is_max = data == data.max()\n return [attr if v else '' for v in is_max]\n else: # from .apply(axis=None)\n is_max = data == data.max().max()\n return pd.DataFrame(np.where(is_max, attr, ''),\n index=data.index, columns=data.columns)\n\n\ndef _highlight_min(data, color='green'):\n '''\n highlight the maximum in a Series or DataFrame\n '''\n attr = 'color: {}'.format(color)\n # remove % and cast to float\n # data = data.replace('%','', regex=True).astype(float)\n if data.ndim == 1: # Series from .apply(axis=0) or axis=1\n is_max = data == data.min()\n return [attr if v else '' for v in is_max]\n else: # from .apply(axis=None)\n is_max = data == data.min().min()\n return pd.DataFrame(np.where(is_max, attr, ''),\n index=data.index, columns=data.columns)\n\n\nclass _TableViewer:\n def __init__(self, table, table_name, selected_columns=None):\n self._table = table\n self._table_name = table_name\n self._columns = table.columns\n if selected_columns is None:\n self._selected_columns = self._columns\n else:\n self._selected_columns = selected_columns\n\n self._selected_columns = widgets.SelectMultiple(\n options=self._columns, value=self._selected_columns)\n self._output = widgets.Output()\n\n self._settings = widgets.HBox(children=[])\n self._tab = widgets.Tab(children=[self._output, self._settings], title=[\n 
'Table', 'Table Settings'])\n\n self._button_update = widgets.Button(description='update')\n self._button_update.on_click(self.get_overview)\n\n def get_overview(self, d):\n with self._output:\n clear_output(wait=True)\n # , orient='index'))\n TableDisplay(self._table[self._selected_columns.value])\n\n def get_widget(self):\n return self._tab\n\n\nclass _ObjectCategorySelector:\n\n def __init__(self, *args, **kwargs):\n selection = []\n for k, v in widget_repo.object_types.items():\n if len(v) > 0:\n selection.append(k + ' (' + str(len(v)) + ')')\n if 'layout' not in kwargs.keys():\n kwargs['layout'] = widgets.Layout(width='300px', height='250px')\n kwargs['value'] = []\n self._selector = widgets.SelectMultiple(options=selection,\n # value = [selection[0]],\n **kwargs)\n\n def get_selection(self):\n return [k.split(' ')[0] for k in self._selector.value]\n\n def get_widget(self):\n return widgets.VBox(children=[\n widgets.Label(value='Object Types'),\n self._selector\n ]\n )\n\n\nclass _DataSelector:\n \"\"\"Widget to select training and test data.\n \"\"\"\n\n def __init__(self, **kwargs):\n names = widget_repo.data.get_data_names()\n # if len(names) > 0:\n self._selection_widget = widgets.SelectMultiple(\n options=names, value=[names[0]], **kwargs)\n\n def get_widget(self):\n return widgets.VBox(children=[widgets.Label(value='Data'), self._selection_widget])\n\n def get_selection(self):\n return self._selection_widget.value\n\n\nclass _DataSelectorWithVersion:\n \"\"\"Widget to select training and test data.\n \"\"\"\n\n def __init__(self, display_selection=True, **kwargs):\n names = widget_repo.data.get_data_names()\n self._update_callbacks = []\n self._display_selection = display_selection\n self._selection = {}\n self._selection_options = {}\n self._key_to_version = {}\n self._updating_version = {}\n for n in names:\n self._selection[n] = []\n self._selection_options[n] = []\n self._key_to_version[n] = {}\n self._selected_overview = widgets.Output()\n self._selection_data = widgets.Dropdown(\n options=names, value=None, **kwargs)\n\n self._selection_data.observe(self._update_version, names='value')\n\n self._selection_version = widgets.SelectMultiple(\n options=[], value=[], **kwargs)\n self._selection_version.observe(\n self._display_selected_overview, names='value')\n\n def _get_state(self):\n return self._selection, self._selection_options, self._key_to_version\n\n def _set_state(self, state):\n self._selection = state[0]\n self._selection_options = state[1]\n self._key_to_version = state[2]\n\n def _set_update_callback(self, cb):\n \"\"\"Set a callback (called at every update of this widget)\n\n Args:\n cb (function): Callback function called at every update.\n \"\"\"\n self._update_callbacks.append(cb)\n\n def _update_version(self, change):\n self._updating_version = True\n data_selected = self._selection_data.value\n tmp = widget_repo.ml_repo.get_history(data_selected)\n key_to_version = {}\n versions = []\n for x in tmp:\n key = x['repo_info']['commit_date'][0:16] + ' | ' + \\\n x['repo_info']['author'] + ' | ' + x['repo_info']['version']\n key_to_version[key] = x['repo_info']['version']\n versions.append(key)\n self._key_to_version[data_selected] = key_to_version\n self._selection_version.options = versions\n self._selection_version.value = self._selection_options[data_selected]\n for cb in self._update_callbacks:\n cb(change)\n self._updating_version = False\n # self._selection[self._selection_data.value] = [x for x in self._selection_version.value]\n\n def 
_display_selected_overview(self, change):\n if self._updating_version:\n return\n data_selected = self._selection_data.value\n key_to_version = self._key_to_version[data_selected]\n self._selection[data_selected] = [key_to_version[x]\n for x in self._selection_version.value]\n self._selection_options[data_selected] = [\n x for x in self._selection_version.value]\n tmp = {}\n tmp['data'] = []\n tmp['version'] = []\n for n, x in self._selection.items():\n for y in x:\n tmp['data'].append(n)\n tmp['version'].append(y)\n for cb in self._update_callbacks:\n cb(change)\n with self._selected_overview:\n clear_output(wait=True)\n display(pd.DataFrame.from_dict(tmp))\n\n def get_widget(self):\n if self._display_selection:\n return widgets.VBox(children=[widgets.Label(value='Data'), self._selection_data,\n widgets.Label(\n value='Versions'), self._selection_version,\n self._selected_overview, ])\n else:\n return widgets.VBox(children=[widgets.Label(value='Data'), self._selection_data,\n widgets.Label(value='Versions'), self._selection_version])\n\n def get_selection(self):\n return self._selection\n\n def get_data(self):\n data = {}\n for d_name, d_v in self._selection.items():\n if len(d_v) > 0:\n data[d_name] = d_v\n return data\n\n\nclass _ModelSelectorWithVersion:\n\n @staticmethod\n def _filter_models(labels=None, commit_start=None, commit_end=None, authors=None, model_versions=None):\n \"\"\"Filter the model table according to the given attributes.\n\n Args:\n labels ([str or iterable of str], optional): If set, returns only models with the selected labels. Defaults to None.\n commit_start (str, optional): String of earliest commit date.. Defaults to None.\n commit_end (str, optional): String of latest commit date. Defaults to None.\n authors (str or iterable of str, optional): If set it return only the models with the corresponding author(s). Defaults to None.\n model_versions (str or iterable of str, optional): If set only modes with respective version(s) are returned. 
Defaults to None.\n\n Returns:\n pandas DataFrame: The correspondign models.\n \"\"\"\n result = widget_repo.model.get_info_table()\n if labels is not None:\n if isinstance(labels, str):\n result = result[result['label'] == labels]\n else:\n result = result[result['label'].isin(labels)]\n if commit_start is not None:\n result = result[result['commit_date'] >= commit_start]\n if commit_end is not None:\n result = result[result['commit_date'] <= commit_end]\n if authors is not None:\n if isinstance(authors, str):\n result = result[result['author'] == authors]\n else:\n result = result[result['author'].isin(authors)]\n if model_versions is not None:\n if isinstance(model_versions, str):\n result = result[result['version'] == model_versions]\n else:\n result = result[result['version'].isin(model_versions)]\n return result\n\n def __init__(self, display_selection=True, **kwargs):\n self._display_selection = display_selection\n self._selection = defaultdict(list)\n self._selection_model_name = widgets.Dropdown(\n options=widget_repo.model.get_models(), value=None, **kwargs)\n self._selection_model_name.observe(\n self._selected_model_changes, names='value')\n\n self._selection_version = widgets.SelectMultiple(\n options=[], value=[], rows=8, layout=widgets.Layout(width=\"100%\"), **kwargs)\n\n self._selected_overview = widgets.Output()\n self._selection_version.observe(\n self._selected_version_changed, names='value')\n\n self._model_changed_callable = None\n\n # Filtering\n #\n labels = widget_repo.ml_repo.get_names(MLObjectType.LABEL)\n self._label_selector = widgets.SelectMultiple(options=labels)\n self._commit_data_start = widgets.DatePicker()\n self._commit_data_end = widgets.DatePicker()\n self._author_selector = widgets.SelectMultiple(\n options=widget_repo.model.get_info_table()['author'].unique())\n self._apply_button = widgets.Button(description='Apply')\n self._apply_button.on_click(self._apply_filter)\n self._clear_button = widgets.Button(description='Clear')\n self._clear_button.on_click(self._clear_filter)\n self._filter = widgets.VBox(children=[\n widgets.Label(value='Labels'),\n self._label_selector,\n widgets.Label(value='Commit Start'),\n self._commit_data_start,\n widgets.Label(value='Commit End'),\n self._commit_data_end,\n widgets.Label(value='Authors'),\n self._author_selector,\n widgets.HBox(children=[\n self._apply_button,\n self._clear_button])\n ]\n )\n\n def get_models(self):\n \"\"\"Returns all selected models as list of tuples (first element is model name, second model version)\n \"\"\"\n models = widget_repo.model.get_info_table()\n result = {}\n for k, v in self._selection.items():\n if len(v) > 0:\n result[k] = [models[models['widget_key'] == w].index[0][1]\n for w in v]\n return result\n\n def observe_model_change(self, handler):\n \"\"\"Setup a handler when the model trait changed\n\n Args:\n handler (callable): A callable that is called when the model trait changes.\n \"\"\"\n self._model_changed_callable = handler\n\n def _selected_model_changes(self, change):\n self._update_version(change)\n if self._model_changed_callable is not None:\n self._model_changed_callable(change)\n\n def _selected_version_changed(self, change):\n self._display_selected_overview(change)\n\n def _apply_filter(self, dummy):\n self._updating_version = True\n data_selected = self._selection_model_name.value\n labels = self._label_selector.value\n if len(labels) == 0:\n labels = None\n if self._commit_data_start.value is None:\n commit_start = None\n else:\n commit_start = 
str(self._commit_data_start.value)\n if self._commit_data_end.value is None:\n commit_end = None\n else:\n commit_end = str(self._commit_data_end.value)\n authors = None\n if len(self._author_selector.value) > 0:\n authors = self._author_selector.value\n models = _ModelSelectorWithVersion._filter_models(labels=labels, authors=authors,\n commit_start=commit_start, commit_end=commit_end)\n self._selection_model_name.options = [\n x for x in models['name'].unique()]\n models = models[models['name'] == data_selected]\n widget_keys = models['widget_key'].values\n self._selection_version.options = [x for x in models['widget_key']]\n self._selection_version.value = [\n x for x in self._selection[data_selected] if x in widget_keys]\n self._updating_version = False\n\n def _clear_filter(self, dummy):\n self._commit_data_start.value = None\n self._commit_data_end.value = None\n self._author_selector.value = []\n self._label_selector.value = []\n self._apply_filter(dummy)\n\n def _update_version(self, change):\n if change['old'] is not None:\n pass\n self._updating_version = True\n data_selected = self._selection_model_name.value\n models = widget_repo.model.get_info_table()\n models = models[models['name'] == data_selected]\n self._selection_version.options = [x for x in models['widget_key']]\n self._selection_version.value = self._selection[data_selected]\n self._updating_version = False\n\n def _update_selected_versions(self, change):\n data_selected = self._selection_model_name.value\n # now handle changes of version selection: Remove versions that have been\n # deselected and add versions that have been selected\n old = set(change['old'])\n new = set(change['new'])\n # remove versions that have been deselected\n diff = old-new\n self._selection[data_selected] = list(\n set(self._selection[data_selected])-diff)\n # add new elements\n diff = new - old\n self._selection[data_selected].extend(diff)\n\n def _display_selected_overview(self, change):\n if self._updating_version:\n return\n self._update_selected_versions(change)\n versions = []\n for n, x in self._selection.items():\n versions.extend(x)\n with self._selected_overview:\n clear_output(wait=True)\n models = widget_repo.model.get_info_table()\n display(models[models['widget_key'].isin(versions)])\n\n def get_widget(self):\n filter_widget = widgets.Accordion(\n children=[self._filter], selected_index=None)\n filter_widget.set_title(0, 'Filter')\n if self._display_selection:\n return widgets.VBox(children=[\n widgets.VBox(children=[\n widgets.Label(value='Model'),\n self._selection_model_name,\n widgets.Label(value='Versions'),\n self._selection_version,\n self._selected_overview,\n ]\n ),\n filter_widget])\n\n else:\n return widgets.VBox(children=[\n widgets.VBox(children=[\n widgets.Label(value='Model'),\n self._selection_model_name,\n widgets.Label(value='Versions'),\n self._selection_version\n ]\n ),\n filter_widget])\n\n\nclass _ModelAndDataSelectorWithVersion:\n \"\"\"Widget to select a model together with data used in conjunction with the selected model.\n\n Returns:\n [type]: [description]\n \"\"\"\n\n def __init__(self, display_selection=True, **kwargs):\n self._display_selection = display_selection\n names = widget_repo.model.get_models()\n self._data = _DataSelectorWithVersion(display_selection=False)\n self._model = _ModelSelectorWithVersion(display_selection=False)\n self._data._set_update_callback(self._display_selected_overview)\n self._selected_overview = widgets.Output()\n\n def get_models(self):\n \"\"\"Returns all selected 
models as dictionary from model to list of selected model's versions\n \"\"\"\n return self._model.get_models()\n\n def get_data(self):\n return self._data.get_data()\n\n def _display_selected_overview(self, change):\n # if self._updating_version:\n # return\n # data_selected = self._selection_data.value\n # key_to_version = self._key_to_version[data_selected]\n # self._selection[data_selected] = [key_to_version[x] for x in self._selection_version.value]\n # self._selection_options[data_selected] = [x for x in self._selection_version.value]\n # tmp ={}\n # tmp['model'] = []\n # tmp['model version'] =[]\n # tmp['data'] = []\n # tmp['data version'] =[]\n # for n, x in self._selection.items():\n # for y in x:\n # for data_name, data_versions in self._model_to_data_states[n][0].items():\n # for data_version in data_versions:\n # tmp['model'].append(n)\n # tmp['model version'].append(y)\n # tmp['data'].append(data_name)\n # tmp['data version'].append(data_version)\n\n # with self._selected_overview:\n # clear_output(wait = True)\n # df = pd.DataFrame.from_dict(tmp)\n # df = df[['model', 'model version', 'data', 'data version']]\n # #arrays=[tmp['model'],tmp['model version'], tmp['data']]\n # #df = pd.DataFrame([tmp['data version']], index=arrays)\n # #multi_index = pd.MultiIndex.from_arrays(arrays, names=('model','model version', 'data', 'data version'))\n # #df.reindex(index = multi_index)\n # display(df)\n pass\n\n def get_widget(self):\n model_selection = widgets.Accordion(\n children=[self._model.get_widget()])\n model_selection.set_title(0, 'Model')\n model_selection.selected_index = None\n data_selection = widgets.Accordion(children=[self._data.get_widget()])\n data_selection.set_title(0, 'Data')\n data_selection.selected_index = None\n if self._display_selection:\n return widgets.VBox(children=[\n model_selection,\n data_selection,\n self._selected_overview, ])\n else:\n return widgets.VBox(children=[\n model_selection,\n data_selection])\n\n\nclass _MeasureSelector:\n \"\"\"Widget to select measures.\n \"\"\"\n\n def __init__(self, **kwargs):\n self._selection_widget = widgets.SelectMultiple(\n options=widget_repo.measures, **kwargs)\n\n def get_widget(self):\n return widgets.VBox(children=[widgets.Label(value='Measures'), self._selection_widget])\n\n def get_selection(self):\n return self._selection_widget.value\n\n# endregion\n\n\nclass ObjectOverviewList:\n def __init__(self, beakerX=False):\n self._categories = _ObjectCategorySelector(\n layout=widgets.Layout(width='250px', height='250px'))\n self._repo_info = widgets.SelectMultiple(\n options=[k.value for k in RepoInfoKey], value=['category', 'name', 'commit_date', 'version'],\n layout=widgets.Layout(width='200px', height='250px', margin='10px')\n )\n # self._settings = widgets.HBox(children=[self.categories, self._repo_info])\n\n self._button_update = widgets.Button(description='update')\n self._button_update.on_click(self.get_overview)\n\n self._output = widgets.Output(layout=widgets.Layout(\n height='300px', width='1000px', overflow_y='auto', overflow_x='auto'))\n self._input_box = widgets.HBox(\n children=[\n self._categories.get_widget(),\n widgets.VBox(children=[\n widgets.Label(value='Info Fields'),\n self._repo_info\n ]\n ),\n widgets.VBox(children=[\n self._button_update,\n self._output\n ],\n layout=widgets.Layout(margin='10px 10px 10px 10px')\n )\n ]\n )\n\n def get_overview(self, d):\n result = {}\n for info in self._repo_info.value:\n result[info] = []\n\n for k in self._categories.get_selection():\n for n in 
widget_repo.object_types[k]:\n obj = widget_repo.ml_repo.get(n)\n for info in self._repo_info.value:\n if isinstance(obj.repo_info[info], MLObjectType):\n result[info].append(obj.repo_info[info].value)\n else:\n result[info].append(str(obj.repo_info[info]))\n with self._output:\n clear_output(wait=True)\n TableDisplay(pd.DataFrame.from_dict(result)) # , orient='index'))\n\n @_add_title_and_border('Object Overview')\n def get_widget(self):\n return self._input_box\n\n\nclass ObjectView:\n\n def _setup_names(self, change=None):\n names = []\n for k in self._categories.get_selection():\n names.extend(widget_repo.ml_repo.get_names(k))\n self._names.options = names\n\n def __init__(self):\n self._categories = _ObjectCategorySelector()\n self._names = widgets.SelectMultiple(\n options=[]\n )\n self._setup_names()\n self._categories.observe(self._setup_names, 'value')\n\n self._button_update = widgets.Button(description='show history')\n self._button_update.on_click(self.show_history)\n self._output = widgets.Output()\n self._input_box = widgets.HBox(\n children=[self._categories.get_widget(), self._names, self._button_update, self._output], layout=widgets.Layout(border='solid 1px')\n )\n\n def show_history(self, d):\n result = {RepoInfoKey.NAME.value: [],\n RepoInfoKey.AUTHOR.value: [],\n RepoInfoKey.VERSION.value: [],\n RepoInfoKey.COMMIT_DATE.value: []}\n for k in self._names.value:\n history = widget_repo.ml_repo.get_history(k)\n for l in history:\n for m in result.keys():\n result[m].append(l['repo_info'][m])\n with self._output:\n clear_output(wait=True)\n TableDisplay(pd.DataFrame.from_dict(result))\n\n @_add_title_and_border('Object View')\n def get_widget(self):\n return self._input_box\n\n\nclass RepoOverview:\n def __init__(self):\n self._repo_name = widgets.HTML(\n value='<div style=\"background-color:#c2c2d6\"><h4 stype=\"text-align: center\"> Repository: '\n + widget_repo.ml_repo._config['name'] + '</h4>') # , margin = '0px 0px 0px 0px'))\n self._data_statistics = widgets.Output(\n layout=widgets.Layout(width='450px', height='450px'))\n self._plot_data_statistics()\n self._measures = widgets.Output(\n layout=widgets.Layout(width='450px', height='450px'))\n self._consistency = self._setup_consistency()\n self._labels = self._setup_labels()\n self._model_stats = self._setup_model_stats()\n\n # check consistency\n\n def _setup_consistency(self):\n def create_consistency_html(**kwargs):\n result = '<div style=\"background-color:#c2c2d6\">'\n result += '<h4 stype=\"text-align: center\">Consistency</h4>'\n for k, v in kwargs.items():\n if len(v) > 0:\n result += '<p style=\"background-color:red\"> ' + \\\n str(v) + ' ' + k + ' issues found!</p>'\n else:\n result += '<p style=\"background-color:lightgreen\">No ' + k + ' issues found.</p>'\n result += '</div>'\n return result\n\n return widgets.HTML(create_consistency_html(model=widget_repo.consistency.model,\n test=widget_repo.consistency.tests,\n data=widget_repo.consistency.data),\n layout=widgets.Layout(margin='0% 0% 0% 0%', width='400px'))\n\n def _setup_labels(self):\n header = widgets.HTML(\n '<div style=\"background-color:#c2c2d6\"><h4 stype=\"text-align: center\">Labels</h4>')\n label_output = None\n\n if len(widget_repo.labels) > 0:\n label_output = widgets.Output(\n layout=widgets.Layout(width='400px', height='100px', overflow_y='auto', overflow_x='auto'))\n with label_output:\n clear_output(wait=True)\n display(pd.DataFrame.from_dict(\n widget_repo.labels, orient='index'))\n else:\n label_output = widgets.HTML(\n '<div 
style=\"background-color:#ff4d4d\"><h4 stype=\"text-align: center\">No labels defined.</h4>')\n\n return widgets.VBox(children=[header, label_output])\n\n def _setup_model_stats(self):\n header = widgets.HTML(\n '<div style=\"background-color:#c2c2d6\"><h4 stype=\"text-align: center\">Models</h4>')\n model_stats_output = widgets.Output(\n layout=widgets.Layout(width='400px', height='100px', overflow_y='auto', overflow_x='auto'))\n with model_stats_output:\n clear_output(wait=True)\n display(pd.DataFrame.from_dict(\n widget_repo.get_model_statistics(), orient='index'))\n return widgets.VBox(children=[header, model_stats_output])\n\n def _plot_data_statistics(self):\n data_names = widget_repo.data.get_data_names()\n data_num_points = widget_repo.data.get_num_data(data_names)\n with self._data_statistics:\n clear_output(wait=True)\n plt.rcdefaults()\n _, ax = plt.subplots()\n y_pos = np.arange(len(data_names))\n ax.barh(y_pos, data_num_points, align='center',\n color='green', ecolor='black')\n ax.set_yticks(y_pos)\n ax.set_yticklabels(data_names, rotation=45, va='top')\n ax.invert_yaxis()\n ax.set_xlabel('number of datapoints')\n ax.set_title('Datasets')\n plt.show()\n\n def _plot_measures(self):\n with self._measures:\n clear_output(wait=True)\n plt.rcdefaults()\n fig = plt.figure()\n ax = fig.add_subplot(111)\n for data in widget_repo.data.get_data_names():\n for measure in widget_repo.measures:\n error = widget_repo.model.setup_error_measure_table(\n widget_repo.ml_repo, [data], [measure])\n error = error.sort_values(by='commit_date')\n plt.plot(error['commit_date'],\n error[measure + ', ' + data], '-x', label=measure + ', ' + data)\n plt.xlabel('commit date')\n ax.grid(True)\n ax.format_xdata = mdates.DateFormatter('%Y-%m-%d')\n for label in ax.get_xticklabels():\n label.set_rotation(40)\n label.set_horizontalalignment('right')\n # fig.autofmt_xdate()\n plt.legend()\n ax.set_title('Measures')\n # plt.setp(ax.get_xticklabels(), ha=\"right\", rotation=45)\n plt.show()\n # plt.set_title('Measures')\n\n @_add_title_and_border('Repository Overview')\n def get_widget(self):\n self._plot_measures()\n return widgets.HBox(children=[\n widgets.VBox(children=[self._repo_name, self._model_stats, self._labels, self._consistency],\n layout=widgets.Layout(width='400px')),\n widgets.VBox(children=[self._measures]),\n widgets.VBox(children=[self._data_statistics])\n ],\n layout=widgets.Layout(width='100%', height='100%')\n )\n\n\nclass MeasureView:\n def __init__(self, beakerX=False):\n self._data = _DataSelector()\n self._measures = _MeasureSelector()\n self._repo_info = widgets.SelectMultiple(\n options=[k.value for k in RepoInfoKey], value=['category', 'name', 'commit_date', 'version'], layout=widgets.Layout(width='200px', height='250px')\n )\n self._output = widgets.Output(layout=widgets.Layout(\n width='1000px', height='450px', overflow_y='auto', overflow_x='auto'))\n self._button_update = widgets.Button(description='update')\n self._button_update.on_click(self.get_measures)\n\n def _get_columns_selected(self):\n columns = [x for x in self._repo_info.value]\n for data in self._data.get_selection():\n for m in self._measures.get_selection():\n columns.append(m+', '+data)\n return columns\n\n @_add_title_and_border('Measure View')\n def get_widget(self):\n self._tab = widgets.Tab(children=[\n self._output,\n widgets.HBox(children=[\n self._data.get_widget(),\n self._measures.get_widget(),\n widgets.VBox(children=[\n widgets.Label(\n value='Model Columns'),\n self._repo_info]\n ),\n 
self._button_update\n ])\n ],\n title=['Table', 'Settings']\n )\n self._tab.set_title(0, 'Table')\n self._tab.set_title(1, 'Settings')\n return self._tab\n\n def get_measures(self, d):\n self._tab.selected_index = 0\n tmp = widget_repo.model.setup_error_measure_table(\n widget_repo.ml_repo, self._data.get_selection(), self._measures.get_selection())\n columns = [c for c in tmp.columns if c in self._get_columns_selected()]\n tmp2 = tmp[columns]\n with self._output:\n clear_output(wait=True)\n # apply highlighting to floating columns only\n floats = [x.kind == 'f' for x in tmp2.dtypes]\n float_columns = tmp2.columns[floats]\n TableDisplay(tmp2.style.apply(_highlight_max, subset=float_columns).apply(\n _highlight_min, subset=float_columns)) # , orient='index'))\n\n\nclass ConsistencyChecker:\n\n def _consistency_check(self):\n self._test_results = checker.Tests.run(self._ml_repo)\n self._model_results = checker.Model.run(self._ml_repo)\n self._data_results = checker.Data.run(self._ml_repo)\n\n def __init__(self, ml_repo, beakerX=False):\n self._ml_repo = ml_repo\n\n self._overview_output = widgets.Output()\n\n self._button_update = widgets.Button(description='update')\n self._button_update.on_click(self.show_checks)\n\n self._widget_main = widgets.VBox(\n children=[self._button_update, self._overview_output]\n )\n\n def show_checks(self, d):\n self._consistency_check()\n with self._overview_output:\n clear_output(wait=True)\n print('test issues: ' + str(len(self._test_results)) + ', model issues: ' +\n str(len(self._model_results)) + ', data issues: ' + str(len(self._data_results)))\n result = {'test': [], 'message': [], 'model': [], 'type': []}\n for k, v in self._test_results.items():\n for a, b in v.items():\n result['type'].append('test')\n result['model'].append(k)\n result['test'].append(a)\n result['message'].append(b)\n for k, v in self._model_results.items():\n for a, b in v.items():\n result['type'].append('model')\n result['model'].append(k)\n result['test'].append(a)\n result['message'].append(b)\n for k, v in self._data_results.items():\n for a, b in v.items():\n result['type'].append('data')\n result['model'].append(k)\n result['test'].append(a)\n result['message'].append(b)\n\n display(pd.DataFrame.from_dict(result))\n\n @_add_title_and_border('Consistency')\n def get_widget(self):\n return self._widget_main\n\n\nclass ModelErrorHistogram:\n \"\"\"Class to plot histograms of model errors.\n Please make sure that if you use plotly, also the jupyter plotlywidgets are installed via:\n jupyter nbextension install --py --sys-prefix plotlywidget\n otherwise you may encounter problems using this class.\n \"\"\"\n\n def __init__(self):\n self._model_data_selector = _ModelAndDataSelectorWithVersion(\n display_selection=False)\n\n self._update_button = widgets.Button(description='update')\n self._update_button.on_click(self._plot)\n self._output = widgets.Output()\n self._coord = widgets.SelectMultiple(\n options=widget_repo.data._y_coord_names,\n value=[widget_repo.data._y_coord_names[0]],\n disabled=False\n )\n\n def _plot(self, d):\n with self._output:\n clear_output(wait=True)\n display(go.FigureWidget(paiplot.histogram_model_error(widget_repo.ml_repo, self._model_data_selector.get_models(),\n self._model_data_selector.get_data(), y_coordinate=self._coord.value)))\n\n @_add_title_and_border('Pointwise Model Error Histogram')\n def get_widget(self):\n y_coord = widgets.Accordion(children=[self._coord])\n y_coord.set_title(0, 'Y-coordinates')\n return widgets.HBox(children=[\n 
widgets.VBox(children=[\n self._model_data_selector.get_widget(),\n y_coord,\n self._update_button\n ]),\n self._output\n ])\n\n\nclass ModelErrorConditionalHistogram:\n \"\"\"Plots the distribution of input data along a given axis for the largest absolute pointwise errors in comparison to the distribution of all data.\n \"\"\"\n\n def __init__(self):\n self._data_model_selection = _ModelAndDataSelectorWithVersion(\n display_selection=False)\n self._update_button = widgets.Button(description='update')\n self._update_button.on_click(self._plot)\n self._output = widgets.Output()\n self._recommendation_output = widgets.Output()\n self._recommendation_table = None\n self._output_tab = widgets.Tab(children=[self._output,\n self._recommendation_output])\n self._output_tab.set_title(0, 'histograms')\n self._output_tab.set_title(1, 'recommendations')\n self._quantile = widgets.FloatSlider(\n value=10,\n min=1,\n max=50,\n step=1,\n readout=True,\n readout_format='.2f',\n )\n self._coord = widgets.Select(\n options=widget_repo.data._y_coord_names,\n value=widget_repo.data._y_coord_names[0],\n disabled=False\n )\n self._x_coord = widgets.Select(\n options=widget_repo.data._x_coord_names,\n value=widget_repo.data._x_coord_names[0],\n disabled=False\n )\n self._accordion = widgets.Accordion(children=[\n self._get_selection_widget(),\n self._get_recommendation_widget()\n ])\n self._accordion.set_title(0, 'Selection')\n self._accordion.set_title(1, 'Recommendation')\n\n def _get_selection_widget(self):\n coordinate_selection = widgets.Accordion(children=[\n widgets.VBox(children=[\n widgets.Label(value='y-coordinates'),\n self._coord,\n widgets.Label(value='x-coordinates'),\n self._x_coord])\n ])\n coordinate_selection.set_title(0, 'Coordinates')\n return widgets.VBox(children=[\n self._data_model_selection.get_widget(),\n coordinate_selection,\n self._quantile,\n self._update_button])\n\n def _get_recommendation_widget(self):\n self._update_recommendation = widgets.Button(description='update')\n self._max_num_recommendations = widgets.IntText(value=20,\n description='maximum number of recommendations')\n self._cache_in_repo = widgets.Checkbox(\n value=True, description='cache MMD in repo')\n self._scale = widgets.Checkbox(\n value=True, description='scale x-values to zero mean and unit variance')\n self._update_recommendation.on_click(self._recommend)\n self._kernel_selection = widgets.Dropdown(options=[\n 'rbf', 'linear', 'polynomial', 'sigmoid', 'laplacian', 'chi2'\n ],\n value='rbf',\n description='kernel')\n self._gamma = widgets.FloatText(value=1.0, description='gamma')\n self._gamma_for_kernel = [\n 'rbf', 'polynomial', 'sigmoid', 'laplacian', 'chi2']\n self._kernel_selection.observe(self._on_kernel_change, names='value')\n self._recommendation_selection = widgets.IntText(\n description='recommendation id')\n self._recomendation_selection_apply = widgets.Button(\n description='apply recommendation')\n self._recomendation_selection_apply.on_click(self._apply_recommend)\n return widgets.VBox(children=[\n self._max_num_recommendations,\n self._cache_in_repo,\n self._scale,\n self._kernel_selection,\n self._gamma,\n self._update_recommendation,\n self._recommendation_selection,\n self._recomendation_selection_apply\n ])\n self._recommendation_table = None\n\n def _on_kernel_change(self, d):\n if self._kernel_selection in self._gamma_for_kernel:\n self._gamma.disabled = False\n else:\n self._gamma.disabled = True\n\n def _apply_recommend(self, d):\n if self._recommendation_table is None:\n 
logger.error(\n 'Recommendation table is empty, please first update the recommendation.')\n with self._output:\n clear_output(wait=True)\n print(\n 'Recommendation table is empty, please first update the recommendation.')\n return\n\n if self._recommendation_selection.value is not None:\n self._coord.value = self._recommendation_table['y-coord'][self._recommendation_selection.value]\n self._x_coord.value = self._recommendation_table[\n 'x-coord'][self._recommendation_selection.value]\n self._models.value = [\n self._recommendation_table['model'][self._recommendation_selection.value]]\n self._data.value = [\n self._recommendation_table['data'][self._recommendation_selection.value]]\n self._plot(None)\n\n def _plot(self, d):\n with self._output:\n clear_output(wait=True)\n display(go.FigureWidget(\n paiplot.histogram_data_conditional_error(widget_repo.ml_repo,\n self._data_model_selection.get_models(), self._data_model_selection.get_data(),\n x_coordinate=self._x_coord.value,\n y_coordinate=self._coord.value,\n percentile=self._quantile.value/100.0)\n ))\n self._output_tab.selected_index = 0\n\n def _recommend(self, d):\n self._output_tab.set_title(1, 'computing...')\n self._recommendation_table = pd.DataFrame.from_dict(\n plt_helper.get_ptws_error_dist_mmd(widget_repo.ml_repo, self._data_model_selection.get_models(),\n data=self._data_model_selection.get_data(),\n start_index=0, end_index=-1, percentile=self._quantile.value/100.0,\n scale=self._scale.value,\n cache=self._cache_in_repo,\n metric=self._kernel_selection.value,\n gamma=self._gamma.value)\n )\n self._recommendation_table['model version']\n self._recommendation_table['data version']\n self._recommendation_table.sort_values(\n ['mmd'], ascending=False, inplace=True)\n with self._recommendation_output:\n clear_output(wait=True)\n display(\n self._recommendation_table.iloc[0:self._max_num_recommendations.value])\n self._output_tab.selected_index = 1\n self._output_tab.set_title(1, 'recommendations')\n self._recommendation_selection.value = self._recommendation_table.index[0]\n\n @_add_title_and_border('Data Distribution of Largest Pointwise Errors.')\n def get_widget(self):\n return widgets.HBox(children=[\n self._accordion,\n self._output_tab\n ])\n\n\nclass ScatterModelError:\n def __init__(self):\n self._model_data_selector = _ModelAndDataSelectorWithVersion(\n display_selection=False)\n self._update_button = widgets.Button(description='update')\n self._update_button.on_click(self._plot)\n self._output = widgets.Output()\n self._coord = widgets.Select(\n options=widget_repo.data._y_coord_names,\n value=widget_repo.data._y_coord_names[0],\n disabled=False\n )\n self._x_coord = widgets.Select(\n options=widget_repo.data._x_coord_names,\n value=widget_repo.data._x_coord_names[0],\n disabled=False\n )\n\n def _get_selection_widget(self):\n coordinates = widgets.Accordion(children=[\n widgets.VBox(children=[\n widgets.Label(value='y-coordinates'),\n self._coord,\n widgets.Label(value='x-coordinates'),\n self._x_coord,\n ]\n )\n ]\n )\n coordinates.set_title(0, 'Coordinates')\n return widgets.VBox(children=[\n self._model_data_selector.get_widget(),\n coordinates,\n self._update_button]\n )\n\n def _plot(self, d):\n with self._output:\n clear_output(wait=True)\n display(go.FigureWidget(\n paiplot.scatter_model_error(widget_repo.ml_repo,\n self._model_data_selector.get_models(),\n self._model_data_selector.get_data(),\n x_coordinate=self._x_coord.value,\n y_coordinate=self._coord.value)\n ))\n\n @_add_title_and_border('Scatter Plot 
Pointwise Errors.')\n def get_widget(self):\n return widgets.HBox(children=[\n self._get_selection_widget(),\n self._output\n ])\n\n\nclass IndividualConditionalExpectation:\n \"\"\"Plots the individual conditional expectation at a certain point.\n \"\"\"\n\n def __init__(self):\n names = widget_repo.data.get_data_names()\n self._model_data_selection = _ModelAndDataSelectorWithVersion()\n self._update_button = widgets.Button(description='update')\n self._update_button.on_click(self._plot)\n self._output = widgets.Output()\n self._cluster_statistics_output = widgets.Output()\n self._output_tab = widgets.Tab(children=[self._output,\n self._cluster_statistics_output\n ])\n self._output_tab.set_title(0, 'ICE plots')\n self._output_tab.set_title(1, 'clustering')\n self._coord = widgets.Select(\n options=widget_repo.data._y_coord_names,\n value=widget_repo.data._y_coord_names[0],\n disabled=False\n )\n self._x_coord = widgets.Select(\n options=widget_repo.data._x_coord_names,\n value=widget_repo.data._x_coord_names[0],\n disabled=False\n )\n self._x_value_start = widgets.FloatText(value=-1.0)\n self._x_value_end = widgets.FloatText(value=1.0)\n self._n_x_points = widgets.IntText(value=10)\n self._accordion = widgets.Accordion(children=[\n self._get_selection_widget(),\n self._get_clustering_widget()\n ])\n\n self._accordion.set_title(0, 'Selection')\n self._accordion.set_title(1, 'Clustering')\n\n def _get_selection_widget(self):\n return widgets.VBox(children=[\n self._model_data_selection.get_widget(),\n widgets.Label(value='y-coordinates'),\n self._coord,\n widgets.Label(value='x-coordinates'),\n self._x_coord,\n widgets.Label(value='x-start'),\n self._x_value_start,\n widgets.Label(value='x-end'),\n self._x_value_end,\n widgets.Label(value='num x-points'),\n self._n_x_points,\n self._update_button])\n\n def _get_clustering_widget(self):\n self._update_clustering = widgets.Button(description='update')\n self._use_clustering = widgets.Checkbox(\n value=True, description='apply clustering')\n self._max_num_clusters = widgets.IntText(value=20,\n description='maximum number of clusters')\n self._random_state = widgets.IntText(\n value=42, description='Random State')\n self._cache_in_repo = widgets.Checkbox(\n value=True, description='cache ICE in repo')\n self._scale = widgets.Checkbox(\n value=True, description='scale x-values to zero mean and unit variance')\n self._update_clustering.on_click(self._cluster)\n\n return widgets.VBox(children=[\n self._use_clustering,\n self._max_num_clusters,\n self._random_state,\n self._cache_in_repo,\n self._scale\n ])\n\n def _plot(self, d):\n cluster_param = None\n if self._use_clustering.value:\n cluster_param = {'n_clusters': self._max_num_clusters.value,\n 'random_state': self._random_state.value}\n # since the numpy cannot json serialized by default,\n # caching would not working, therefore we convert it into list\n x_points = [x for x in np.linspace(self._x_value_start.value, self._x_value_end.value,\n self._n_x_points.value)]\n self._ice = []\n for model, model_versions in self._model_data_selection.get_models().items():\n for data, data_versions in self._model_data_selection.get_data().items():\n for model_version in model_versions:\n for data_version in data_versions:\n self._ice.append((model, model_version, data, data_version,\n interpretation.compute_ice(widget_repo.ml_repo,\n x_points,\n data,\n model=model,\n model_version=model_version,\n data_version=data_version,\n y_coordinate=self._coord.value,\n x_coordinate=self._x_coord.value,\n 
cache=self._cache_in_repo.value,\n clustering_param=cluster_param,\n end_index=200),\n )\n )\n\n with self._output:\n clear_output(wait=True)\n display(go.FigureWidget(\n paiplot.ice(self._ice)\n ))\n self._output_tab.selected_index = 0\n if len(self._ice) > 0:\n if self._ice[0][-1].cluster_centers is not None:\n with self._cluster_statistics_output:\n clear_output(wait=True)\n display(go.FigureWidget(\n paiplot.ice_clusters(self._ice)\n ))\n\n def _cluster(self, d):\n\n self._output_tab.set_title(1, 'computing...')\n models = [x for x in self._models.value]\n\n for x in self._labels.value:\n l = widget_repo.labels[x]\n models.append((l['model'], l['version'],))\n\n with self._cluster_statistics_output:\n clear_output(wait=True)\n # display(self._recommendation_table.iloc[0:self._max_num_recommendations.value])\n self._output_tab.selected_index = 1\n self._output_tab.set_title(1, 'cluster statistics')\n # self._recommendation_selection.value = self._recommendation_table.index[0]\n\n @_add_title_and_border('Individual Conditional Expectation Plots')\n def get_widget(self):\n return widgets.HBox(children=[\n self._accordion,\n self._output_tab\n ])\n\n\nclass PlotMeasureVsParameter:\n def __init__(self):\n self._model_selector = widgets.Dropdown(\n options=widget_repo.model.get_models(), value=None)\n self._data_selector = _DataSelectorWithVersion(display_selection=False)\n # self._model_data_selector = _ModelAndDataSelectorWithVersion(\n # display_selection=False)\n self._measure_selector = widgets.Dropdown(options=widget_repo.measures)\n self._model_selector.observe(\n self._update_param_selector)\n self._param_selector = widgets.Dropdown(options=[])\n self._output = widgets.Output()\n self._update_button = widgets.Button(description='update')\n self._update_button.on_click(self._plot)\n\n def _print(self, message):\n with self._output:\n clear_output(wait=True)\n print(message)\n\n def _update_param_selector(self, change):\n # print(change)\n if self._model_selector.value is None:\n return\n model = self._model_selector.value\n model_param_name = NamingConventions.get_model_param_name(\n model)\n params = []\n try:\n model_params = widget_repo.ml_repo.get(model_param_name)\n for p in model_params.get_params().keys():\n params.append(p)\n except:\n pass\n train_param_name = str(NamingConventions.TrainingParam(model))\n try:\n train_params = widget_repo.ml_repo.get(train_param_name)\n for p in train_params.get_params().keys():\n params.append(p)\n except:\n pass\n self._param_selector.options = params\n\n def _plot(self, change):\n measures = []\n model = self._model_selector.value\n if model is None:\n self._print('Please select a model.')\n return\n data = self._data_selector.get_data()\n for d, w in data.items():\n if len(w) > 0:\n measures.append(str(NamingConventions.Measure(\n model=NamingConventions.get_model_from_name(model), data=d, measure_type=self._measure_selector.value)))\n if len(measures) == 0:\n self._print('Please select data together with data versions.')\n return\n with self._output:\n clear_output(wait=True)\n # create measure names from selected models, data and measures\n display(go.FigureWidget(\n paiplot.measure_by_parameter(widget_repo.ml_repo,\n measures, self._param_selector.value)\n ))\n\n @_add_title_and_border('Measure vs Parameter')\n def get_widget(self):\n return widgets.HBox(children=[\n widgets.VBox(children=[\n widgets.VBox(children=[\n widgets.Label(value='Model'),\n self._model_selector]\n ),\n self._data_selector.get_widget(),\n 
self._measure_selector,\n self._param_selector,\n self._update_button\n ]),\n self._output\n ])\n" ]
[ [ "numpy.load" ], [ "matplotlib.pyplot.legend", "pandas.concat", "matplotlib.dates.DateFormatter", "numpy.linspace", "matplotlib.pyplot.subplots", "pandas.DataFrame", "matplotlib.pyplot.plot", "pandas.DataFrame.from_dict", "pandas.set_option", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.rcdefaults", "matplotlib.pyplot.show", "numpy.where", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
liwanjunit/ASRGAN
[ "ac01e546939c435c246fbdce64606464f8fdfc00" ]
[ "loss/loss_new.py" ]
[ "import torch\nfrom torch import nn\nfrom torchvision.models.vgg import vgg16\n\n\nclass GeneratorLoss_NEW(nn.Module):\n def __init__(self):\n super(GeneratorLoss_NEW, self).__init__()\n vgg = vgg16(pretrained=True)\n # loss_network = nn.Sequential(*list(vgg.features)[:31]).eval()\n loss_network = nn.Sequential(*list(vgg.features)[:35]).eval()\n for param in loss_network.parameters():\n param.requires_grad = False\n self.loss_network = loss_network\n self.mse_loss = nn.MSELoss()\n self.tv_loss = TVLoss()\n self.charbonnier_loss = L1_Charbonnier_loss()\n\n def forward(self, out_labels, out_images, target_images):\n # Adversarial Loss\n adversarial_loss = torch.mean(1 - out_labels)\n # Perception Loss\n # perception_loss = self.mse_loss(self.loss_network(out_images), self.loss_network(target_images))\n perception_loss = self.charbonnier_loss(self.loss_network(out_images), self.loss_network(target_images))\n # Image Loss\n # image_loss = self.mse_loss(out_images, target_images)\n image_loss = self.charbonnier_loss(out_images, target_images)\n # TV Loss\n tv_loss = self.tv_loss(out_images)\n return image_loss + 0.001 * adversarial_loss + 0.006 * perception_loss + 2e-8 * tv_loss\n\n\nclass TVLoss(nn.Module):\n def __init__(self, tv_loss_weight=1):\n super(TVLoss, self).__init__()\n self.tv_loss_weight = tv_loss_weight\n\n def forward(self, x):\n batch_size = x.size()[0]\n h_x = x.size()[2]\n w_x = x.size()[3]\n count_h = self.tensor_size(x[:, :, 1:, :])\n count_w = self.tensor_size(x[:, :, :, 1:])\n h_tv = torch.pow((x[:, :, 1:, :] - x[:, :, :h_x - 1, :]), 2).sum()\n w_tv = torch.pow((x[:, :, :, 1:] - x[:, :, :, :w_x - 1]), 2).sum()\n return self.tv_loss_weight * 2 * (h_tv / count_h + w_tv / count_w) / batch_size\n\n @staticmethod\n def tensor_size(t):\n return t.size()[1] * t.size()[2] * t.size()[3]\n\n\nclass L1_Charbonnier_loss(torch.nn.Module):\n \"\"\"L1 Charbonnierloss.\"\"\"\n def __init__(self):\n super(L1_Charbonnier_loss, self).__init__()\n self.eps = 1e-6\n\n def forward(self, X, Y):\n diff = torch.add(X, -Y)\n error = torch.sqrt(diff * diff + self.eps)\n loss = torch.mean(error)\n return loss\n\n\nif __name__ == \"__main__\":\n g_loss = GeneratorLoss_NEW()\n print(g_loss)\n" ]
[ [ "torch.mean", "torch.nn.MSELoss", "torch.add", "torch.sqrt", "torch.pow" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
LSanselme/kerod
[ "cb52775ed501cbe4bd5fc0f22ec0359ca1d5f902" ]
[ "src/kerod/core/sampling_ops.py" ]
[ "# Copyright 2017 The TensorFlow Authors and modified by Emilien Garreau. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Method to subsample minibatches by balancing positives and negatives.\n\nSubsamples minibatches based on a pre-specified positive fraction in range\n[0,1]. The class presumes there are many more negatives than positive examples:\nif the desired sample_size cannot be achieved with the pre-specified positive\nfraction, it fills the rest with negative examples. If this is not sufficient\nfor obtaining the desired sample_size, it returns fewer examples.\n\nThe main function to call is Subsample(self, indicator, labels). For convenience\none can also call SubsampleWeights(self, weights, labels) which is defined in\nthe minibatch_sampler base class.\n\nWhen is_static is True, it implements a method that guarantees static shapes.\nIt also ensures the length of output of the subsample is always sample_size, even\nwhen number of examples set to True in indicator is less than sample_size.\n\"\"\"\n\nimport tensorflow as tf\n\nfrom kerod.utils import ops\n\n\ndef subsample_indicator(indicator, num_samples):\n \"\"\"Subsample indicator vector.\n\n Given a boolean indicator vector with M elements set to `True`, the function\n assigns all but `num_samples` of these previously `True` elements to\n `False`. If `num_samples` is greater than M, the original indicator vector\n is returned.\n\n Arguments:\n - *indicator*: a 1-dimensional boolean tensor indicating which elements\n are allowed to be sampled and which are not.\n\n - *num_samples*: int32 scalar tensor\n\n Returns:\n\n A boolean tensor with the same shape as input (indicator) tensor\n \"\"\"\n indices = tf.where(indicator)\n indices = tf.random.shuffle(indices)\n indices = tf.reshape(indices, [-1])\n\n num_samples = tf.minimum(tf.size(indices), num_samples)\n selected_indices = tf.slice(indices, [0], tf.reshape(num_samples, [1]))\n\n selected_indicator = ops.indices_to_dense_vector(selected_indices, tf.shape(indicator)[0])\n\n return tf.equal(selected_indicator, 1)\n\n\ndef sample_balanced_positive_negative(indicator, sample_size, labels, positive_fraction=0.5):\n \"\"\"Subsamples minibatches to a desired balance of positives and negatives.\n\n Arguments:\n\n - *indicator*: boolean tensor of shape [N] whose True entries can be sampled.\n - *sample_size*: desired batch size. 
If None, keeps all positive samples and\n randomly selects negative samples so that the positive sample fraction\n matches positive_fraction.\n - *labels*: boolean tensor of shape [N] denoting positive(=True) and negative\n (=False) examples.\n - *positive_fraction*: desired fraction of positive examples (scalar in [0,1])\n in the batch.\n\n Returns:\n\n *sampled_idx_indicator*: boolean tensor of shape [N], True for entries which are sampled.\n \"\"\"\n\n negative_idx = tf.logical_not(labels)\n positive_idx = tf.logical_and(labels, indicator)\n negative_idx = tf.logical_and(negative_idx, indicator)\n\n # Sample positive and negative samples separately\n if sample_size is None:\n max_num_pos = tf.reduce_sum(tf.cast(positive_idx, dtype=tf.int32))\n else:\n max_num_pos = int(positive_fraction * sample_size)\n sampled_pos_idx = subsample_indicator(positive_idx, max_num_pos)\n num_sampled_pos = tf.reduce_sum(tf.cast(sampled_pos_idx, tf.int32))\n if sample_size is None:\n negative_positive_ratio = (1 - positive_fraction) / positive_fraction\n max_num_neg = tf.cast(negative_positive_ratio * tf.cast(num_sampled_pos, dtype=tf.float32),\n dtype=tf.int32)\n else:\n max_num_neg = sample_size - num_sampled_pos\n sampled_neg_idx = subsample_indicator(negative_idx, max_num_neg)\n\n return tf.logical_or(sampled_pos_idx, sampled_neg_idx)\n\n\ndef batch_sample_balanced_positive_negative(indicators,\n sample_size,\n labels,\n positive_fraction=0.5,\n dtype=tf.float32):\n \"\"\"Subsamples minibatches to a desired balance of positives and negatives.\n\n Arguments:\n\n - *indicator*: boolean tensor of shape [batch_size, N] whose True entries can be sampled.\n - *sample_size*: desired batch size. If None, keeps all positive samples and\n randomly selects negative samples so that the positive sample fraction\n matches positive_fraction.\n - *labels*: boolean tensor of shape [batch_size, N] denoting positive(=True) and negative\n (=False) examples.\n - *positive_fraction*: desired fraction of positive examples (scalar in [0,1])\n in the batch.\n\n Returns:\n\n A boolean tensor of shape [M, N], True for entries which are sampled.\n \"\"\"\n\n def _minibatch_subsample_fn(inputs):\n indicators, targets = inputs\n return sample_balanced_positive_negative(tf.cast(indicators, tf.bool),\n sample_size,\n tf.cast(targets, tf.bool),\n positive_fraction=positive_fraction)\n\n return tf.cast(tf.map_fn(_minibatch_subsample_fn, [indicators, labels],\n dtype=tf.bool,\n parallel_iterations=16,\n back_prop=True),\n dtype=dtype)\n" ]
[ [ "tensorflow.shape", "tensorflow.logical_or", "tensorflow.reshape", "tensorflow.equal", "tensorflow.cast", "tensorflow.random.shuffle", "tensorflow.map_fn", "tensorflow.where", "tensorflow.logical_not", "tensorflow.size", "tensorflow.logical_and" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
tapasi-brahma/nobrainer
[ "c46586658d226bc3ca22869fd45a2674fdd52be9" ]
[ "nobrainer/metrics.py" ]
[ "\"\"\"Implementations of metrics for 3D semantic segmentation.\"\"\"\n\nimport tensorflow as tf\n\n\ndef average_volume_difference():\n raise NotImplementedError()\n\n\ndef dice(y_true, y_pred, axis=(1, 2, 3, 4)):\n \"\"\"Calculate Dice similarity between labels and predictions.\n\n Dice similarity is in [0, 1], where 1 is perfect overlap and 0 is no\n overlap. If both labels and predictions are empty (e.g., all background),\n then Dice similarity is 1.\n\n If we assume the inputs are rank 5 [`(batch, x, y, z, classes)`], then an\n axis parameter of `(1, 2, 3)` will result in a tensor that contains a Dice\n score for every class in every item in the batch. The shape of this tensor\n will be `(batch, classes)`. If the inputs only have one class (e.g., binary\n segmentation), then an axis parameter of `(1, 2, 3, 4)` should be used.\n This will result in a tensor of shape `(batch,)`, where every value is the\n Dice similarity for that prediction.\n\n Implemented according to https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4533825/#Equ6\n\n Returns\n -------\n Tensor of Dice similarities.\n\n Citations\n ---------\n Taha AA, Hanbury A. Metrics for evaluating 3D medical image segmentation:\n analysis, selection, and tool. BMC Med Imaging. 2015;15:29. Published 2015\n Aug 12. doi:10.1186/s12880-015-0068-x\n \"\"\"\n y_pred = tf.convert_to_tensor(y_pred)\n y_true = tf.cast(y_true, y_pred.dtype)\n eps = tf.keras.backend.epsilon()\n\n intersection = tf.reduce_sum(y_true * y_pred, axis=axis)\n summation = tf.reduce_sum(y_true, axis=axis) + tf.reduce_sum(y_pred, axis=axis)\n return (2 * intersection + eps) / (summation + eps)\n\n\ndef generalized_dice(y_true, y_pred, axis=(1, 2, 3)):\n \"\"\"Calculate Generalized Dice similarity. This is useful for multi-class\n predictions.\n\n If we assume the inputs are rank 5 [`(batch, x, y, z, classes)`], then an\n axis parameter of `(1, 2, 3)` should be used. This will result in a tensor\n of shape `(batch,)`, where every value is the Generalized Dice similarity\n for that prediction, across all classes.\n\n Returns\n -------\n Tensor of Generalized Dice similarities.\n \"\"\"\n y_pred = tf.convert_to_tensor(y_pred)\n y_true = tf.cast(y_true, y_pred.dtype)\n\n if y_true.get_shape().ndims < 2 or y_pred.get_shape().ndims < 2:\n raise ValueError(\"y_true and y_pred must be at least rank 2.\")\n\n epsilon = tf.keras.backend.epsilon()\n \n w = tf.math.reciprocal(tf.square(tf.reduce_sum(y_true, axis=axis)))\n w = tf.where(tf.math.is_finite(w), w, epsilon)\n num = 2 * tf.reduce_sum(w * tf.reduce_sum(y_true * y_pred, axis= axis), axis=-1)\n den = tf.reduce_sum(w * tf.reduce_sum(y_true + y_pred, axis= axis), axis=-1)\n gdice = num/den\n gdice = tf.where(tf.math.is_finite(gdice), gdice, tf.zeros_like(gdice))\n return gdice\n\n\ndef hamming(y_true, y_pred, axis=(1, 2, 3)):\n y_pred = tf.convert_to_tensor(y_pred)\n y_true = tf.cast(y_true, y_pred.dtype)\n return tf.reduce_mean(tf.not_equal(y_pred, y_true), axis=axis)\n\n\ndef haussdorf():\n raise NotADirectoryError()\n\n\ndef jaccard(y_true, y_pred, axis=(1, 2, 3, 4)):\n \"\"\"Calculate Jaccard similarity between labels and predictions.\n\n Jaccard similarity is in [0, 1], where 1 is perfect overlap and 0 is no\n overlap. If both labels and predictions are empty (e.g., all background),\n then Jaccard similarity is 1.\n\n If we assume the inputs are rank 5 [`(batch, x, y, z, classes)`], then an\n axis parameter of `(1, 2, 3)` will result in a tensor that contains a Jaccard\n score for every class in every item in the batch. 
The shape of this tensor\n will be `(batch, classes)`. If the inputs only have one class (e.g., binary\n segmentation), then an axis parameter of `(1, 2, 3, 4)` should be used.\n This will result in a tensor of shape `(batch,)`, where every value is the\n Jaccard similarity for that prediction.\n\n Implemented according to https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4533825/#Equ7\n\n Returns\n -------\n Tensor of Jaccard similarities.\n\n Citations\n ---------\n Taha AA, Hanbury A. Metrics for evaluating 3D medical image segmentation:\n analysis, selection, and tool. BMC Med Imaging. 2015;15:29. Published 2015\n Aug 12. doi:10.1186/s12880-015-0068-x\n \"\"\"\n y_pred = tf.convert_to_tensor(y_pred)\n y_true = tf.cast(y_true, y_pred.dtype)\n eps = tf.keras.backend.epsilon()\n\n intersection = tf.reduce_sum(y_true * y_pred, axis=axis)\n union = tf.reduce_sum(y_true, axis=axis) + tf.reduce_sum(y_pred, axis=axis)\n return (intersection + eps) / (union - intersection + eps)\n\n\ndef tversky(y_true, y_pred, axis=(1, 2, 3), alpha=0.3, beta=0.7):\n y_pred = tf.convert_to_tensor(y_pred)\n y_true = tf.cast(y_true, y_pred.dtype)\n\n if y_true.get_shape().ndims < 2 or y_pred.get_shape().ndims < 2:\n raise ValueError(\"y_true and y_pred must be at least rank 2.\")\n\n eps = tf.keras.backend.epsilon()\n\n num = tf.reduce_sum(y_pred * y_true, axis=axis)\n den = (\n num\n + alpha * tf.reduce_sum(y_pred * (1 - y_true), axis=axis)\n + beta * tf.reduce_sum((1 - y_pred) * y_true, axis=axis)\n )\n # Sum over classes.\n return tf.reduce_sum((num + eps) / (den + eps), axis=-1)\n\ndef dice_coef_multilabel(y_true, y_pred):\n n_classes= tf.shape(y_pred)[-1]\n dice_coeff=0\n for index in range(n_classes):\n dice_coeff -= dice(y_true[:,:,:,:,index], y_pred[:,:,:,:,index])\n return dice_coeff\n" ]
[ [ "tensorflow.convert_to_tensor", "tensorflow.not_equal", "tensorflow.math.is_finite", "tensorflow.shape", "tensorflow.reduce_sum", "tensorflow.cast", "tensorflow.zeros_like", "tensorflow.keras.backend.epsilon" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
huylb314/AVIAD_AVIJST
[ "bf8e0617849b4f8f4b95ea345be1565ea063ee38" ]
[ "avijst/tensorflow/data.py" ]
[ "import numpy as np\nfrom sklearn import metrics\nimport math\nfrom keras.preprocessing import sequence\nfrom keras.preprocessing.text import Tokenizer\nfrom typing import *\n\n# fastai utility\ndef listify(o):\n if o is None: return []\n if isinstance(o, list): return o\n if isinstance(o, str): return [o]\n if isinstance(o, Iterable): return list(o)\n return [o]\n\ndef compose(x, funcs, *args, **kwargs):\n for f in listify(funcs): \n x = f(x, **kwargs)\n return x\n\nclass Onehotify():\n def __init__(self, vocab_size):\n self.vocab_size = vocab_size\n self.tokenizer = Tokenizer(num_words=vocab_size)\n def __call__(self, item):\n return self.tokenizer.sequences_to_matrix([item], mode='binary')\n\nclass Padify():\n def __init__(self, maxlen):\n self.maxlen = maxlen\n def __call__(self, item):\n return sequence.pad_sequences([item], maxlen=self.maxlen)\n\nclass YOnehotify():\n def __init__(self, num_classes):\n self.num_classes = num_classes\n def __call__(self, item):\n categorical = np.zeros((1, self.num_classes))\n categorical[0, item] = 1\n return categorical\n\nclass Dataset():\n def __init__(self, x, y, tfms_x, tfms_y): \n self.x, self.y = x, y\n self.x_tfms, self.y_tfms = tfms_x, tfms_y\n def __len__(self): \n return len(self.x)\n def _get_transform(self, i, tfms):\n return compose(i, tfms)\n def __getitem__(self, i): \n batch_x, batch_y = self.x[i], self.y[i]\n return_x, return_y = [], []\n if isinstance(i, slice): \n return_x = [self._get_transform(o, self.x_tfms) for o in batch_x]\n if isinstance(i, slice):\n return_y = [self._get_transform(o, self.y_tfms) for o in batch_y]\n return np.vstack(return_x), np.vstack(return_y)\n\nclass DataLoader():\n def __init__(self, ds, bs, drop_last=True): self.ds, self.bs, self.drop_last = ds, bs, drop_last\n def __iter__(self):\n length = len(self.ds) // self.bs if self.drop_last else math.ceil(len(self.ds) / self.bs)\n for i in range(0, length, 1):\n yield self.ds[(i*self.bs):(i*self.bs)+self.bs]" ]
[ [ "numpy.zeros", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
orrinjelo/AedanWallpaper
[ "c5d67c45d7d295d90bc979f2cda645e0b578f10c" ]
[ "scripts/rainbow.py" ]
[ "from PIL import Image\nimport numpy as np\nimport colorsys\nimport os, sys\nimport argparse\nimport matplotlib.pyplot as plt \n\n\nrgb_to_hsv = np.vectorize(colorsys.rgb_to_hsv)\nhsv_to_rgb = np.vectorize(colorsys.hsv_to_rgb)\n\ndef crop(image, box=None):\n if box:\n imageBox = box\n else:\n imageBox = image.getbbox()\n return image.crop(imageBox)\n\ndef hue_shift(image, value):\n im = image.convert('RGBA')\n arr = np.array(np.asarray(im).astype(float))\n r,g,b,a = np.rollaxis(arr, axis=-1)\n # print(np.max(r))\n h,s,v = rgb_to_hsv(r, g, b)\n r, g, b = hsv_to_rgb((h + value/360.0) % 1.0, s, v)\n arr = np.dstack((r, g, b, a))\n\n # print(np.max(r))\n # plt.imshow(arr.astype(int), aspect='auto')\n # plt.show()\n\n return Image.fromarray(arr.astype('uint8'), 'RGBA')\n\nparser = argparse.ArgumentParser(description='Rainbow an image batch')\nparser.add_argument('--filename', dest='filename', type=str)\nparser.add_argument('--step', dest='step', type=float, default=5.0)\nparser.add_argument('--max_step', dest='max_step', type=float, default=360.0)\nargs = parser.parse_args()\n\ncolor_image = Image.open(args.filename)\n\nbasename = os.path.basename(args.filename)\nbase, ext = os.path.splitext(basename)\n\nif not os.path.exists('anim'):\n os.mkdir('anim')\n\nfor n in range(0, int(args.max_step/args.step)):\n dtheta = n*args.step\n print('Writing out', dtheta)\n cropped = crop(color_image, (1620, 780, 2220, 1380))\n new_im = hue_shift(cropped, dtheta)\n new_fn = os.path.join('anim','{0}_{1}{2}'.format(base, n, ext))\n n += 1\n new_im.save(new_fn)" ]
[ [ "numpy.rollaxis", "numpy.vectorize", "numpy.asarray", "numpy.dstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
leike666666/tensorflow
[ "04f2870814d2773e09dcfa00cbe76a66a2c4de88", "04f2870814d2773e09dcfa00cbe76a66a2c4de88", "a3fd0ddfcb716be124e95b51e96e6c1e4507ef64", "04f2870814d2773e09dcfa00cbe76a66a2c4de88", "a3fd0ddfcb716be124e95b51e96e6c1e4507ef64", "a3fd0ddfcb716be124e95b51e96e6c1e4507ef64", "04f2870814d2773e09dcfa00cbe76a66a2c4de88", "04f2870814d2773e09dcfa00cbe76a66a2c4de88", "a3fd0ddfcb716be124e95b51e96e6c1e4507ef64", "04f2870814d2773e09dcfa00cbe76a66a2c4de88", "a3fd0ddfcb716be124e95b51e96e6c1e4507ef64", "a3fd0ddfcb716be124e95b51e96e6c1e4507ef64" ]
[ "tensorflow/python/keras/regularizers_test.py", "tensorflow/python/kernel_tests/cwise_ops_test.py", "tensorflow/python/data/experimental/kernel_tests/stats_dataset_ops_test.py", "tensorflow/lite/testing/op_tests/transpose_conv.py", "tensorflow/python/ops/parallel_for/array_test.py", "tensorflow/python/framework/convert_to_constants.py", "tensorflow/lite/testing/op_tests/constant.py", "tensorflow/lite/testing/op_tests/cast.py", "tensorflow/python/keras/mixed_precision/experimental/test_util.py", "tensorflow/lite/testing/op_tests/not_equal.py", "tensorflow/python/keras/distribute/keras_correctness_test_base.py", "tensorflow/compiler/tests/matrix_diag_ops_test.py" ]
[ "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for Keras regularizers.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized\nimport numpy as np\n\nfrom tensorflow.python import keras\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.keras import keras_parameterized\nfrom tensorflow.python.keras import regularizers\nfrom tensorflow.python.keras import testing_utils\nfrom tensorflow.python.keras.utils import np_utils\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.platform import test\n\n\nDATA_DIM = 5\nNUM_CLASSES = 2\n\n\nclass KerasRegularizersTest(keras_parameterized.TestCase,\n parameterized.TestCase):\n\n def create_model(self, kernel_regularizer=None, activity_regularizer=None):\n model = keras.models.Sequential()\n model.add(keras.layers.Dense(NUM_CLASSES,\n kernel_regularizer=kernel_regularizer,\n activity_regularizer=activity_regularizer,\n input_shape=(DATA_DIM,)))\n return model\n\n def get_data(self):\n (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(\n train_samples=10,\n test_samples=10,\n input_shape=(DATA_DIM,),\n num_classes=NUM_CLASSES)\n y_train = np_utils.to_categorical(y_train, NUM_CLASSES)\n y_test = np_utils.to_categorical(y_test, NUM_CLASSES)\n return (x_train, y_train), (x_test, y_test)\n\n def create_multi_input_model_from(self, layer1, layer2):\n input_1 = keras.layers.Input(shape=(DATA_DIM,))\n input_2 = keras.layers.Input(shape=(DATA_DIM,))\n out1 = layer1(input_1)\n out2 = layer2(input_2)\n out = keras.layers.Average()([out1, out2])\n model = keras.models.Model([input_1, input_2], out)\n model.add_loss(keras.backend.mean(out2))\n model.add_loss(math_ops.reduce_sum(input_1))\n return model\n\n @keras_parameterized.run_all_keras_modes\n @parameterized.named_parameters([\n ('l1', regularizers.l1()),\n ('l2', regularizers.l2()),\n ('l1_l2', regularizers.l1_l2()),\n ])\n def test_kernel_regularization(self, regularizer):\n (x_train, y_train), _ = self.get_data()\n model = self.create_model(kernel_regularizer=regularizer)\n model.compile(\n loss='categorical_crossentropy',\n optimizer='sgd',\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function())\n self.assertEqual(len(model.losses), 1)\n model.fit(x_train, y_train, batch_size=10, epochs=1, verbose=0)\n\n @keras_parameterized.run_all_keras_modes\n @parameterized.named_parameters([\n ('l1', regularizers.l1()),\n ('l2', regularizers.l2()),\n ('l1_l2', regularizers.l1_l2()),\n ('l2_zero', keras.regularizers.l2(0.)),\n ])\n def test_activity_regularization(self, regularizer):\n (x_train, y_train), _ = self.get_data()\n model = self.create_model(activity_regularizer=regularizer)\n model.compile(\n loss='categorical_crossentropy',\n 
optimizer='sgd',\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function())\n self.assertEqual(len(model.losses), 1 if context.executing_eagerly() else 1)\n model.fit(x_train, y_train, batch_size=10, epochs=1, verbose=0)\n\n @keras_parameterized.run_all_keras_modes\n @keras_parameterized.run_with_all_model_types\n def test_zero_regularization(self):\n # Verifies that training with zero regularization works.\n x, y = np.ones((10, 10)), np.ones((10, 3))\n model = testing_utils.get_model_from_layers(\n [keras.layers.Dense(3, kernel_regularizer=keras.regularizers.l2(0))],\n input_shape=(10,))\n model.compile(\n 'sgd',\n 'mse',\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function())\n model.fit(x, y, batch_size=5, epochs=1)\n\n def test_custom_regularizer_saving(self):\n\n def my_regularizer(weights):\n return math_ops.reduce_sum(math_ops.abs(weights))\n\n inputs = keras.Input((10,))\n outputs = keras.layers.Dense(1, kernel_regularizer=my_regularizer)(inputs)\n model = keras.Model(inputs, outputs)\n model2 = model.from_config(\n model.get_config(), custom_objects={'my_regularizer': my_regularizer})\n self.assertEqual(model2.layers[1].kernel_regularizer, my_regularizer)\n\n @keras_parameterized.run_all_keras_modes\n @parameterized.named_parameters([\n ('l1', regularizers.l1()),\n ('l2', regularizers.l2()),\n ('l1_l2', regularizers.l1_l2()),\n ])\n def test_regularization_shared_layer(self, regularizer):\n dense_layer = keras.layers.Dense(\n NUM_CLASSES,\n kernel_regularizer=regularizer,\n activity_regularizer=regularizer)\n model = self.create_multi_input_model_from(dense_layer, dense_layer)\n model.compile(\n loss='categorical_crossentropy',\n optimizer='sgd',\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function())\n self.assertLen(model.losses, 5)\n\n @keras_parameterized.run_all_keras_modes\n @parameterized.named_parameters([\n ('l1', regularizers.l1()),\n ('l2', regularizers.l2()),\n ('l1_l2', regularizers.l1_l2()),\n ])\n def test_regularization_shared_model(self, regularizer):\n dense_layer = keras.layers.Dense(\n NUM_CLASSES,\n kernel_regularizer=regularizer,\n activity_regularizer=regularizer)\n\n input_tensor = keras.layers.Input(shape=(DATA_DIM,))\n dummy_model = keras.models.Model(input_tensor, dense_layer(input_tensor))\n\n model = self.create_multi_input_model_from(dummy_model, dummy_model)\n model.compile(\n loss='categorical_crossentropy',\n optimizer='sgd',\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function())\n self.assertLen(model.losses, 6)\n\n @keras_parameterized.run_all_keras_modes\n @parameterized.named_parameters([\n ('l1', regularizers.l1()),\n ('l2', regularizers.l2()),\n ('l1_l2', regularizers.l1_l2()),\n ])\n def test_regularization_shared_layer_in_different_models(self, regularizer):\n shared_dense = keras.layers.Dense(\n NUM_CLASSES,\n kernel_regularizer=regularizer,\n activity_regularizer=regularizer)\n models = []\n for _ in range(2):\n input_tensor = keras.layers.Input(shape=(DATA_DIM,))\n unshared_dense = keras.layers.Dense(\n NUM_CLASSES, kernel_regularizer=regularizer)\n out = unshared_dense(shared_dense(input_tensor))\n models.append(keras.models.Model(input_tensor, out))\n\n model = self.create_multi_input_model_from(\n layer1=models[0], layer2=models[1])\n model.compile(\n 
loss='categorical_crossentropy',\n optimizer='sgd',\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function())\n\n # We expect to see 9 losses on the model:\n # - 2 from the 2 add_loss calls on the outer model.\n # - 3 from the weight regularizers on the shared_dense layer, unshared_dense\n # in inner model 1, unshared_dense in inner model 2.\n # - 4 from activity regularizers on the shared_dense layer.\n self.assertLen(model.losses, 9)\n\n\nif __name__ == '__main__':\n test.main()\n", "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Functional tests for coefficient-wise operations.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python.compat import compat\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes as dtypes_lib\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gen_math_ops\nfrom tensorflow.python.ops import gradient_checker\nfrom tensorflow.python.ops import gradients_impl\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn_grad # pylint: disable=unused-import\nfrom tensorflow.python.platform import test\n\n_ADD = lambda x, y: x + y\n_SUB = lambda x, y: x - y\n_MUL = lambda x, y: x * y\n_POW = lambda x, y: x**y\n_TRUEDIV = lambda x, y: x / y\n_FLOORDIV = lambda x, y: x // y\n_MOD = lambda x, y: x % y\n\n_LT = lambda x, y: x < y\n_LE = lambda x, y: x <= y\n_GT = lambda x, y: x > y\n_GE = lambda x, y: x >= y\n\n_AND = lambda x, y: x & y\n_OR = lambda x, y: x | y\n_XOR = lambda x, y: x ^ y\n_INV = lambda x: ~x\n\n\n# TODO(zongheng): it'd be great to factor out this function and various random\n# SparseTensor gen funcs.\ndef _sparsify(x, thresh=0.5, index_dtype=np.int64):\n x[x < thresh] = 0\n\n non_zero = np.where(x)\n x_indices = np.vstack(non_zero).astype(index_dtype).T\n x_values = x[non_zero]\n x_shape = x.shape\n\n return sparse_tensor.SparseTensor(\n indices=x_indices, values=x_values, dense_shape=x_shape), x_values\n\n\ndef _default_tolerance(dtype):\n \"\"\"Returns a sensible default tolerance for comparing results of a given type.\n\n Args:\n dtype: A datatype.\n \"\"\"\n if dtype == np.float16:\n return 5e-3\n elif dtype in (np.float32, np.complex64):\n return 1e-3\n elif dtype in (np.float64, np.complex128):\n return 1e-5\n else:\n return None # Fail fast for unexpected types\n\n\nclass ComparisonOpTest(test.TestCase):\n\n def _compareScalar(self, func, x, y, dtype):\n with test_util.use_gpu():\n out = func(\n 
ops.convert_to_tensor(np.array([x]).astype(dtype)),\n ops.convert_to_tensor(np.array([y]).astype(dtype)))\n ret = self.evaluate(out)\n return ret[0]\n\n def testScalarCompareScalar(self):\n dtypes = [np.float16, np.float32, np.float64, np.int32, np.int64]\n data = [-1, 0, 1]\n for t in dtypes:\n for x in data:\n for y in data:\n self.assertEqual(self._compareScalar(math_ops.less, x, y, t), x < y)\n self.assertEqual(\n self._compareScalar(math_ops.less_equal, x, y, t), x <= y)\n self.assertEqual(\n self._compareScalar(math_ops.greater, x, y, t), x > y)\n self.assertEqual(\n self._compareScalar(math_ops.greater_equal, x, y, t), x >= y)\n self.assertEqual(self._compareScalar(math_ops.equal, x, y, t), x == y)\n self.assertEqual(\n self._compareScalar(math_ops.not_equal, x, y, t), x != y)\n data = [-1, 0, 1, -1j, 1j, 1 + 1j, 1 - 1j]\n for t in [np.complex64, np.complex128]:\n for x in data:\n for y in data:\n self.assertEqual(self._compareScalar(math_ops.equal, x, y, t), x == y)\n self.assertEqual(\n self._compareScalar(math_ops.not_equal, x, y, t), x != y)\n\n def _compare(self, x, y, np_func, tf_func):\n np_ans = np_func(x, y)\n with test_util.use_gpu():\n out = tf_func(ops.convert_to_tensor(x), ops.convert_to_tensor(y))\n tf_ans = self.evaluate(out)\n self.assertAllEqual(np_ans, tf_ans)\n\n def testTensorCompareTensor(self):\n x = np.linspace(-15, 15, 6).reshape(1, 3, 2)\n y = np.linspace(20, -10, 6).reshape(1, 3, 2)\n for t in [np.float16, np.float32, np.float64, np.int32, np.int64]:\n xt = x.astype(t)\n yt = y.astype(t)\n self._compare(xt, yt, np.less, math_ops.less)\n self._compare(xt, yt, np.less_equal, math_ops.less_equal)\n self._compare(xt, yt, np.greater, math_ops.greater)\n self._compare(xt, yt, np.greater_equal, math_ops.greater_equal)\n self._compare(xt, yt, np.equal, math_ops.equal)\n self._compare(xt, yt, np.not_equal, math_ops.not_equal)\n # Complex types do not support ordering but do support equality tests.\n for t in [np.complex64, np.complex128]:\n xt = x.astype(t)\n xt -= 1j * xt\n yt = y.astype(t)\n yt -= 1j * yt\n self._compare(xt, yt, np.equal, math_ops.equal)\n self._compare(xt, yt, np.not_equal, math_ops.not_equal)\n\n def _compareBCast(self, xs, ys, dtype, np_func, tf_func):\n x = np.linspace(-15, 15, np.prod(xs)).astype(dtype).reshape(xs)\n y = np.linspace(20, -10, np.prod(ys)).astype(dtype).reshape(ys)\n if dtype in (np.complex64, np.complex128):\n x -= 1j * x\n y -= 1j * y\n self._compare(x, y, np_func, tf_func)\n self._compare(y, x, np_func, tf_func)\n\n def _testBCastByFunc(self, np_func, tf_func, include_complex=False):\n shapes = [\n ([1, 3, 2], [1]),\n ([1, 3, 2], [2]),\n ([1, 3, 2], [3, 2]),\n ([1, 3, 2], [3, 1]),\n ([1, 3, 2], [1, 3, 2]),\n ([1, 3, 2], [2, 3, 1]),\n ([1, 3, 2], [2, 1, 1]),\n ([1, 3, 2], [1, 3, 1]),\n ([2, 1, 5], [2, 3, 1]),\n ([2, 0, 5], [2, 0, 1]),\n ([2, 3, 0], [2, 3, 1]),\n ]\n dtypes = [\n np.float16,\n np.float32,\n np.float64,\n np.int32,\n np.int64,\n ]\n if include_complex:\n dtypes.extend([np.complex64, np.complex128])\n\n for (xs, ys) in shapes:\n for dtype in dtypes:\n self._compareBCast(xs, ys, dtype, np_func, tf_func)\n\n def testBCastLess(self):\n self._testBCastByFunc(np.less, math_ops.less)\n\n def testBCastLessEqual(self):\n self._testBCastByFunc(np.less_equal, math_ops.less_equal)\n\n def testBCastGreater(self):\n self._testBCastByFunc(np.greater, math_ops.greater)\n\n def testBCastGreaterEqual(self):\n self._testBCastByFunc(np.greater_equal, math_ops.greater_equal)\n\n def testBCastEqual(self):\n 
self._testBCastByFunc(np.equal, math_ops.equal, include_complex=True)\n\n def testBCastNotEqual(self):\n self._testBCastByFunc(\n np.not_equal, math_ops.not_equal, include_complex=True)\n\n def testShapeMismatch(self):\n dtypes = [np.float16, np.float32, np.float64, np.int32, np.int64]\n funcs = [\n math_ops.less, math_ops.less_equal, math_ops.greater,\n math_ops.greater_equal, math_ops.equal, math_ops.not_equal\n ]\n x = np.arange(0, 10).reshape([2, 5])\n y = np.arange(0, 10).reshape([5, 2])\n for t in dtypes:\n for f in funcs:\n with self.assertRaisesRegexp(\n (ValueError, errors.InvalidArgumentError),\n \"Incompatible shapes|Dimensions must be equal\"):\n f(x.astype(t), y.astype(t))\n\n\nclass LogicalOpTest(test.TestCase):\n\n def _compareBinary(self, x, y, np_func, tf_func, use_gpu=False):\n np_ans = np_func(x, y)\n with test_util.device(use_gpu=use_gpu):\n inx = ops.convert_to_tensor(x)\n iny = ops.convert_to_tensor(y)\n out = tf_func(inx, iny)\n tf_val = self.evaluate(out)\n self.assertEqual(out.dtype, dtypes_lib.bool)\n self.assertAllEqual(np_ans, tf_val)\n self.assertShapeEqual(np_ans, out)\n\n def _not(self, x, use_gpu=False):\n np_ans = np.logical_not(x)\n with test_util.device(use_gpu=use_gpu):\n out = math_ops.logical_not(ops.convert_to_tensor(x))\n tf_val = self.evaluate(out)\n self.assertEqual(out.dtype, dtypes_lib.bool)\n self.assertAllEqual(np_ans, tf_val)\n self.assertShapeEqual(np_ans, out)\n\n def testScalar(self):\n data = [np.array([True]), np.array([False])]\n for use_gpu in [True, False]:\n for x in data:\n self._not(x, use_gpu)\n for x in data:\n for y in data:\n self._compareBinary(x, y, np.logical_and, math_ops.logical_and,\n use_gpu)\n self._compareBinary(x, y, np.logical_or, math_ops.logical_or, use_gpu)\n self._compareBinary(x, y, np.logical_xor, math_ops.logical_xor,\n use_gpu)\n\n def testTensor(self):\n x = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)\n y = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)\n for use_gpu in [True, False]:\n self._not(x, use_gpu)\n self._compareBinary(x, y, np.logical_and, math_ops.logical_and, use_gpu)\n self._compareBinary(x, y, np.logical_or, math_ops.logical_or, use_gpu)\n self._compareBinary(x, y, np.logical_xor, math_ops.logical_xor, use_gpu)\n\n def testBCast(self):\n shapes = [\n ([1, 3, 2], [1]),\n ([1, 3, 2], [2]),\n ([1, 3, 2], [3, 2]),\n ([1, 3, 2], [3, 1]),\n ([1, 3, 2], [1, 3, 2]),\n ([1, 3, 2], [2, 3, 1]),\n ([1, 3, 2], [2, 1, 1]),\n ([1, 3, 2], [1, 3, 1]),\n ([2, 1, 5], [2, 3, 1]),\n ([2, 0, 5], [2, 0, 1]),\n ([2, 3, 0], [2, 3, 1]),\n ]\n for (xs, ys) in shapes:\n x = np.random.randint(0, 2, np.prod(xs)).astype(np.bool).reshape(xs)\n y = np.random.randint(0, 2, np.prod(ys)).astype(np.bool).reshape(ys)\n for use_gpu in [True, False]:\n self._compareBinary(x, y, np.logical_and, math_ops.logical_and, use_gpu)\n self._compareBinary(x, y, np.logical_or, math_ops.logical_or, use_gpu)\n self._compareBinary(x, y, np.logical_xor, math_ops.logical_xor, use_gpu)\n\n @test_util.run_deprecated_v1\n def testShapeMismatch(self):\n x = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)\n y = np.random.randint(0, 2, 6).astype(np.bool).reshape(3, 2, 1)\n for f in [math_ops.logical_and, math_ops.logical_or, math_ops.logical_xor]:\n with self.assertRaisesWithPredicateMatch(\n ValueError, lambda e: \"Dimensions must\" in str(e)):\n f(x, y)\n\n @test_util.run_deprecated_v1\n def testUsingAsPythonValueFails(self):\n # Ensure that we raise an error when the user attempts to treat a\n # `Tensor` as a 
Python `bool`.\n b = constant_op.constant(False)\n with self.assertRaises(TypeError):\n if b:\n pass\n\n x = constant_op.constant(3)\n y = constant_op.constant(4)\n with self.assertRaises(TypeError):\n if x > y:\n pass\n\n z = constant_op.constant(7)\n\n # The chained comparison should fail because Python computes `x <\n # y` and short-circuits the comparison with `z` if it is `False`.\n with self.assertRaises(TypeError):\n _ = x < y < z\n\n\nclass SelectOpTest(test.TestCase):\n\n def _compare(self, fn, c, x, y, use_gpu):\n np_ans = np.where(c, x, y)\n with test_util.device(use_gpu=use_gpu):\n out = fn(c, x, y)\n tf_ans = self.evaluate(out)\n self.assertAllEqual(np_ans, tf_ans)\n self.assertShapeEqual(np_ans, out)\n\n def _compareGradientX(self,\n fn,\n c,\n x,\n y,\n numeric_gradient_type=None,\n x_init_value=None):\n with self.cached_session():\n inx = ops.convert_to_tensor(x)\n iny = ops.convert_to_tensor(y)\n out = fn(c, inx, iny)\n s = list(np.shape(c))\n if x_init_value is None:\n x_init_value = x\n if x.shape != y.shape:\n x_init_value = np.broadcast_to(y, x.shape)\n jacob_t, jacob_n = gradient_checker.compute_gradient(\n inx, s, out, s, x_init_value=x_init_value)\n if numeric_gradient_type is not None:\n xf = x.astype(numeric_gradient_type)\n yf = y.astype(numeric_gradient_type)\n inxf = ops.convert_to_tensor(xf)\n inyf = ops.convert_to_tensor(yf)\n outf = fn(c, inxf, inyf)\n _, jacob_n = gradient_checker.compute_gradient(\n inxf, s, outf, s, x_init_value=xf)\n jacob_n = jacob_n.astype(x.dtype)\n if x.dtype == np.float16:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float32:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float64:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)\n\n def _compareGradientY(self, fn, c, x, y, numeric_gradient_type=None):\n with self.cached_session():\n inx = ops.convert_to_tensor(x)\n iny = ops.convert_to_tensor(y)\n out = fn(c, inx, iny)\n s = list(np.shape(c))\n jacob_t, jacob_n = gradient_checker.compute_gradient(\n iny, s, out, s, x_init_value=x, delta=1.0)\n if numeric_gradient_type is not None:\n xf = x.astype(numeric_gradient_type)\n yf = y.astype(numeric_gradient_type)\n inxf = ops.convert_to_tensor(xf)\n inyf = ops.convert_to_tensor(yf)\n outf = fn(c, inxf, inyf)\n _, jacob_n = gradient_checker.compute_gradient(\n inyf, s, outf, s, x_init_value=yf)\n jacob_n = jacob_n.astype(x.dtype)\n if x.dtype == np.float16:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float32:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float64:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)\n\n def _testScalar(self, fn):\n c = True\n x = np.random.rand(1, 3, 2) * 100\n y = np.random.rand(1, 3, 2) * 100\n for t in [\n np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,\n np.complex128\n ]:\n xt = x.astype(t)\n yt = y.astype(t)\n self._compare(fn, c, xt, yt, use_gpu=False)\n if t in [np.float16, np.float32, np.float64]:\n self._compare(fn, c, xt, yt, use_gpu=True)\n\n def testScalar(self):\n self._testScalar(array_ops.where)\n self._testScalar(array_ops.where_v2)\n\n def _testScalarBroadcast(self, fn, c, x, y):\n for t in [\n np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,\n np.complex128\n ]:\n xt = x.astype(t)\n yt = y.astype(t)\n self._compare(fn, c, xt, yt, use_gpu=False)\n if t in [np.float16, np.float32, np.float64]:\n self._compare(fn, c, xt, yt, 
use_gpu=True)\n\n def testScalarBroadcast(self):\n c = True\n # where_v2 only\n x = np.random.rand(1, 3, 2) * 100\n y = np.random.rand(1, 1, 1) * 100\n self._testScalarBroadcast(array_ops.where_v2, c, x, y)\n self._testScalarBroadcast(array_ops.where_v2, c, y, x)\n x = np.random.rand(1, 3, 2) * 100\n y = np.random.rand(1, 3, 1) * 100\n self._testScalarBroadcast(array_ops.where_v2, c, x, y)\n self._testScalarBroadcast(array_ops.where_v2, c, y, x)\n x = np.random.rand(1, 3, 2) * 100\n y = np.random.rand(1, 1, 2) * 100\n self._testScalarBroadcast(array_ops.where_v2, c, x, y)\n self._testScalarBroadcast(array_ops.where_v2, c, y, x)\n x = np.random.rand(1, 3, 2) * 100\n y = np.random.rand(1, 1) * 100\n self._testScalarBroadcast(array_ops.where_v2, c, x, y)\n self._testScalarBroadcast(array_ops.where_v2, c, y, x)\n x = np.random.rand(1, 3, 2) * 100\n y = np.random.rand(1) * 100\n self._testScalarBroadcast(array_ops.where_v2, c, x, y)\n self._testScalarBroadcast(array_ops.where_v2, c, y, x)\n x = np.random.rand(1, 3, 2) * 100\n y = np.random.rand(1, 2) * 100\n self._testScalarBroadcast(array_ops.where_v2, c, x, y)\n self._testScalarBroadcast(array_ops.where_v2, c, y, x)\n x = np.random.rand(1, 3, 2) * 100\n y = np.random.rand(3, 2) * 100\n self._testScalarBroadcast(array_ops.where_v2, c, x, y)\n self._testScalarBroadcast(array_ops.where_v2, c, y, x)\n\n def _testBasic(self, fn):\n c = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)\n x = np.random.rand(1, 3, 2) * 100\n y = np.random.rand(1, 3, 2) * 100\n for t in [\n np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,\n np.complex128\n ]:\n xt = x.astype(t)\n yt = y.astype(t)\n self._compare(fn, c, xt, yt, use_gpu=False)\n if t in [np.float16, np.float32, np.float64]:\n self._compare(fn, c, xt, yt, use_gpu=True)\n\n def testBasic(self):\n self._testBasic(array_ops.where)\n self._testBasic(array_ops.where_v2)\n\n def _testBasicBroadcast(self, fn, c, x, y):\n for t in [\n np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,\n np.complex128\n ]:\n xt = x.astype(t)\n yt = y.astype(t)\n self._compare(fn, c, xt, yt, use_gpu=False)\n if t in [np.float16, np.float32, np.float64]:\n self._compare(fn, c, xt, yt, use_gpu=True)\n\n def testBasicBroadcast(self):\n c0 = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)\n c1 = np.random.randint(0, 2, 2).astype(np.bool).reshape(1, 1, 2)\n c2 = np.random.randint(0, 2, 3).astype(np.bool).reshape(1, 3, 1)\n c3 = np.random.randint(0, 2, 1).astype(np.bool).reshape(1, 1, 1)\n for c in [c0, c1, c2, c3]:\n # where_v2 only\n x = np.random.rand(1, 3, 2) * 100\n y = np.random.rand(1, 1, 1) * 100\n self._testBasicBroadcast(array_ops.where_v2, c, x, y)\n self._testBasicBroadcast(array_ops.where_v2, c, y, x)\n x = np.random.rand(1, 3, 2) * 100\n y = np.random.rand(1, 3, 1) * 100\n self._testBasicBroadcast(array_ops.where_v2, c, x, y)\n self._testBasicBroadcast(array_ops.where_v2, c, y, x)\n x = np.random.rand(1, 3, 2) * 100\n y = np.random.rand(1, 1, 2) * 100\n self._testBasicBroadcast(array_ops.where_v2, c, x, y)\n self._testBasicBroadcast(array_ops.where_v2, c, y, x)\n x = np.random.rand(1, 3, 2) * 100\n y = np.random.rand(1, 1) * 100\n self._testBasicBroadcast(array_ops.where_v2, c, x, y)\n self._testBasicBroadcast(array_ops.where_v2, c, y, x)\n x = np.random.rand(1, 3, 2) * 100\n y = np.random.rand(1) * 100\n self._testBasicBroadcast(array_ops.where_v2, c, x, y)\n self._testBasicBroadcast(array_ops.where_v2, c, y, x)\n x = np.random.rand(1, 3, 2) * 100\n y = 
np.random.rand(1, 2) * 100\n self._testBasicBroadcast(array_ops.where_v2, c, x, y)\n self._testBasicBroadcast(array_ops.where_v2, c, y, x)\n x = np.random.rand(1, 3, 2) * 100\n y = np.random.rand(3, 2) * 100\n self._testBasicBroadcast(array_ops.where_v2, c, x, y)\n self._testBasicBroadcast(array_ops.where_v2, c, y, x)\n\n def _testGradients(self, fn):\n c = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)\n x = np.random.rand(1, 3, 2) * 100\n y = np.random.rand(1, 3, 2) * 100\n for t in [np.float16, np.float32, np.float64]:\n xt = x.astype(t)\n yt = y.astype(t)\n if t == np.float16:\n # Compare fp16 theoretical gradients to fp32 numerical gradients,\n # since fp16 numerical gradients are too imprecise unless great\n # care is taken with choosing the inputs and the delta. This is\n # a weaker check (in particular, it does not test the op itself,\n # only its gradient), but it's much better than nothing.\n self._compareGradientX(fn, c, xt, yt, np.float)\n self._compareGradientY(fn, c, xt, yt, np.float)\n else:\n self._compareGradientX(fn, c, xt, yt)\n self._compareGradientY(fn, c, xt, yt)\n\n @test_util.run_deprecated_v1\n def testGradients(self):\n self._testGradients(array_ops.where)\n self._testGradients(array_ops.where_v2)\n\n @test_util.run_deprecated_v1\n def testGradientsBroadcast(self):\n c = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)\n for t in [np.float32, np.float64]:\n # where_v2 only\n x = np.random.rand(1, 3, 2) * 100\n y = np.random.rand(1, 1, 1) * 100\n self._compareGradientX(array_ops.where_v2, c, x.astype(t), y.astype(t))\n x = np.random.rand(1, 3, 2) * 100\n y = np.random.rand(1, 3, 1) * 100\n self._compareGradientX(array_ops.where_v2, c, x.astype(t), y.astype(t))\n x = np.random.rand(1, 3, 2) * 100\n y = np.random.rand(1, 1, 2) * 100\n self._compareGradientX(array_ops.where_v2, c, x.astype(t), y.astype(t))\n x = np.random.rand(1, 3, 2) * 100\n y = np.random.rand(1, 1) * 100\n self._compareGradientX(array_ops.where_v2, c, x.astype(t), y.astype(t))\n x = np.random.rand(1, 3, 2) * 100\n y = np.random.rand(1) * 100\n self._compareGradientX(array_ops.where_v2, c, x.astype(t), y.astype(t))\n x = np.random.rand(1, 3, 2) * 100\n y = np.random.rand(1, 2) * 100\n self._compareGradientX(array_ops.where_v2, c, x.astype(t), y.astype(t))\n x = np.random.rand(1, 3, 2) * 100\n y = np.random.rand(3, 2) * 100\n self._compareGradientX(array_ops.where_v2, c, x.astype(t), y.astype(t))\n\n def _testShapeMismatch(self, fn):\n c = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)\n x = np.random.rand(1, 3, 2) * 100\n y = np.random.rand(2, 5, 3) * 100\n for t in [\n np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,\n np.complex128\n ]:\n xt = x.astype(t)\n yt = y.astype(t)\n with self.assertRaises(ValueError):\n fn(c, xt, yt)\n\n @test_util.run_deprecated_v1\n def testShapeMismatch(self):\n self._testShapeMismatch(array_ops.where)\n self._testShapeMismatch(array_ops.where_v2)\n\n def _testEmptyTensor(self, fn):\n c = np.random.randint(0, 3, 0).astype(np.bool).reshape(1, 3, 0)\n x = np.random.rand(1, 3, 0) * 100\n y = np.random.rand(1, 3, 0) * 100\n z_expected = np.zeros((1, 3, 0), dtype=np.float32)\n with self.cached_session():\n xt = x.astype(np.float32)\n yt = y.astype(np.float32)\n z = fn(c, xt, yt).eval()\n self.assertAllEqual(z_expected, z)\n\n @test_util.run_deprecated_v1\n def testEmptyTensor(self):\n self._testEmptyTensor(array_ops.where)\n self._testEmptyTensor(array_ops.where_v2)\n\n def _testNan(self, fn):\n with 
self.cached_session():\n for c in False, True:\n for a in 7.0, np.nan:\n for b in 5.0, np.nan:\n x = fn(c, a, b).eval()\n y = a if c else b\n self.assertEqual(np.isnan(x), np.isnan(y))\n\n @test_util.run_deprecated_v1\n def testNan(self):\n \"\"\"Verify that nans don't propagate where they shouldn't.\"\"\"\n self._testNan(array_ops.where)\n self._testNan(array_ops.where_v2)\n\n\nclass BatchSelectOpTest(test.TestCase):\n \"\"\"Test broadcasting of Select when 'c' is a vec and 't' &'e' are rank2+.\"\"\"\n\n def _compare(self, c, x, y, use_gpu):\n np_ans = np.dstack(\n [x_i if c_i else y_i for c_i, x_i, y_i in zip(c, x, y)]).transpose(\n [2, 0, 1])\n with test_util.device(use_gpu=use_gpu):\n out = array_ops.where(c, x, y)\n tf_ans = self.evaluate(out)\n self.assertAllEqual(np_ans, tf_ans)\n self.assertShapeEqual(np_ans, out)\n\n def _compareGradientX(self, c, x, y, numeric_gradient_type=None):\n with self.cached_session():\n inx = ops.convert_to_tensor(x)\n iny = ops.convert_to_tensor(y)\n out = array_ops.where(c, inx, iny)\n s = list(np.shape(x))\n jacob_t, jacob_n = gradient_checker.compute_gradient(\n inx, s, out, s, x_init_value=x)\n if numeric_gradient_type is not None:\n xf = x.astype(numeric_gradient_type)\n yf = y.astype(numeric_gradient_type)\n inxf = ops.convert_to_tensor(xf)\n inyf = ops.convert_to_tensor(yf)\n outf = array_ops.where(c, inxf, inyf)\n _, jacob_n = gradient_checker.compute_gradient(\n inxf, s, outf, s, x_init_value=xf)\n jacob_n = jacob_n.astype(x.dtype)\n if x.dtype == np.float16:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float32:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float64:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)\n\n def _compareGradientY(self, c, x, y, numeric_gradient_type=None):\n with self.cached_session():\n inx = ops.convert_to_tensor(x)\n iny = ops.convert_to_tensor(y)\n out = array_ops.where(c, inx, iny)\n s = list(np.shape(x))\n jacob_t, jacob_n = gradient_checker.compute_gradient(\n iny, s, out, s, x_init_value=y)\n if numeric_gradient_type is not None:\n xf = x.astype(numeric_gradient_type)\n yf = y.astype(numeric_gradient_type)\n inxf = ops.convert_to_tensor(xf)\n inyf = ops.convert_to_tensor(yf)\n outf = array_ops.where(c, inxf, inyf)\n _, jacob_n = gradient_checker.compute_gradient(\n inyf, s, outf, s, x_init_value=yf)\n jacob_n = jacob_n.astype(x.dtype)\n if x.dtype == np.float16:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float32:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float64:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)\n\n def testBasic(self):\n c = np.random.randint(0, 2, 16).astype(np.bool)\n x = np.random.rand(16, 2, 8) * 100\n y = np.random.rand(16, 2, 8) * 100\n for t in [\n np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,\n np.complex128\n ]:\n xt = x.astype(t)\n yt = y.astype(t)\n self._compare(c, xt, yt, use_gpu=False)\n if t in [np.float16, np.float32, np.float64]:\n self._compare(c, xt, yt, use_gpu=True)\n\n @test_util.run_deprecated_v1\n def testGradients(self):\n c = np.random.randint(0, 2, 16).astype(np.bool)\n x = np.random.rand(16, 2, 8) * 100\n y = np.random.rand(16, 2, 8) * 100\n for t in [np.float16, np.float32, np.float64]:\n xt = x.astype(t)\n yt = y.astype(t)\n if t == np.float16:\n # Compare fp16 theoretical gradients to fp32 numerical gradients,\n # since fp16 numerical gradients are too 
imprecise unless great\n # care is taken with choosing the inputs and the delta. This is\n # a weaker check (in particular, it does not test the op itself,\n # only its gradient), but it's much better than nothing.\n self._compareGradientX(c, xt, yt, np.float)\n self._compareGradientY(c, xt, yt, np.float)\n else:\n self._compareGradientX(c, xt, yt)\n self._compareGradientY(c, xt, yt)\n\n @test_util.run_deprecated_v1\n def testShapeMismatch(self):\n c = np.random.randint(0, 2, 8).astype(np.bool)\n x = np.random.rand(16, 3, 2) * 100\n y = np.random.rand(16, 3, 2) * 100\n for t in [\n np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,\n np.complex128\n ]:\n xt = x.astype(t)\n yt = y.astype(t)\n with self.assertRaises(ValueError):\n array_ops.where(c, xt, yt)\n\n\nclass MinMaxOpTest(test.TestCase):\n\n def _compare(self, x, y, use_gpu):\n np_min, np_max = np.minimum(x, y), np.maximum(x, y)\n with test_util.device(use_gpu=use_gpu):\n inx = ops.convert_to_tensor(x)\n iny = ops.convert_to_tensor(y)\n omin, omax = math_ops.minimum(inx, iny), math_ops.maximum(inx, iny)\n tf_min, tf_max = self.evaluate([omin, omax])\n self.assertAllEqual(np_min, tf_min)\n self.assertAllEqual(np_max, tf_max)\n\n def testBasic(self):\n x = np.random.rand(1, 3, 2) * 100.\n y = np.random.rand(1, 3, 2) * 100.\n for t in [np.float16, np.float32, np.float64, np.int32, np.int64]:\n self._compare(x.astype(t), y.astype(t), use_gpu=False)\n self._compare(x.astype(t), y.astype(t), use_gpu=True)\n\n def testDifferentShapes(self):\n x = np.random.rand(1, 3, 2) * 100.\n y = np.random.rand(2) * 100. # should broadcast\n for t in [np.float16, np.float32, np.float64, np.int32, np.int64]:\n self._compare(x.astype(t), y.astype(t), use_gpu=False)\n self._compare(x.astype(t), y.astype(t), use_gpu=True)\n\n def testScalar(self):\n x = np.random.rand(1, 3, 2) * 100.\n y = np.random.rand(1).item() * 100. 
# should broadcast\n # dropped np.float64, int64 because TF automatically converts to 32 bit\n for t in [np.float32, np.int32]:\n self._compare(x.astype(t), t(y), use_gpu=False)\n self._compare(x.astype(t), t(y), use_gpu=True)\n\n def _compareGradientX(self, func, x, y):\n with self.cached_session():\n inx = ops.convert_to_tensor(x)\n iny = ops.convert_to_tensor(y)\n out = func(inx, iny)\n s = list(np.shape(x))\n jacob_t, jacob_n = gradient_checker.compute_gradient(\n inx, s, out, s, x_init_value=x)\n if x.dtype == np.float16:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float32:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float64:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)\n\n def _compareGradientY(self, func, x, y):\n with self.cached_session():\n inx = ops.convert_to_tensor(x)\n iny = ops.convert_to_tensor(y)\n out = func(inx, iny)\n s = list(np.shape(x))\n jacob_t, jacob_n = gradient_checker.compute_gradient(\n iny, s, out, s, x_init_value=y)\n if x.dtype == np.float16:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float32:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float64:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)\n\n @test_util.run_deprecated_v1\n def testGradients(self):\n x = np.random.rand(1, 3, 2) * 100.\n # ensure x != y\n y = x + (np.random.randint(2, size=x.shape) - .5) * 2 # -1 or +1\n self._compareGradientX(math_ops.maximum, x, y)\n self._compareGradientY(math_ops.maximum, x, y)\n self._compareGradientX(math_ops.minimum, x, y)\n self._compareGradientY(math_ops.minimum, x, y)\n\n\nclass MathOpsOverloadTest(test.TestCase):\n\n def _computeTensorAndLiteral(self, x, y, dtype, func):\n with test_util.force_cpu():\n inx = ops.convert_to_tensor(x, dtype=dtype)\n z = func(inx, y) # Should use __add__, __sub__, etc.\n return self.evaluate(z)\n\n def _computeLiteralAndTensor(self, x, y, dtype, func):\n with test_util.force_cpu():\n iny = ops.convert_to_tensor(y, dtype=dtype)\n z = func(x, iny) # Should use __radd__, __rsub__, etc.\n return self.evaluate(z)\n\n def _compareBinary(self, x, y, dtype, np_func, tf_func):\n np_ans = np_func(x, y).astype(dtype.as_numpy_dtype)\n self.assertAllClose(np_ans,\n self._computeTensorAndLiteral(x, y, dtype, tf_func))\n self.assertAllClose(np_ans,\n self._computeLiteralAndTensor(x, y, dtype, tf_func))\n\n def _compareUnary(self, x, dtype, np_func, tf_func):\n np_ans = np_func(x).astype(dtype.as_numpy_dtype)\n with test_util.force_cpu():\n self.assertAllClose(\n np_ans, self.evaluate(tf_func(ops.convert_to_tensor(x, dtype=dtype))))\n\n def testOverload(self):\n dtypes = [\n dtypes_lib.float16,\n dtypes_lib.float32,\n dtypes_lib.float64,\n dtypes_lib.int32,\n dtypes_lib.int64,\n dtypes_lib.complex64,\n dtypes_lib.complex128,\n ]\n funcs = [\n (np.add, _ADD),\n (np.subtract, _SUB),\n (np.multiply, _MUL),\n (np.power, _POW),\n (np.true_divide, _TRUEDIV),\n (np.floor_divide, _FLOORDIV),\n ]\n for dtype in dtypes:\n for np_func, tf_func in funcs:\n if dtype in (dtypes_lib.complex64,\n dtypes_lib.complex128) and tf_func == _FLOORDIV:\n continue # floordiv makes no sense for complex\n self._compareBinary(10, 5, dtype, np_func, tf_func)\n # Mod only works for int32 and int64.\n for dtype in [dtypes_lib.int32, dtypes_lib.int64]:\n self._compareBinary(10, 3, dtype, np.mod, _MOD)\n\n def testOverloadComparisons(self):\n dtypes = [\n dtypes_lib.float16,\n 
dtypes_lib.float32,\n dtypes_lib.float64,\n dtypes_lib.int32,\n dtypes_lib.int64,\n ]\n funcs = [\n (np.less, _LT),\n (np.less_equal, _LE),\n (np.greater, _GT),\n (np.greater_equal, _GE),\n ]\n for dtype in dtypes:\n for np_func, tf_func in funcs:\n self._compareBinary(10, 5, dtype, np_func, tf_func)\n logical_funcs = [(np.logical_and, _AND), (np.logical_or, _OR),\n (np.logical_xor, _XOR), (np.equal, math_ops.equal),\n (np.not_equal, math_ops.not_equal)]\n for np_func, tf_func in logical_funcs:\n self._compareBinary(True, False, dtypes_lib.bool, np_func, tf_func)\n self._compareBinary(True, True, dtypes_lib.bool, np_func, tf_func)\n self._compareBinary(False, False, dtypes_lib.bool, np_func, tf_func)\n self._compareBinary(False, True, dtypes_lib.bool, np_func, tf_func)\n self._compareBinary([True, True, False, False],\n [True, False, True, False], dtypes_lib.bool, np_func,\n tf_func)\n self._compareUnary(True, dtypes_lib.bool, np.logical_not, _INV)\n self._compareUnary(False, dtypes_lib.bool, np.logical_not, _INV)\n self._compareUnary([True, False], dtypes_lib.bool, np.logical_not, _INV)\n\n\nclass IsFiniteInfNanTest(test.TestCase):\n\n def _compare(self, x, use_gpu):\n np_finite, np_inf, np_nan = np.isfinite(x), np.isinf(x), np.isnan(x)\n with test_util.device(use_gpu=use_gpu):\n inx = ops.convert_to_tensor(x)\n ofinite, oinf, onan = math_ops.is_finite(inx), math_ops.is_inf(\n inx), math_ops.is_nan(inx)\n tf_finite, tf_inf, tf_nan = self.evaluate([ofinite, oinf, onan])\n self.assertAllEqual(np_inf, tf_inf)\n self.assertAllEqual(np_nan, tf_nan)\n self.assertAllEqual(np_finite, tf_finite)\n self.assertShapeEqual(np_inf, oinf)\n self.assertShapeEqual(np_nan, onan)\n self.assertShapeEqual(np_finite, ofinite)\n\n def _testDtype(self, dtype):\n fi = np.finfo(dtype)\n data = np.array([\n 0, -1, 1, fi.resolution, -fi.resolution, fi.min, fi.max, -np.inf,\n np.inf, np.nan\n ]).astype(dtype)\n self._compare(data, use_gpu=False)\n self._compare(data, use_gpu=True)\n\n def testHalf(self):\n self._testDtype(np.float16)\n\n def testFloat(self):\n self._testDtype(np.float32)\n\n def testDouble(self):\n self._testDtype(np.float64)\n\n def testSqrt(self):\n for dtype in [np.float16, np.float32, np.float64]:\n fi = np.finfo(dtype)\n for size in [1, 3, 4, 7, 8, 63, 64, 65]:\n # For float32 Eigen uses Carmack's fast vectorized sqrt algorithm.\n # It is not accurate for very large arguments, so we test for\n # fi.max/100 instead of fi.max here.\n for value in [fi.min, -2, -1, 0, fi.tiny, 1, 2, 1000, fi.max / 100]:\n x = np.full((size,), value, dtype=dtype)\n np_y = np.sqrt(x)\n np_nan = np.isnan(np_y)\n with test_util.use_gpu():\n tf_y = math_ops.sqrt(x)\n tf_nan = math_ops.is_nan(tf_y)\n if value < 0:\n self.assertAllEqual(np_nan, self.evaluate(tf_nan))\n else:\n self.assertAllCloseAccordingToType(np_y, self.evaluate(tf_y))\n\n\nclass RoundingTest(test.TestCase):\n\n def _compare_values(self, x, y=None):\n y = np.rint(x) if y is None else np.asarray(y)\n\n tf_rint = math_ops.rint(x)\n np_rint = self.evaluate(tf_rint)\n\n self.assertAllEqual(y, np_rint)\n self.assertShapeEqual(y, tf_rint)\n\n def _compare(self, x):\n np_floor, np_ceil = np.floor(x), np.ceil(x)\n\n inx = ops.convert_to_tensor(x)\n ofloor, oceil = math_ops.floor(inx), math_ops.ceil(inx)\n tf_floor, tf_ceil = self.evaluate([ofloor, oceil])\n\n self.assertAllEqual(np_floor, tf_floor)\n self.assertAllEqual(np_ceil, tf_ceil)\n self.assertShapeEqual(np_floor, ofloor)\n self.assertShapeEqual(np_ceil, oceil)\n\n def _testDtype(self, dtype):\n data = 
(np.arange(-3, 3) / 4.).reshape(1, 3, 2).astype(dtype)\n self._compare(data)\n # TODO: rint op is not supported for float16\n if dtype is np.float16:\n return\n self._compare_values(data)\n x = [0.5, 0.5000001]\n y = [0.0, 1.0]\n self._compare_values(x, y=y)\n\n # numpy example\n x = [-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]\n y = [-2., -2., -0., 0., 2., 2., 2.]\n self._compare_values(x, y=y)\n\n def testTypes(self):\n self.skipTest(\"b/131162241\")\n for dtype in [np.float16, np.float32, np.float64]:\n self._testDtype(dtype)\n\n\nclass ComplexMakeRealImagTest(test.TestCase):\n\n def _compareMake(self, real, imag, use_gpu):\n np_ans = real + (1j) * imag\n\n with test_util.device(use_gpu=use_gpu):\n real = ops.convert_to_tensor(real)\n imag = ops.convert_to_tensor(imag)\n tf_ans = math_ops.complex(real, imag)\n out = self.evaluate(tf_ans)\n\n self.assertAllEqual(np_ans, out)\n self.assertShapeEqual(np_ans, tf_ans)\n\n def testMake(self):\n real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float32)\n imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float32)\n for use_gpu in [False, True]:\n self._compareMake(real, imag, use_gpu)\n self._compareMake(real, 12.0, use_gpu)\n self._compareMake(23.0, imag, use_gpu)\n\n def testRealImagNumericType(self):\n for use_gpu in [True, False]:\n for value in [1., 1j, 1. + 1j]:\n np_real, np_imag = np.real(value), np.imag(value)\n with test_util.device(use_gpu=use_gpu):\n tf_real = math_ops.real(value)\n tf_imag = math_ops.imag(value)\n self.assertAllEqual(np_real, self.evaluate(tf_real))\n self.assertAllEqual(np_imag, self.evaluate(tf_imag))\n\n def _compareRealImag(self, cplx, use_gpu):\n np_real, np_imag = np.real(cplx), np.imag(cplx)\n np_zeros = np_real * 0\n\n with test_util.device(use_gpu=use_gpu):\n inx = ops.convert_to_tensor(cplx)\n tf_real = math_ops.real(inx)\n tf_imag = math_ops.imag(inx)\n tf_real_real = math_ops.real(tf_real)\n tf_imag_real = math_ops.imag(tf_real)\n self.assertAllEqual(np_real, self.evaluate(tf_real))\n self.assertAllEqual(np_imag, self.evaluate(tf_imag))\n self.assertAllEqual(np_real, self.evaluate(tf_real_real))\n self.assertAllEqual(np_zeros, self.evaluate(tf_imag_real))\n\n def testRealImag64(self):\n real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float32)\n imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float32)\n cplx = real + 1j * imag\n self._compareRealImag(cplx, use_gpu=False)\n self._compareRealImag(cplx, use_gpu=True)\n\n def testRealImag128(self):\n real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float64)\n imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float64)\n cplx = real + 1j * imag\n self._compareRealImag(cplx, use_gpu=False)\n self._compareRealImag(cplx, use_gpu=True)\n\n def _compareAngle(self, cplx, use_gpu):\n np_angle = np.angle(cplx)\n\n with test_util.device(use_gpu=use_gpu):\n inx = ops.convert_to_tensor(cplx)\n tf_angle = math_ops.angle(inx)\n tf_angle_val = self.evaluate(tf_angle)\n\n self.assertAllClose(np_angle, tf_angle_val)\n self.assertShapeEqual(np_angle, tf_angle)\n\n def testAngle64(self):\n real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float32)\n imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float32)\n cplx = real + 1j * imag\n self._compareAngle(cplx, use_gpu=False)\n self._compareAngle(cplx, use_gpu=True)\n\n def testAngle(self):\n real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float64)\n imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float64)\n cplx = real + 1j * 
imag\n self._compareAngle(cplx, use_gpu=False)\n self._compareAngle(cplx, use_gpu=True)\n\n @test_util.run_deprecated_v1\n def testRealReal(self):\n for dtype in (dtypes_lib.int32, dtypes_lib.int64, dtypes_lib.float32,\n dtypes_lib.float64):\n x = array_ops.placeholder(dtype)\n y = math_ops.real(x)\n self.assertEqual(x, y)\n\n def _compareConj(self, cplx, use_gpu):\n np_ans = np.conj(cplx)\n with test_util.device(use_gpu=use_gpu):\n inx = ops.convert_to_tensor(cplx)\n tf_conj = math_ops.conj(inx)\n tf_ans = self.evaluate(tf_conj)\n self.assertAllEqual(np_ans, tf_ans)\n self.assertShapeEqual(np_ans, tf_conj)\n\n def testConj64(self):\n real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float32)\n imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float32)\n cplx = real + 1j * imag\n self._compareConj(cplx, use_gpu=False)\n self._compareConj(cplx, use_gpu=True)\n\n def testConj128(self):\n real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float64)\n imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float64)\n cplx = real + 1j * imag\n self._compareConj(cplx, use_gpu=False)\n self._compareConj(cplx, use_gpu=True)\n\n @test_util.run_deprecated_v1\n def testConjReal(self):\n for dtype in (dtypes_lib.int32, dtypes_lib.int64, dtypes_lib.float16,\n dtypes_lib.float32, dtypes_lib.float64):\n x = array_ops.placeholder(dtype)\n y = math_ops.conj(x)\n self.assertEqual(x, y)\n\n @test_util.run_deprecated_v1\n def testConjString(self):\n x = array_ops.placeholder(dtypes_lib.string)\n with self.assertRaisesRegexp(TypeError,\n r\"Expected numeric or variant tensor\"):\n math_ops.conj(x)\n\n def _compareGradient(self, x):\n # x[:, 0] is real, x[:, 1] is imag. We combine real and imag into\n # complex numbers. Then, we extract real and imag parts and\n # computes the squared sum. This is obviously the same as sum(real\n # * real) + sum(imag * imag). We just want to make sure the\n # gradient function is checked.\n with self.cached_session():\n inx = ops.convert_to_tensor(x)\n real, imag = array_ops.split(value=inx, num_or_size_splits=2, axis=1)\n real, imag = array_ops.reshape(real, [-1]), array_ops.reshape(imag, [-1])\n cplx = math_ops.complex(real, imag)\n cplx = math_ops.conj(cplx)\n loss = math_ops.reduce_sum(math_ops.square(\n math_ops.real(cplx))) + math_ops.reduce_sum(\n math_ops.square(math_ops.imag(cplx)))\n epsilon = 1e-3\n jacob_t, jacob_n = gradient_checker.compute_gradient(\n inx, list(x.shape), loss, [1], x_init_value=x, delta=epsilon)\n self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)\n\n def _compareBroadcastGradient(self, x):\n x_ = ops.convert_to_tensor(x)\n epsilon = 1e-3\n with self.cached_session():\n for args in [(x_, 0.), (0., x_)]:\n z = math_ops.reduce_sum(math_ops.abs(math_ops.complex(*args)))\n jacob_t, jacob_n = gradient_checker.compute_gradient(\n x_, list(x.shape), z, [1], x_init_value=x, delta=epsilon)\n self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)\n\n @test_util.run_deprecated_v1\n def testGradient(self):\n # complex64\n data = np.arange(1, 2, 0.10).reshape([5, 2]).astype(np.float32)\n self._compareGradient(data)\n self._compareBroadcastGradient(data)\n # complex128\n data = np.arange(1, 2, 0.10).reshape([5, 2]).astype(np.float64)\n self._compareGradient(data)\n\n def _compareMulGradient(self, data):\n # data is a float matrix of shape [n, 4]. 
data[:, 0], data[:, 1],\n # data[:, 2], data[:, 3] are real parts of x, imaginary parts of\n # x, real parts of y and imaginary parts of y.\n with self.cached_session():\n inp = ops.convert_to_tensor(data)\n xr, xi, yr, yi = array_ops.split(value=inp, num_or_size_splits=4, axis=1)\n\n def vec(x): # Reshape to a vector\n return array_ops.reshape(x, [-1])\n\n xr, xi, yr, yi = vec(xr), vec(xi), vec(yr), vec(yi)\n\n def cplx(r, i): # Combine to a complex vector\n return math_ops.complex(r, i)\n\n x, y = cplx(xr, xi), cplx(yr, yi)\n # z is x times y in complex plane.\n z = x * y\n # Defines the loss function as the sum of all coefficients of z.\n loss = math_ops.reduce_sum(math_ops.real(z) + math_ops.imag(z))\n epsilon = 0.005\n jacob_t, jacob_n = gradient_checker.compute_gradient(\n inp, list(data.shape), loss, [1], x_init_value=data, delta=epsilon)\n self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)\n\n @test_util.run_deprecated_v1\n def testMulGradient(self):\n data = np.arange(1, 2, 0.125).reshape([2, 4]).astype(np.float32)\n self._compareMulGradient(data)\n\n\nclass PolyvalTest(test.TestCase):\n\n def _runtest(self, dtype, degree):\n x = np.random.rand(2, 2).astype(dtype)\n coeffs = [np.random.rand(2, 2).astype(dtype) for _ in range(degree + 1)]\n np_val = np.polyval(coeffs, x)\n with self.cached_session():\n tf_val = math_ops.polyval(coeffs, x)\n self.assertAllClose(np_val, self.evaluate(tf_val))\n\n def testSimple(self):\n for dtype in [\n np.int32, np.float32, np.float64, np.complex64, np.complex128\n ]:\n for degree in range(5):\n self._runtest(dtype, degree)\n\n def testBroadcast(self):\n dtype = np.float32\n degree = 3\n shapes = [(1,), (2, 1), (1, 2), (2, 2)]\n for x_shape in shapes:\n for coeff_shape in shapes:\n x = np.random.rand(*x_shape).astype(dtype)\n coeffs = [\n np.random.rand(*coeff_shape).astype(dtype)\n for _ in range(degree + 1)\n ]\n np_val = np.polyval(coeffs, x)\n with self.cached_session():\n tf_val = math_ops.polyval(coeffs, x)\n self.assertAllClose(np_val, self.evaluate(tf_val))\n\n def testEmpty(self):\n x = np.random.rand(2, 2).astype(np.float32)\n coeffs = []\n np_val = np.polyval(coeffs, x)\n with self.cached_session():\n tf_val = math_ops.polyval(coeffs, x)\n self.assertAllClose(np_val, self.evaluate(tf_val))\n\n\nclass SingularGradientOpTest(test.TestCase):\n\n @test_util.run_deprecated_v1\n def testGradientAtSingularity(self):\n if not compat.forward_compatible(2020, 3, 14):\n self.skipTest(\"Skipping test for future functionality.\")\n\n ops_and_singularity = [\n (gen_math_ops.reciprocal, (0.,)),\n (gen_math_ops.rsqrt, (0.,)),\n (gen_math_ops.sqrt, (0.,)),\n (gen_math_ops.sqrt_grad, (\n 0.,\n 0.,\n )),\n (gen_math_ops.reciprocal_grad, (\n 1.,\n 0.,\n )),\n (gen_math_ops.tan, (np.pi / 2,)),\n (gen_math_ops.log, (0.,)),\n (gen_math_ops.log1p, (-1.,)),\n (gen_math_ops.acosh, (0.,)),\n (gen_math_ops.asin, (1.,)),\n (gen_math_ops.acos, (1.,)),\n (gen_math_ops.atan2, (0., 0.)),\n (gen_math_ops.div, (1., 0.)),\n (gen_math_ops.div_no_nan, (1., 0.)),\n (gen_math_ops.real_div, (1., 0.)),\n (math_ops.pow, (0., -1.)),\n ]\n for op, singularity in ops_and_singularity:\n for dtype in (dtypes_lib.half, dtypes_lib.float32, dtypes_lib.float64,\n dtypes_lib.complex64, dtypes_lib.complex128):\n if dtype.is_complex and op in [\n gen_math_ops.asin, gen_math_ops.acos, gen_math_ops.atan2\n ]:\n continue\n if dtype == dtypes_lib.half and op in [\n gen_math_ops.acosh, gen_math_ops.asin, gen_math_ops.acos,\n gen_math_ops.atan2\n ]:\n continue\n with 
self.cached_session():\n print(\"op = \", op, \", singularity = \", singularity, \", type = \",\n dtype)\n args = [constant_op.constant(s, dtype=dtype) for s in singularity]\n grad_y = constant_op.constant(0, dtype=dtype)\n y = op(*args)\n g = gradients_impl.gradients(y, args, grad_ys=grad_y)\n g_val = self.evaluate(g)\n self.assertAllEqual(g_val, np.zeros(len(singularity)))\n\n\nif __name__ == \"__main__\":\n test.main()\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for the experimental input pipeline statistics gathering ops.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized\nimport numpy as np\n\nfrom tensorflow.python.data.experimental.kernel_tests import reader_dataset_ops_test_base\nfrom tensorflow.python.data.experimental.kernel_tests import stats_dataset_test_base\nfrom tensorflow.python.data.experimental.ops import batching\nfrom tensorflow.python.data.experimental.ops import stats_aggregator\nfrom tensorflow.python.data.experimental.ops import stats_ops\nfrom tensorflow.python.data.kernel_tests import test_base\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.framework import combinations\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.platform import test\n\n\n# TODO(jsimsa): Figure out why are graph tests failing.\nclass StatsDatasetTest(stats_dataset_test_base.StatsDatasetTestBase,\n parameterized.TestCase):\n\n @combinations.generate(test_base.eager_only_combinations())\n def testBytesProduced(self):\n aggregator = stats_aggregator.StatsAggregator()\n dataset = dataset_ops.Dataset.range(100).map(\n lambda x: array_ops.tile([x], ops.convert_to_tensor([x]))).apply(\n stats_ops.bytes_produced_stats(\"bytes_produced\"))\n dataset = self.datasetExperimentalStats(dataset, aggregator)\n next_element = self.getNext(dataset, requires_initialization=True)\n\n expected_sum = 0.0\n for i in range(100):\n self.assertAllEqual(\n np.array([i] * i, dtype=np.int64), self.evaluate(next_element()))\n handle = self.getHandle(aggregator)\n self.assertStatisticsHasCount(handle, \"bytes_produced\", float(i + 1),\n i + 2)\n expected_sum += i * 8.0\n self.assertStatisticsHasSum(handle, \"bytes_produced\", expected_sum, i + 2)\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(next_element())\n handle = self.getHandle(aggregator)\n self.assertStatisticsHasCount(handle, \"bytes_produced\", 100.0, 101)\n self.assertStatisticsHasSum(handle, \"bytes_produced\", expected_sum, 101)\n\n @combinations.generate(test_base.eager_only_combinations())\n def testLatencyStats(self):\n aggregator = stats_aggregator.StatsAggregator()\n dataset = 
dataset_ops.Dataset.range(100).apply(\n stats_ops.latency_stats(\"record_latency\"))\n\n dataset = self.datasetExperimentalStats(dataset, aggregator)\n\n next_element = self.getNext(dataset, requires_initialization=True)\n\n for i in range(100):\n self.assertEqual(i, self.evaluate(next_element()))\n handle = self.getHandle(aggregator)\n self.assertStatisticsHasCount(handle, \"record_latency\", float(i + 1),\n i + 2)\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(next_element())\n handle = self.getHandle(aggregator)\n self.assertStatisticsHasCount(handle, \"record_latency\", 100.0, 101)\n\n @combinations.generate(test_base.eager_only_combinations())\n def testPrefetchBufferUtilization(self):\n aggregator = stats_aggregator.StatsAggregator()\n dataset = dataset_ops.Dataset.range(100).map(\n lambda x: array_ops.tile([x], ops.convert_to_tensor([x]))).prefetch(-1)\n dataset = self.datasetExperimentalStats(dataset, aggregator)\n next_element = self.getNext(dataset, requires_initialization=True)\n for i in range(100):\n self.assertAllEqual(\n np.array([i] * i, dtype=np.int64), self.evaluate(next_element()))\n handle = self.getHandle(aggregator)\n self.assertStatisticsHasCount(\n handle,\n self.regexForNodeName(\"PrefetchDataset\", \"buffer_utilization\"),\n float(i + 1),\n 3 * i + 4,\n offset=2)\n self.assertStatisticsContains(\n handle, self.regexForNodeName(\"PrefetchDataset\", \"buffer_capacity\"),\n 3 * i + 4)\n self.assertStatisticsContains(\n handle,\n self.regexForNodeName(\"PrefetchDataset\", \"buffer_size\"),\n 3 * i + 4,\n offset=1)\n self.assertStatisticsHasRange(\n handle,\n self.regexForNodeName(\"PrefetchDataset\", \"buffer_utilization\"),\n 0,\n 1,\n 3 * i + 4,\n offset=2)\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(next_element())\n handle = self.getHandle(aggregator)\n self.assertStatisticsHasCount(\n handle,\n self.regexForNodeName(\"PrefetchDataset\", \"buffer_utilization\"),\n 100,\n 301,\n offset=2)\n\n @combinations.generate(test_base.eager_only_combinations())\n def testPrefetchBufferScalars(self):\n aggregator = stats_aggregator.StatsAggregator()\n dataset = dataset_ops.Dataset.range(10).map(\n lambda x: array_ops.tile([x], ops.convert_to_tensor([x]))).prefetch(1)\n dataset = self.datasetExperimentalStats(dataset, aggregator)\n next_element = self.getNext(dataset, requires_initialization=True)\n\n for i in range(10):\n self.assertAllEqual(\n np.array([i] * i, dtype=np.int64), self.evaluate(next_element()))\n handle = self.getHandle(aggregator)\n self.assertStatisticsHasScalarValue(\n handle, self.regexForNodeName(\"PrefetchDataset\", \"buffer_capacity\"),\n 1, 3 * i + 4)\n self.assertStatisticsHasScalarValue(\n handle,\n self.regexForNodeName(\"PrefetchDataset\", \"buffer_size\"),\n 1,\n 3 * i + 4,\n offset=1)\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(next_element())\n\n @combinations.generate(test_base.eager_only_combinations())\n def testFilteredElementsStats(self):\n aggregator = stats_aggregator.StatsAggregator()\n dataset = dataset_ops.Dataset.range(101).filter(\n lambda x: math_ops.equal(math_ops.mod(x, 3), 0))\n dataset = self.datasetExperimentalStats(dataset, aggregator)\n next_element = self.getNext(dataset, requires_initialization=True)\n\n for i in range(34):\n self.assertEqual(i * 3, self.evaluate(next_element()))\n handle = self.getHandle(aggregator)\n if i != 0:\n self.assertStatisticsHasScalarValue(\n handle, self.regexForNodeName(\"FilterDataset\", \"dropped_elements\"),\n float(i * 2))\n 
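# "filtered_elements" is updated for every element the filter emits, so it is\n # checked on each iteration; "dropped_elements" above is only asserted once at\n # least one element has actually been dropped (hence the i != 0 guard).\n 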
self.assertStatisticsHasScalarValue(\n handle, self.regexForNodeName(\"FilterDataset\", \"filtered_elements\"),\n float(i + 1))\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(next_element())\n handle = self.getHandle(aggregator)\n self.assertStatisticsHasScalarValue(\n handle, self.regexForNodeName(\"FilterDataset\", \"dropped_elements\"),\n 67.0)\n self.assertStatisticsHasScalarValue(\n handle, self.regexForNodeName(\"FilterDataset\", \"filtered_elements\"),\n 34.0)\n\n @combinations.generate(test_base.eager_only_combinations())\n def testReinitialize(self):\n aggregator = stats_aggregator.StatsAggregator()\n dataset = dataset_ops.Dataset.range(100).apply(\n stats_ops.latency_stats(\"record_latency\"))\n dataset = self.datasetExperimentalStats(dataset, aggregator)\n\n for j in range(5):\n next_element = self.getNext(dataset, requires_initialization=True)\n for i in range(100):\n self.assertEqual(i, self.evaluate(next_element()))\n handle = self.getHandle(aggregator)\n self.assertStatisticsHasCount(handle, \"record_latency\",\n float((j * 100) + i + 1),\n (j * 100) + i + 2)\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(next_element())\n handle = self.getHandle(aggregator)\n self.assertStatisticsHasCount(handle, \"record_latency\", (j + 1) * 100.0,\n (j * 100) + 101)\n\n @combinations.generate(test_base.eager_only_combinations())\n def testNoAggregatorRegistered(self):\n dataset = dataset_ops.Dataset.range(100).apply(\n stats_ops.latency_stats(\"record_latency\"))\n\n next_element = self.getNext(dataset, requires_initialization=True)\n\n for i in range(100):\n self.assertEqual(i, self.evaluate(next_element()))\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(next_element())\n\n @combinations.generate(test_base.eager_only_combinations())\n def testMultipleTags(self):\n aggregator = stats_aggregator.StatsAggregator()\n dataset = dataset_ops.Dataset.range(100).apply(\n stats_ops.latency_stats(\"record_latency\")).apply(\n stats_ops.latency_stats(\"record_latency_2\"))\n dataset = self.datasetExperimentalStats(dataset, aggregator)\n\n next_element = self.getNext(dataset, requires_initialization=True)\n\n for i in range(100):\n self.assertEqual(i, self.evaluate(next_element()))\n handle = self.getHandle(aggregator)\n self.assertStatisticsHasCount(\n handle, \"record_latency\", float(i + 1), 2 * i + 3, offset=1)\n self.assertStatisticsHasCount(handle, \"record_latency_2\", float(i + 1),\n 2 * i + 3)\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(next_element())\n handle = self.getHandle(aggregator)\n self.assertStatisticsHasCount(\n handle, \"record_latency\", 100.0, 201, offset=1)\n self.assertStatisticsHasCount(handle, \"record_latency_2\", 100.0, 201)\n\n @combinations.generate(test_base.eager_only_combinations())\n def testRepeatedTags(self):\n aggregator = stats_aggregator.StatsAggregator()\n dataset = dataset_ops.Dataset.range(100).apply(\n stats_ops.latency_stats(\"record_latency\")).apply(\n stats_ops.latency_stats(\"record_latency\"))\n dataset = self.datasetExperimentalStats(dataset, aggregator)\n next_element = self.getNext(dataset, requires_initialization=True)\n\n for i in range(100):\n self.assertEqual(i, self.evaluate(next_element()))\n handle = self.getHandle(aggregator)\n self.assertStatisticsHasCount(handle, \"record_latency\",\n float(2 * (i + 1)), 2 * i + 3)\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(next_element())\n handle = self.getHandle(aggregator)\n 
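# With the same tag applied twice, each of the 100 elements is recorded twice,\n # giving a final "record_latency" count of 200.\n 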
self.assertStatisticsHasCount(handle, \"record_latency\", 200.0, 201)\n\n @combinations.generate(test_base.eager_only_combinations())\n def testMultipleIteratorsSameAggregator(self):\n aggregator = stats_aggregator.StatsAggregator()\n dataset = dataset_ops.Dataset.range(100).apply(\n stats_ops.latency_stats(\"record_latency\"))\n dataset = self.datasetExperimentalStats(dataset, aggregator)\n next_element1 = self.getNext(dataset, requires_initialization=True)\n next_element2 = self.getNext(dataset, requires_initialization=True)\n\n for i in range(100):\n self.assertEqual(i * 2, self.evaluate(next_element1() + next_element2()))\n handle = self.getHandle(aggregator)\n self.assertStatisticsHasCount(handle, \"record_latency\",\n float(2 * (i + 1)), 2 * i + 3)\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(next_element1())\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(next_element2())\n handle = self.getHandle(aggregator)\n self.assertStatisticsHasCount(handle, \"record_latency\", 200.0, 201)\n\n @combinations.generate(test_base.eager_only_combinations())\n def testMultipleDatasetWithPrefixes(self):\n aggregator = stats_aggregator.StatsAggregator()\n dataset = dataset_ops.Dataset.range(100).apply(\n stats_ops.latency_stats(\"record_latency\"))\n dataset = self.datasetExperimentalStats(\n dataset, aggregator, prefix=\"dataset1\")\n dataset2 = dataset_ops.Dataset.range(100).apply(\n stats_ops.latency_stats(\"record_latency\"))\n dataset2 = self.datasetExperimentalStats(\n dataset2, aggregator, prefix=\"dataset2\")\n next_element1 = self.getNext(dataset, requires_initialization=True)\n next_element2 = self.getNext(dataset2, requires_initialization=True)\n\n for i in range(100):\n self.assertEqual(i * 2, self.evaluate(next_element1() + next_element2()))\n handle = self.getHandle(aggregator)\n self.assertStatisticsHasCount(\n handle, \"dataset1::record_latency\", float(i + 1), 2 * i + 3, offset=1)\n self.assertStatisticsHasCount(handle, \"dataset2::record_latency\",\n float(i + 1), 2 * i + 3)\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(next_element1())\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(next_element2())\n handle = self.getHandle(aggregator)\n self.assertStatisticsHasCount(\n handle, \"dataset1::record_latency\", 100.0, 201, offset=1)\n self.assertStatisticsHasCount(handle, \"dataset2::record_latency\", 100.0,\n 201)\n\n @combinations.generate(test_base.eager_only_combinations())\n def testMultiplePrefetchStats(self):\n\n aggregator = stats_aggregator.StatsAggregator()\n dataset = dataset_ops.Dataset.range(10).prefetch(\n 2).filter(lambda x: math_ops.equal(math_ops.mod(x, 2), 0)).prefetch(1)\n\n dataset = self.datasetExperimentalStats(dataset, aggregator)\n next_element = self.getNext(dataset, requires_initialization=True)\n\n for i in range(5):\n self.assertEqual(i * 2, self.evaluate(next_element()))\n handle = self.getHandle(aggregator)\n # TODO(shivaniagarwal): using exact name of prefetch node than the regex,\n # to differentiate between two prefetch. 
This might break in future, at\n # which point, it would be best to disable this test.\n self.assertStatisticsHasScalarValue(\n handle, \"PrefetchDataset/_5::buffer_capacity\", 2)\n self.assertStatisticsContains(handle, \"PrefetchDataset/_5::buffer_size\")\n self.assertStatisticsHasScalarValue(\n handle, \"PrefetchDataset/_8::buffer_capacity\", 1)\n self.assertStatisticsContains(handle, \"PrefetchDataset/_8::buffer_size\")\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(next_element())\n\n\nclass ThreadUtilizationStatsTest(stats_dataset_test_base.StatsDatasetTestBase,\n parameterized.TestCase):\n\n @combinations.generate(test_base.eager_only_combinations())\n def testMapBufferUtilization(self):\n\n def dataset_fn():\n return dataset_ops.Dataset.range(10).map(\n lambda x: array_ops.tile([x], ops.convert_to_tensor([x])),\n num_parallel_calls=4)\n\n self.parallelCallsStats(\n dataset_fn, {\"ParallelMapDataset\"}, 10, function_processing_time=True)\n\n @combinations.generate(test_base.eager_only_combinations())\n def testMapAutoTuneBufferUtilization(self):\n\n def dataset_fn():\n return dataset_ops.Dataset.range(10).map(\n lambda x: array_ops.tile([x], ops.convert_to_tensor([x])),\n num_parallel_calls=dataset_ops.AUTOTUNE)\n\n self.parallelCallsStats(\n dataset_fn, {\"ParallelMapDataset\"}, 10, function_processing_time=True)\n\n @combinations.generate(test_base.eager_only_combinations())\n def testInterleaveAutoTuneBufferUtilization(self):\n\n def dataset_fn():\n\n def interleave_fn(_):\n return dataset_ops.Dataset.range(\n 10).map(lambda x: array_ops.tile([x], ops.convert_to_tensor([x])))\n\n return dataset_ops.Dataset.range(1).interleave(\n interleave_fn,\n cycle_length=1,\n num_parallel_calls=dataset_ops.AUTOTUNE)\n\n self.parallelCallsStats(dataset_fn, {\"ParallelInterleaveDatasetV2\"}, 10)\n\n @combinations.generate(test_base.eager_only_combinations())\n def testMapAndBatchAutoTuneBufferUtilization(self):\n\n def dataset_fn():\n return dataset_ops.Dataset.range(100).apply(\n batching.map_and_batch(\n lambda x: array_ops.tile([x], ops.convert_to_tensor([2])),\n num_parallel_calls=dataset_ops.AUTOTUNE,\n batch_size=16))\n\n num_output = 100 // 16 + 1\n self.parallelCallsStats(\n dataset_fn, {\"MapAndBatchDataset\"},\n num_output,\n check_elements=False,\n function_processing_time=True)\n\n\nclass FeatureStatsDatasetTest(\n stats_dataset_test_base.StatsDatasetTestBase,\n reader_dataset_ops_test_base.MakeBatchedFeaturesDatasetTestBase,\n parameterized.TestCase):\n\n @combinations.generate(test_base.eager_only_combinations())\n def testFeaturesStats(self):\n num_epochs = 5\n total_records = num_epochs * self._num_records\n batch_size = 2\n\n def dataset_fn():\n return self.make_batch_feature(\n filenames=self.test_filenames[0],\n num_epochs=num_epochs,\n batch_size=batch_size,\n shuffle=True,\n shuffle_seed=5,\n drop_final_batch=False)\n\n num_output = total_records // batch_size\n if total_records % batch_size:\n num_output = total_records // batch_size + 1\n\n self.parallelCallsStats(\n dataset_fn, {\"ParseExampleDataset\"},\n num_output,\n check_elements=False)\n\n aggregator = stats_aggregator.StatsAggregator()\n dataset = self.datasetExperimentalStats(\n dataset_fn(), aggregator, prefix=\"record_stats\")\n\n next_element = self.getNext(dataset, requires_initialization=True)\n\n for _ in range(num_output):\n self.evaluate(next_element())\n\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(next_element())\n handle = self.getHandle(aggregator)\n 
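# The remaining assertions check the aggregated "record_stats" statistics for\n # the ParseExampleDataset: per-record feature counts and feature-value counts\n # accumulated over all parsed records.\n 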
self.assertStatisticsHasCount(\n handle,\n self.regexForNodeName(\"record_stats::ParseExampleDataset\",\n \"features_count\"), total_records)\n self.assertStatisticsHasCount(\n handle,\n self.regexForNodeName(\"record_stats::ParseExampleDataset\",\n \"feature_values_count\"), total_records)\n self.assertStatisticsHasSum(\n handle,\n self.regexForNodeName(\"record_stats::ParseExampleDataset\",\n \"features_count\"), total_records * 4)\n self.assertStatisticsHasSum(\n handle,\n self.regexForNodeName(\"record_stats::ParseExampleDataset\",\n \"feature_values_count\"),\n self._sum_keywords(1) * num_epochs + 3 * total_records)\n\n\nif __name__ == \"__main__\":\n test.main()\n", "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Test configs for transpose_conv.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.lite.testing.zip_test_utils import create_tensor_data\nfrom tensorflow.lite.testing.zip_test_utils import make_zip_of_tests\nfrom tensorflow.lite.testing.zip_test_utils import register_make_test_function\n\n\n# Since compute output_shape is fairly complicated for\n# tf.nn.conv2d_transpose input_sizes argument, so we here first perform a\n# \"conv2d\" operation to get the output, then we use the output to feed in\n# tf.nn.conv2d_backprop_input.\n# This test will depend on the \"conv2d\" operation's correctness.\n@register_make_test_function()\ndef make_transpose_conv_tests(options):\n \"\"\"Make a set of tests to do transpose_conv.\"\"\"\n\n # Tensorflow only supports equal strides\n test_parameters = [\n {\n \"input_shape\": [[1, 3, 4, 1], [1, 10, 10, 3], [3, 20, 20, 1]],\n \"filter_size\": [[1, 1], [1, 2], [3, 3]],\n \"strides\": [[1, 1, 1, 1], [1, 3, 3, 1]],\n \"padding\": [\"SAME\", \"VALID\"],\n \"data_format\": [\"NHWC\"],\n \"channel_multiplier\": [1, 2],\n \"output_shape\": [[]],\n \"fully_quantize\": [False]\n },\n # TODO(yunluli): Adding simple tests for now to unblock edgetpu debugging.\n # Need to add more test cases.\n {\n \"input_shape\": [[1, 3, 3, 1]],\n \"filter_size\": [[3, 3, 2, 1]],\n \"strides\": [[1, 1, 1, 1]],\n \"padding\": [\"SAME\"],\n \"data_format\": [\"NHWC\"],\n \"channel_multiplier\": [1],\n \"output_shape\": [[1, 3, 3, 2]],\n \"fully_quantize\": [True]\n },\n {\n \"input_shape\": [[1, 3, 3, 1]],\n \"filter_size\": [[3, 3, 2, 1]],\n \"strides\": [[1, 2, 2, 1]],\n \"padding\": [\"SAME\"],\n \"data_format\": [\"NHWC\"],\n \"channel_multiplier\": [1],\n \"output_shape\": [[1, 6, 6, 2]],\n \"fully_quantize\": [True]\n },\n {\n \"input_shape\": [[1, 4, 3, 1]],\n \"filter_size\": [[3, 3, 2, 1]],\n \"strides\": [[1, 2, 2, 1]],\n \"padding\": [\"SAME\"],\n \"data_format\": [\"NHWC\"],\n \"channel_multiplier\": [1],\n \"output_shape\": [[1, 8, 6, 2]],\n \"fully_quantize\": [True]\n }\n ]\n\n 
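# The helper below derives [input_shape, filter_shape] from the parameters: in\n # the fully-quantized cases "filter_size" is already a complete 4-D filter\n # shape, otherwise it is an [H, W] size that gets the input/output channel\n # counts appended.\n 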
def get_tensor_shapes(parameters):\n input_shape = parameters[\"input_shape\"]\n filter_size = parameters[\"filter_size\"]\n if not parameters[\"fully_quantize\"]:\n filter_shape = filter_size + [\n input_shape[3], parameters[\"channel_multiplier\"]\n ]\n return [input_shape, filter_shape]\n return [input_shape, filter_size]\n\n def build_graph(parameters):\n \"\"\"Build a transpose_conv graph given `parameters`.\"\"\"\n input_shape, filter_shape = get_tensor_shapes(parameters)\n input_tensor = tf.compat.v1.placeholder(\n dtype=tf.float32, name=\"input\", shape=input_shape)\n\n filter_input = tf.compat.v1.placeholder(\n dtype=tf.float32, name=\"filter\", shape=filter_shape)\n\n if not parameters[\"fully_quantize\"]:\n input_tensors = [input_tensor, filter_input]\n conv_outputs = tf.nn.conv2d(\n input_tensor,\n filter_input,\n strides=parameters[\"strides\"],\n padding=parameters[\"padding\"],\n data_format=parameters[\"data_format\"])\n out = tf.compat.v1.nn.conv2d_backprop_input(\n input_shape,\n filter_input,\n conv_outputs,\n strides=parameters[\"strides\"],\n padding=parameters[\"padding\"],\n data_format=parameters[\"data_format\"])\n else:\n input_tensors = [input_tensor]\n filter_input = create_tensor_data(\n np.float32, filter_shape, min_value=-1, max_value=1)\n out = tf.nn.conv2d_transpose(\n input_tensor,\n filter_input,\n parameters[\"output_shape\"],\n strides=parameters[\"strides\"],\n padding=parameters[\"padding\"],\n data_format=parameters[\"data_format\"])\n\n return input_tensors, [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n input_shape, filter_shape = get_tensor_shapes(parameters)\n if not parameters[\"fully_quantize\"]:\n values = [\n create_tensor_data(np.float32, input_shape),\n create_tensor_data(np.float32, filter_shape)\n ]\n else:\n values = [\n create_tensor_data(\n np.float32, input_shape, min_value=-1, max_value=1),\n ]\n\n return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))\n\n make_zip_of_tests(options, test_parameters, build_graph, build_inputs)\n", "# Copyright 2018 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for vectorization of array kernels.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.compat import compat\nfrom tensorflow.python.eager import backprop\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import\nfrom tensorflow.python.ops.parallel_for import control_flow_ops as pfor_control_flow_ops\nfrom tensorflow.python.ops.parallel_for.test_util import PForTestCase\nfrom tensorflow.python.platform import test\n\n\n# LINT.IfChange\nmatrix_diag_v3_forward_compat_date = (2019, 12, 6)\n# LINT.ThenChange(\n# //tensorflow/compiler/tests/matrix_diag_ops_test.py,\n# //tensorflow/python/kernel_tests/diag_op_test.py,\n# //tensorflow/python/ops/array_ops.py\n# )\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass ArrayTest(PForTestCase):\n\n def test_gather(self):\n x = random_ops.random_uniform([3, 3, 3])\n x2 = array_ops.placeholder_with_default(x, shape=None) # Has dynamic shape.\n\n def loop_fn(i):\n outputs = []\n x_i = array_ops.gather(x, i)\n for y in [x, x2, x_i]:\n axes = [0] if y is x_i else [0, 2, -1]\n for axis in axes:\n outputs.append(array_ops.gather(y, 2, axis=axis))\n outputs.append(array_ops.gather(y,\n math_ops.cast(2, dtypes.int64),\n axis=axis))\n outputs.append(array_ops.gather(y,\n 2,\n axis=math_ops.cast(\n axis, dtypes.int64)))\n outputs.append(array_ops.gather(y,\n math_ops.cast(i, dtypes.int64),\n axis=axis))\n outputs.append(array_ops.gather(y, [i], axis=axis))\n outputs.append(array_ops.gather(y, [i, 2], axis=axis))\n outputs.append(array_ops.gather(y, [[2, i], [i, 1]], axis=axis))\n return outputs\n\n self._test_loop_fn(loop_fn, 3)\n\n def test_gather_nd(self):\n x = random_ops.random_uniform([3, 3, 3])\n\n def loop_fn(i):\n outputs = []\n x_i = array_ops.gather(x, i)\n outputs.append(array_ops.gather_nd(x_i, [0], batch_dims=0))\n outputs.append(array_ops.gather_nd(x_i, [i], batch_dims=0))\n outputs.append(array_ops.gather_nd(x_i, [[i], [i], [i]], batch_dims=1))\n return outputs\n\n self._test_loop_fn(loop_fn, 3)\n\n def test_shape(self):\n x = random_ops.random_uniform([3, 2, 3])\n\n def loop_fn(i):\n x_i = array_ops.gather(x, i)\n return array_ops.shape(x_i), array_ops.shape(x_i, out_type=dtypes.int64)\n\n self._test_loop_fn(loop_fn, 3)\n\n def test_size(self):\n x = random_ops.random_uniform([3, 2, 3])\n\n def loop_fn(i):\n x_i = array_ops.gather(x, i)\n return array_ops.size(x_i), array_ops.size(x_i, out_type=dtypes.int64)\n\n self._test_loop_fn(loop_fn, 
3)\n\n def test_rank(self):\n x = random_ops.random_uniform([3, 2, 3])\n\n def loop_fn(i):\n x_i = array_ops.gather(x, i)\n return array_ops.rank(x_i)\n\n self._test_loop_fn(loop_fn, 3)\n\n def test_shape_n(self):\n x = random_ops.random_uniform([3, 2, 3])\n y = random_ops.random_uniform([3])\n\n def loop_fn(i):\n x_i = array_ops.gather(x, i)\n y_i = array_ops.gather(y, i)\n return array_ops.shape_n([x_i, x, y, y_i]), array_ops.shape_n(\n [x_i, x, y, y_i], out_type=dtypes.int64)\n\n self._test_loop_fn(loop_fn, 3)\n\n def test_reshape(self):\n x = random_ops.random_uniform([3, 2, 3])\n\n def loop_fn(i):\n x1 = array_ops.gather(x, i)\n return array_ops.reshape(x1, [-1]), array_ops.reshape(x1, [1, 3, 1, -1])\n\n self._test_loop_fn(loop_fn, 3)\n\n def test_broadcast_to(self):\n x = random_ops.random_uniform([3, 2, 1, 3])\n\n def loop_fn(i):\n x1 = array_ops.gather(x, i)\n return (array_ops.broadcast_to(x1, [2, 2, 3]),\n array_ops.broadcast_to(x1, [1, 2, 1, 3]))\n\n self._test_loop_fn(loop_fn, 3)\n\n def test_expand_dims(self):\n x = random_ops.random_uniform([3, 2, 3])\n\n def loop_fn(i):\n x1 = array_ops.gather(x, i)\n return array_ops.expand_dims(\n x1, axis=-1), array_ops.expand_dims(\n x1, axis=1)\n\n self._test_loop_fn(loop_fn, 3)\n\n def test_one_hot(self):\n indices = random_ops.random_uniform(\n [3, 2, 3], minval=0, maxval=4, dtype=dtypes.int32)\n\n def loop_fn(i):\n indices_i = array_ops.gather(indices, i)\n return (array_ops.one_hot(indices_i, depth=4, on_value=2., off_value=-2.),\n array_ops.one_hot(indices_i, depth=4, axis=1))\n\n self._test_loop_fn(loop_fn, 3)\n\n def test_searchsorted(self):\n sorted_inputs = math_ops.cumsum(random_ops.random_uniform([3, 2, 4]),\n axis=-1)\n values = random_ops.random_uniform([2, 3], minval=-1, maxval=4.5)\n\n def loop_fn(i):\n inputs_i = array_ops.gather(sorted_inputs, i)\n return [array_ops.searchsorted(inputs_i, values, out_type=dtypes.int32,\n side=\"left\"), # creates LowerBound op.\n array_ops.searchsorted(inputs_i, values, out_type=dtypes.int64,\n side=\"right\")] # creates UpperBound op.\n\n self._test_loop_fn(loop_fn, 3)\n\n def test_slice(self):\n x = random_ops.random_uniform([3, 2, 3])\n\n def loop_fn(i):\n x1 = array_ops.gather(x, i)\n return array_ops.slice(x1, begin=(0, 1), size=(2, 1))\n\n self._test_loop_fn(loop_fn, 3)\n\n def test_tile(self):\n x = random_ops.random_uniform([3, 2, 3])\n\n def loop_fn(i):\n x1 = array_ops.gather(x, i)\n return array_ops.tile(x1, [2, 1])\n\n self._test_loop_fn(loop_fn, 3)\n\n def test_tile_loop_dependent(self):\n x = random_ops.random_uniform([3, 2, 3])\n\n def loop_fn(i):\n x1 = array_ops.gather(x, i)\n return array_ops.tile(x1, [i, 1])\n\n with self.assertRaisesRegexp(ValueError, \"expected to be loop invariant\"):\n pfor_control_flow_ops.pfor(loop_fn, 2)\n\n def test_pack(self):\n x = random_ops.random_uniform([3, 2, 3])\n y = random_ops.random_uniform([2, 3])\n\n def loop_fn(i):\n x1 = array_ops.gather(x, i)\n return array_ops.stack([x1, y], axis=-1)\n\n self._test_loop_fn(loop_fn, 1)\n\n def test_unpack(self):\n x = random_ops.random_uniform([3, 2, 3, 4])\n\n def loop_fn(i):\n x_i = array_ops.gather(x, i)\n return array_ops.unstack(\n x_i, 4, axis=-1), array_ops.unstack(\n x_i, 3, axis=1)\n\n self._test_loop_fn(loop_fn, 3)\n\n def test_pad(self):\n x = random_ops.random_uniform([3, 2, 3])\n padding = constant_op.constant([[1, 2], [3, 4]])\n\n def loop_fn(i):\n x1 = array_ops.gather(x, i)\n return array_ops.pad(x1, padding, mode=\"CONSTANT\")\n\n self._test_loop_fn(loop_fn, 3)\n\n def 
test_split(self):\n x = random_ops.random_uniform([3, 2, 3])\n\n def loop_fn(i):\n x1 = array_ops.gather(x, i)\n return array_ops.split(x1, 2, axis=0), array_ops.split(x1, 3, axis=-1)\n\n self._test_loop_fn(loop_fn, 3)\n\n def test_split_v(self):\n x = random_ops.random_uniform([3, 6, 3])\n\n def loop_fn(i):\n x1 = array_ops.gather(x, i)\n return (array_ops.split(x1, [2, 1, 3], axis=0),\n array_ops.split(x1, [3], axis=-1))\n\n self._test_loop_fn(loop_fn, 3)\n\n def test_squeeze(self):\n x = random_ops.random_uniform([5, 1, 2, 1])\n\n def loop_fn(i):\n x1 = array_ops.gather(x, i)\n return (array_ops.squeeze(x1, axis=0),\n array_ops.squeeze(x1, axis=-1),\n array_ops.squeeze(x1))\n\n self._test_loop_fn(loop_fn, 3)\n\n def test_transpose(self):\n x = random_ops.random_uniform([3, 2, 3, 4])\n\n def loop_fn(i):\n x1 = array_ops.gather(x, i)\n return array_ops.transpose(x1, [2, 1, 0])\n\n self._test_loop_fn(loop_fn, 3)\n\n def test_zeros_like(self):\n x = random_ops.random_uniform([3, 2, 3])\n\n def loop_fn(i):\n x1 = array_ops.gather(x, i)\n z = array_ops.zeros_like(x1),\n return z, z + x1\n\n self._test_loop_fn(loop_fn, 3)\n\n def test_concat_v2(self):\n x = random_ops.random_uniform([3, 2, 3])\n y = random_ops.random_uniform([2, 3])\n\n def loop_fn(i):\n x1 = array_ops.gather(x, i)\n return array_ops.concat(\n [x1, x1, y], axis=0), array_ops.concat(\n [x1, x1, y], axis=-1)\n\n self._test_loop_fn(loop_fn, 3)\n\n def test_unary_cwise_ops(self):\n for op in [array_ops.identity, array_ops.stop_gradient]:\n with backprop.GradientTape(persistent=True) as g:\n x = random_ops.random_uniform([3, 5])\n g.watch(x)\n\n # pylint: disable=cell-var-from-loop\n def loop_fn(i):\n with g:\n x1 = array_ops.gather(x, i)\n y = op(x1) + x1\n loss = nn.l2_loss(y)\n return op(x), y, g.gradient(loss, x1)\n\n # pylint: enable=cell-var-from-loop\n\n self._test_loop_fn(loop_fn, 3)\n\n def test_identity_n(self):\n x = random_ops.random_uniform([3, 4])\n\n def loop_fn(i):\n return array_ops.identity_n([x, array_ops.gather(x, i)])\n\n self._test_loop_fn(loop_fn, 3)\n\n def test_matrix_band_part(self):\n x = random_ops.random_uniform([3, 4, 2, 2])\n\n for num_lower, num_upper in ((0, -1), (-1, 0), (1, 1)):\n # pylint: disable=cell-var-from-loop\n def loop_fn(i):\n return array_ops.matrix_band_part(\n array_ops.gather(x, i),\n num_lower=num_lower,\n num_upper=num_upper)\n # pylint: enable=cell-var-from-loop\n\n self._test_loop_fn(loop_fn, 3)\n\n def test_matrix_diag(self):\n x = random_ops.random_uniform([3, 2, 4])\n\n def loop_fn(i):\n diagonal = array_ops.gather(x, i)\n if compat.forward_compatible(*matrix_diag_v3_forward_compat_date):\n return array_ops.matrix_diag(\n diagonal, k=(0, 1), num_rows=4, num_cols=5, align=\"RIGHT_LEFT\")\n return array_ops.matrix_diag(diagonal)\n\n self._test_loop_fn(loop_fn, 3)\n\n def test_matrix_diag_part(self):\n x = random_ops.random_uniform([3, 4, 6])\n\n def loop_fn(i):\n input = array_ops.gather(x, i) # pylint: disable=redefined-builtin\n if compat.forward_compatible(*matrix_diag_v3_forward_compat_date):\n return array_ops.matrix_diag_part(\n input, k=(-2, 0), padding_value=3, align=\"RIGHT_LEFT\")\n return array_ops.matrix_diag_part(input)\n\n self._test_loop_fn(loop_fn, 3)\n\n def test_matrix_set_diag(self):\n matrices = random_ops.random_uniform([3, 4, 4])\n diags = random_ops.random_uniform([3, 4])\n bands = random_ops.random_uniform([3, 3, 4])\n\n def loop_fn(i):\n matrix_i = array_ops.gather(matrices, i)\n diag_i = array_ops.gather(diags, i)\n results = [\n 
array_ops.matrix_set_diag(matrix_i, diag_i),\n array_ops.matrix_set_diag(matrices[0, ...], diag_i),\n array_ops.matrix_set_diag(matrix_i, diags[0, ...]),\n ]\n\n if compat.forward_compatible(*matrix_diag_v3_forward_compat_date):\n k = (-1, 1)\n band_i = array_ops.gather(bands, i)\n for align in [\"RIGHT_LEFT\", \"LEFT_RIGHT\"]:\n results.extend([\n array_ops.matrix_set_diag(matrix_i, band_i, k=k, align=align),\n array_ops.matrix_set_diag(\n matrices[0, ...], band_i, k=k, align=align),\n array_ops.matrix_set_diag(\n matrix_i, bands[0, ...], k=k, align=align)\n ])\n return results\n\n self._test_loop_fn(loop_fn, 3)\n\n def test_strided_slice(self):\n with backprop.GradientTape(persistent=True) as g:\n x = random_ops.random_uniform([3, 3, 4, 4, 2, 2, 2])\n g.watch(x)\n\n def loop_fn(i):\n with g:\n x_i = array_ops.gather(x, i)\n y = x_i[:2, ::2, 1::3, ..., array_ops.newaxis, 1]\n loss = nn.l2_loss(y)\n return y, g.gradient(loss, x_i)\n\n self._test_loop_fn(loop_fn, 3)\n\n\nif __name__ == \"__main__\":\n test.main()\n", "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Helpers to convert variables to constants in TensorFlow 2.0.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.core.framework import attr_value_pb2\nfrom tensorflow.core.framework import graph_pb2\nfrom tensorflow.core.framework import tensor_shape_pb2\nfrom tensorflow.core.framework import variable_pb2\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.core.protobuf import meta_graph_pb2\nfrom tensorflow.python.eager import wrap_function\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.grappler import tf_optimizer\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.util import object_identity\nfrom tensorflow.python.training.saver import export_meta_graph\n\n\n_CONDITIONAL_OPS = set([\"If\", \"StatelessIf\"])\n_LOOP_OPS = set([\"While\", \"StatelessWhile\"])\n_CONTROL_FLOW_OPS = _CONDITIONAL_OPS.union(_LOOP_OPS)\n\n\ndef disable_lower_using_switch_merge(graph_def):\n \"\"\"Set '_lower_using_switch_merge' attributes to False.\n\n Sets the attribute to False in the NodeDefs in the main graph and the NodeDefs\n in each function's graph.\n\n Args:\n graph_def: GraphDef proto.\n\n Returns:\n GraphDef\n \"\"\"\n output_graph_def = graph_pb2.GraphDef()\n output_graph_def.CopyFrom(graph_def)\n\n def disable_control_flow_lowering(node):\n if node.op in _CONTROL_FLOW_OPS:\n node.attr[\"_lower_using_switch_merge\"].b = False\n\n for node in output_graph_def.node:\n disable_control_flow_lowering(node)\n\n if output_graph_def.library:\n for func in output_graph_def.library.function:\n for node in func.node_def:\n disable_control_flow_lowering(node)\n return 
output_graph_def\n\n\ndef _run_inline_graph_optimization(func, lower_control_flow):\n \"\"\"Apply function inline optimization to the graph.\n\n Returns the GraphDef after Grappler's function inlining optimization is\n applied. This optimization does not work on models with control flow.\n\n Args:\n func: ConcreteFunction.\n lower_control_flow: Boolean indicating whether or not to lower control flow\n ops such as If and While. (default True)\n\n Returns:\n GraphDef\n \"\"\"\n graph_def = func.graph.as_graph_def()\n if not lower_control_flow:\n graph_def = disable_lower_using_switch_merge(graph_def)\n\n # In some cases, a secondary implementation of the function (e.g. for GPU) is\n # written to the \"api_implements\" attribute. (e.g. `tf.keras.layers.LSTM` in\n # TF2 produces a CuDNN-based RNN for GPU).\n # This function suppose to inline all functions calls, but \"api_implements\"\n # prevents this from happening. Removing the attribute solves the problem.\n # To learn more about \"api_implements\", see:\n # tensorflow/core/grappler/optimizers/implementation_selector.h\n for function in graph_def.library.function:\n if \"api_implements\" in function.attr:\n del function.attr[\"api_implements\"]\n\n meta_graph = export_meta_graph(graph_def=graph_def, graph=func.graph)\n\n # Clear the initializer_name for the variables collections, since they are not\n # needed after saved to saved_model.\n for name in [\n \"variables\", \"model_variables\", \"trainable_variables\", \"local_variables\"\n ]:\n raw_list = []\n for raw in meta_graph.collection_def[\"variables\"].bytes_list.value:\n variable = variable_pb2.VariableDef()\n variable.ParseFromString(raw)\n variable.ClearField(\"initializer_name\")\n raw_list.append(variable.SerializeToString())\n meta_graph.collection_def[name].bytes_list.value[:] = raw_list\n\n # Add a collection 'train_op' so that Grappler knows the outputs.\n fetch_collection = meta_graph_pb2.CollectionDef()\n for array in func.inputs + func.outputs:\n fetch_collection.node_list.value.append(array.name)\n meta_graph.collection_def[\"train_op\"].CopyFrom(fetch_collection)\n\n # Initialize RewriterConfig with everything disabled except function inlining.\n config = config_pb2.ConfigProto()\n rewrite_options = config.graph_options.rewrite_options\n rewrite_options.min_graph_nodes = -1 # do not skip small graphs\n rewrite_options.optimizers.append(\"function\")\n return tf_optimizer.OptimizeGraph(config, meta_graph)\n\n\ndef _get_tensor_name(name):\n \"\"\"Returns the name of the input tensor.\n\n Args:\n name: str\n\n Returns:\n str\n \"\"\"\n return name.split(\":\")[0]\n\n\ndef _get_new_function_name(name):\n \"\"\"Returns the function name with '_frozen' appended.\n\n Args:\n name: str\n\n Returns:\n str\n \"\"\"\n return name + \"_frozen\"\n\n\ndef _get_node_defs_list(graph_def):\n \"\"\"Returns a list of NodeDefs in the GraphDef.\n\n This list consists of all NodeDefs in the main graph as well as all control\n flow NodeDefs in the functions.\n\n The remaining NodeDefs in the functions are not included because the op names\n are not unique and the variables are handled differently than the main graph.\n The control flow ops need to be extracted because they are need their\n attributes to be updated similar to the control flow ops in the main graph.\n\n Args:\n graph_def: GraphDef proto.\n\n Returns:\n [NodeDef]\n \"\"\"\n node_defs = list(graph_def.node)\n\n if graph_def.library:\n for func in graph_def.library.function:\n node_defs.extend(\n [node for node in func.node_def if 
node.op in _CONTROL_FLOW_OPS])\n return node_defs\n\n\ndef _get_tensor_data(func):\n \"\"\"Gets the tensor data for all Placeholders in the model.\n\n Returns a dictionary that maps the tensor name to a dictionary containing:\n data: numpy data\n index: int index in func.graph.captures\n is_variable: bool indicating whether the tensor is a variable or not\n\n Args:\n func: ConcreteFunction.\n\n Returns:\n Dict\n \"\"\"\n tensor_data = {}\n map_index_to_variable = {}\n for var in func.graph.variables:\n for idx, captured_input in enumerate(func.captured_inputs):\n if var.handle is captured_input: # pylint: disable=protected-access\n map_index_to_variable[idx] = var\n break\n\n # Iterates through all captures which are represented as Placeholders.\n for idx, (val_tensor, name_tensor) in enumerate(func.graph.captures):\n tensor_name = _get_tensor_name(name_tensor.name)\n is_variable = idx in map_index_to_variable\n if is_variable:\n data = map_index_to_variable[idx].numpy()\n else:\n data = val_tensor.numpy()\n tensor_data[tensor_name] = {\n \"data\": data,\n \"index\": idx,\n \"is_variable\": is_variable,\n }\n return tensor_data\n\n\ndef _get_control_flow_function_data(node_defs, tensor_data, name_to_node):\n \"\"\"Gets the types and shapes for the parameters to the function.\n\n Creates a map from function name to a list of types and a list of shapes that\n correspond with the function arguments. The data is primarily determined from\n the corresponding \"If\" or \"While\" op. If the argument is a resource variable,\n then the type is determined from the type of the data contained within the\n Tensor. The shape data is only determined in the case of the \"While\" op.\n\n `is_also_output_type` is used to identify the \"While\" bodies that require the\n output types to be updated at the same time the input types are updated.\n\n Args:\n node_defs: List of NodeDefs.\n tensor_data: {str name : Tensor}.\n name_to_node: Dictionary mapping node name to node object.\n\n Returns:\n {str function name : {\"types\" : [int representing DataType],\n \"shapes\" : [[int] representing TensorShape]],\n \"is_also_output_type\" : bool}\n \"\"\"\n func_data = {}\n\n def get_source_node_name_through_identities(node_name):\n # Trace the source node along with a chain of Identity nodes.\n # For example, given Plaecholder -> Identity -> Identity -> node_name\n # The function will return the name of the Placeholder.\n while name_to_node[node_name].op == \"Identity\":\n node_name = _get_tensor_name(name_to_node[node_name].input[0])\n return node_name\n\n def get_resource_type(node_name):\n node_name = get_source_node_name_through_identities(node_name)\n\n numpy_type = tensor_data[node_name][\"data\"].dtype\n return dtypes.as_dtype(numpy_type).as_datatype_enum\n\n def get_resource_shape(node_name):\n node_name = get_source_node_name_through_identities(node_name)\n\n return tensor_shape_pb2.TensorShapeProto(dim=[\n tensor_shape_pb2.TensorShapeProto.Dim(size=dim)\n for dim in tensor_data[node_name][\"data\"].shape\n ])\n\n def add_value(func_name, arg_types, output_shapes, is_also_output_type):\n func_data[func_name] = {\n \"types\": arg_types,\n \"shapes\": output_shapes,\n \"is_also_output_type\": is_also_output_type\n }\n\n for node in node_defs:\n if node.op in _CONDITIONAL_OPS:\n arg_types = [dtype for dtype in node.attr[\"Tin\"].list.type]\n\n for idx in range(len(arg_types)):\n if arg_types[idx] == dtypes.resource:\n # Skip first index which represents the condition.\n arg_types[idx] = 
get_resource_type(node.input[idx + 1])\n\n add_value(node.attr[\"then_branch\"].func.name, arg_types, None, False)\n add_value(node.attr[\"else_branch\"].func.name, arg_types, None, False)\n elif node.op in _LOOP_OPS:\n arg_types = [dtype for dtype in node.attr[\"T\"].list.type]\n output_shapes = [shape for shape in node.attr[\"output_shapes\"].list.shape]\n\n for idx in range(len(arg_types)):\n if arg_types[idx] == dtypes.resource:\n input_name = node.input[idx]\n arg_types[idx] = get_resource_type(input_name)\n output_shapes[idx] = get_resource_shape(input_name)\n\n add_value(node.attr[\"body\"].func.name, arg_types, output_shapes, True)\n add_value(node.attr[\"cond\"].func.name, arg_types, output_shapes, False)\n return func_data\n\n\ndef _populate_const_op(output_node, node_name, dtype, data, data_shape):\n \"\"\"Creates a Const op.\n\n Args:\n output_node: TensorFlow NodeDef.\n node_name: str node name.\n dtype: AttrValue with a populated .type field.\n data: numpy data value.\n data_shape: Tuple of integers containing data shape.\n \"\"\"\n output_node.op = \"Const\"\n output_node.name = node_name\n output_node.attr[\"dtype\"].CopyFrom(dtype)\n tensor = tensor_util.make_tensor_proto(\n data, dtype=dtype.type, shape=data_shape)\n output_node.attr[\"value\"].tensor.CopyFrom(tensor)\n\n\ndef _populate_identity_op(output_node, input_node):\n \"\"\"Creates an Identity op from a ReadVariable op.\n\n Args:\n output_node: TensorFlow NodeDef.\n input_node: TensorFlow NodeDef.\n \"\"\"\n output_node.op = \"Identity\"\n output_node.name = input_node.name\n output_node.input.append(input_node.input[0])\n output_node.attr[\"T\"].CopyFrom(input_node.attr[\"dtype\"])\n if \"_class\" in input_node.attr:\n output_node.attr[\"_class\"].CopyFrom(input_node.attr[\"_class\"])\n\n\ndef _populate_if_op(output_node, input_node, function_data):\n \"\"\"Updates the type attributes and function names of If or StatelessIf.\n\n Args:\n output_node: TensorFlow NodeDef.\n input_node: TensorFlow NodeDef.\n function_data: Map of function names to the list of types and shapes that\n correspond with the function arguments.\n \"\"\"\n output_node.CopyFrom(input_node)\n then_func = input_node.attr[\"then_branch\"].func.name\n output_node.attr[\"then_branch\"].func.name = _get_new_function_name(then_func)\n output_node.attr[\"else_branch\"].func.name = _get_new_function_name(\n input_node.attr[\"else_branch\"].func.name)\n output_node.attr[\"Tin\"].list.CopyFrom(\n attr_value_pb2.AttrValue.ListValue(\n type=function_data[then_func][\"types\"]))\n\n\ndef _populate_while_op(output_node, input_node, function_data):\n \"\"\"Updates the type attributes and function names of While or StatelessWhile.\n\n Args:\n output_node: TensorFlow NodeDef.\n input_node: TensorFlow NodeDef.\n function_data: Map of function names to the list of types and shapes that\n correspond with the function arguments.\n \"\"\"\n output_node.CopyFrom(input_node)\n cond_func = input_node.attr[\"cond\"].func.name\n output_node.attr[\"cond\"].func.name = _get_new_function_name(cond_func)\n output_node.attr[\"body\"].func.name = _get_new_function_name(\n input_node.attr[\"body\"].func.name)\n output_node.attr[\"T\"].list.CopyFrom(\n attr_value_pb2.AttrValue.ListValue(\n type=function_data[cond_func][\"types\"]))\n output_node.attr[\"output_shapes\"].list.CopyFrom(\n attr_value_pb2.AttrValue.ListValue(\n shape=function_data[cond_func][\"shapes\"]))\n\n\ndef _construct_concrete_function(func, output_graph_def,\n converted_input_indices):\n \"\"\"Constructs a 
concrete function from the `output_graph_def`.\n\n Args:\n func: ConcreteFunction\n output_graph_def: GraphDef proto.\n converted_input_indices: Set of integers of input indices that were\n converted to constants.\n\n Returns:\n ConcreteFunction.\n \"\"\"\n # Create a ConcreteFunction from the new GraphDef.\n input_tensors = func.graph.internal_captures\n converted_inputs = object_identity.ObjectIdentitySet(\n [input_tensors[index] for index in converted_input_indices])\n not_converted_inputs = [\n tensor for tensor in func.inputs if tensor not in converted_inputs]\n not_converted_inputs_map = {\n tensor.name: tensor for tensor in not_converted_inputs\n }\n\n new_input_names = [tensor.name for tensor in not_converted_inputs]\n new_output_names = [tensor.name for tensor in func.outputs]\n new_func = wrap_function.function_from_graph_def(output_graph_def,\n new_input_names,\n new_output_names)\n\n # Manually propagate shape for input tensors where the shape is not correctly\n # propagated. Scalar shapes are lost when wrapping the function.\n for input_tensor in new_func.inputs:\n input_tensor.set_shape(not_converted_inputs_map[input_tensor.name].shape)\n return new_func\n\n\ndef _convert_variables_to_constants_v2_impl(func, lower_control_flow=True):\n \"\"\"Replaces all the variables in a graph with constants of the same values.\n\n TensorFlow 2.0 function for converting all Variable ops into Const ops holding\n the same values. This makes it possible to describe the network fully with a\n single GraphDef file, and allows the removal of a lot of ops related to\n loading and saving the variables. This function runs Grappler's function\n inlining optimization in order to return a single subgraph.\n\n The current implementation only works for graphs that do not contain any\n control flow or embedding related ops.\n\n Note that the NodeDefs in the returned GraphDef contain the original node\n names if they are created by the graph optimization. Converting the GraphDef\n to a concrete function will lose this debug information.\n\n Args:\n func: ConcreteFunction.\n lower_control_flow: Boolean indicating whether or not to lower control flow\n ops such as If and While. 
(default True)\n\n Returns:\n GraphDef containing a simplified version of the original and converted\n input indices that were converted to constants.\n \"\"\"\n # Inline the graph in order to remove functions when possible.\n graph_def = _run_inline_graph_optimization(func, lower_control_flow)\n\n # Gets list of all node defs include those in the library.\n node_defs = _get_node_defs_list(graph_def)\n\n # Get mapping from node name to node.\n name_to_node = {_get_tensor_name(node.name): node for node in node_defs}\n\n # Get mapping from node name to variable value.\n tensor_data = _get_tensor_data(func)\n\n # Get mapping from function name to argument types.\n function_data = _get_control_flow_function_data(\n node_defs, tensor_data, name_to_node)\n\n # Get variable data for all nodes in `node_defs`.\n reference_variables = {}\n resource_identities = {}\n placeholders = {}\n converted_input_indices = set()\n\n def _save_placeholder(node_name, dtype):\n placeholders[node_name] = {\n \"dtype\": dtype,\n \"data\": tensor_data[node_name][\"data\"],\n }\n converted_input_indices.add(tensor_data[node_name][\"index\"])\n\n for node in node_defs:\n if node.op in _CONDITIONAL_OPS:\n # Get dtype and data for resource Placeholders.\n then_func = node.attr[\"then_branch\"].func.name\n arg_types = function_data[then_func][\"types\"]\n for idx, input_tensor in enumerate(node.input[1:]):\n input_name = _get_tensor_name(input_tensor)\n if input_name in tensor_data:\n dtype = attr_value_pb2.AttrValue(type=arg_types[idx])\n _save_placeholder(_get_tensor_name(input_tensor), dtype)\n elif node.op in _LOOP_OPS:\n # Get dtype and data for resource Placeholders.\n cond_func = node.attr[\"cond\"].func.name\n arg_types = function_data[cond_func][\"types\"]\n for idx, input_tensor in enumerate(node.input):\n input_name = _get_tensor_name(input_tensor)\n if input_name in tensor_data:\n dtype = attr_value_pb2.AttrValue(type=arg_types[idx])\n _save_placeholder(_get_tensor_name(input_tensor), dtype)\n elif (node.op == \"Identity\" and node.attr[\"T\"].type == dtypes.resource and\n name_to_node[_get_tensor_name(node.input[0])].op in _LOOP_OPS):\n # Store the dtype for Identity resource ops that are outputs of While ops.\n while_node = name_to_node[_get_tensor_name(node.input[0])]\n body_func = while_node.attr[\"body\"].func.name\n input_data = node.input[0].split(\":\")\n idx = 0 if len(input_data) == 1 else int(input_data[1])\n\n dtype = attr_value_pb2.AttrValue(\n type=function_data[body_func][\"types\"][idx])\n resource_identities[node.name] = dtype\n elif node.op == \"VariableV2\":\n # Get data for VariableV2 ops (reference variables) that cannot be lifted.\n with func.graph.as_default():\n identity_node = array_ops.identity(\n func.graph.as_graph_element(node.name + \":0\"))\n reference_variables[node.name] = (\n func.prune([], [identity_node.name])()[0])\n elif node.name in tensor_data and not tensor_data[node.name][\"is_variable\"]:\n # Get dtype and data for non-variable Placeholders (ex. values for 1.X\n # Const ops that are loaded as Placeholders in 2.0)\n _save_placeholder(node.name, node.attr[\"dtype\"])\n elif node.op in [\"ReadVariableOp\", \"ResourceGather\"]:\n # Get dtype and data for Placeholder ops associated with ReadVariableOp\n # and ResourceGather ops. There can be an Identity in between the\n # resource op and Placeholder. 
Store the dtype for the Identity ops.\n input_name = _get_tensor_name(node.input[0])\n while name_to_node[input_name].op == \"Identity\":\n resource_identities[input_name] = node.attr[\"dtype\"]\n input_name = _get_tensor_name(name_to_node[input_name].input[0])\n if name_to_node[input_name].op != \"Placeholder\":\n raise ValueError(\"Cannot find the Placeholder op that is an input \"\n \"to the ReadVariableOp.\")\n _save_placeholder(input_name, node.attr[\"dtype\"])\n\n # Reconstruct the graph with constants in place of variables.\n output_graph_def = graph_pb2.GraphDef()\n\n for input_node in graph_def.node:\n output_node = output_graph_def.node.add()\n # Convert VariableV2 ops to Const ops.\n if input_node.name in reference_variables:\n data = reference_variables[input_node.name]\n dtype = attr_value_pb2.AttrValue(type=data.dtype.as_datatype_enum)\n _populate_const_op(output_node, input_node.name, dtype, data.numpy(),\n data.shape)\n # Convert Placeholder ops to Const ops.\n elif input_node.name in placeholders:\n data = placeholders[input_node.name][\"data\"]\n dtype = placeholders[input_node.name][\"dtype\"]\n _populate_const_op(output_node, input_node.name, dtype, data, data.shape)\n # Update the dtype for Identity ops that are inputs to ReadVariableOps.\n elif input_node.name in resource_identities:\n output_node.CopyFrom(input_node)\n output_node.attr[\"T\"].CopyFrom(resource_identities[input_node.name])\n # Convert ReadVariableOps to Identity ops.\n elif input_node.op == \"ReadVariableOp\":\n _populate_identity_op(output_node, input_node)\n # Convert ResourceGather to Gather ops with a Const axis feeding into it.\n elif input_node.op == \"ResourceGather\":\n if input_node.attr[\"batch_dims\"].i != 0:\n raise ValueError(\"batch_dims != 0 is not supported by freeze_graph.\")\n output_axis_node = output_graph_def.node.add()\n axis_node_name = input_node.name + \"/axis\"\n axis_dtype = input_node.attr[\"Tindices\"]\n axis_data = np.array(input_node.attr[\"batch_dims\"].i)\n _populate_const_op(output_axis_node, axis_node_name, axis_dtype,\n axis_data, axis_data.shape)\n\n output_node.op = \"GatherV2\"\n output_node.name = input_node.name\n output_node.input.extend(\n [input_node.input[0], input_node.input[1], axis_node_name])\n output_node.attr[\"Tparams\"].CopyFrom(input_node.attr[\"dtype\"])\n output_node.attr[\"Tindices\"].CopyFrom(input_node.attr[\"Tindices\"])\n output_node.attr[\"Taxis\"].CopyFrom(axis_dtype)\n if \"_class\" in input_node.attr:\n output_node.attr[\"_class\"].CopyFrom(input_node.attr[\"_class\"])\n # Update the function names and argument types for the conditional ops.\n elif input_node.op in _CONDITIONAL_OPS:\n _populate_if_op(output_node, input_node, function_data)\n elif input_node.op in _LOOP_OPS:\n _populate_while_op(output_node, input_node, function_data)\n else:\n output_node.CopyFrom(input_node)\n\n # Add functions to reconstructed graph.\n if graph_def.library:\n library = output_graph_def.library\n\n for input_library_func in graph_def.library.function:\n orig_func_name = input_library_func.signature.name\n new_func_name = _get_new_function_name(orig_func_name)\n\n # Do not copy any functions that aren't being used in the graph. 
Any\n # functions that are not used by control flow should have been inlined.\n if orig_func_name not in function_data:\n continue\n\n output_library_func = library.function.add()\n for key, value in input_library_func.ret.items():\n output_library_func.ret[key] = value\n for key, value in input_library_func.control_ret.items():\n output_library_func.control_ret[key] = value\n\n # Update the input types in the function signature. Update the output\n # types for functions that are while loop bodies.\n output_library_func.signature.CopyFrom(input_library_func.signature)\n output_library_func.signature.name = new_func_name\n for dtype, arg in zip(function_data[orig_func_name][\"types\"],\n output_library_func.signature.input_arg):\n arg.type = dtype\n if function_data[orig_func_name][\"is_also_output_type\"]:\n for dtype, arg in zip(function_data[orig_func_name][\"types\"],\n output_library_func.signature.output_arg):\n arg.type = dtype\n\n # Update the NodeDefs.\n func_variables = {\n node.name: node.input[0]\n for node in input_library_func.node_def\n if node.op == \"ReadVariableOp\"\n }\n\n for input_node in input_library_func.node_def:\n output_node = output_library_func.node_def.add()\n # Convert ReadVariableOps to Identity ops.\n if input_node.op == \"ReadVariableOp\":\n _populate_identity_op(output_node, input_node)\n # Update the function names and argument types for the conditional ops.\n elif input_node.op in _CONDITIONAL_OPS:\n _populate_if_op(output_node, input_node, function_data)\n elif input_node.op in _LOOP_OPS:\n _populate_while_op(output_node, input_node, function_data)\n else:\n output_node.CopyFrom(input_node)\n # Convert :value to :output for ops that use the ReadVariableOp.\n for idx, full_name in enumerate(input_node.input):\n input_name = _get_tensor_name(full_name)\n if input_name in func_variables:\n full_name_parts = full_name.split(\":\")\n full_name_parts[1] = \"output\"\n input_name = \":\".join(full_name_parts)\n output_node.input[idx] = input_name\n\n output_graph_def.versions.CopyFrom(graph_def.versions)\n return (output_graph_def, converted_input_indices)\n\n\ndef convert_variables_to_constants_v2(func, lower_control_flow=True):\n \"\"\"Replaces all the variables in a graph with constants of the same values.\n\n TensorFlow 2.0 function for converting all Variable ops into Const ops holding\n the same values. This makes it possible to describe the network fully with a\n single GraphDef file, and allows the removal of a lot of ops related to\n loading and saving the variables. This function runs Grappler's function\n inlining optimization in order to return a single subgraph.\n\n The current implementation only works for graphs that do not contain any\n control flow or embedding related ops.\n\n Args:\n func: ConcreteFunction.\n lower_control_flow: Boolean indicating whether or not to lower control flow\n ops such as If and While. (default True)\n\n Returns:\n ConcreteFunction containing a simplified version of the original.\n \"\"\"\n output_graph_def, converted_inputs = _convert_variables_to_constants_v2_impl(\n func, lower_control_flow)\n return _construct_concrete_function(func, output_graph_def, converted_inputs)\n\n\ndef convert_variables_to_constants_v2_as_graph(func, lower_control_flow=True):\n \"\"\"Replaces all the variables in a graph with constants of the same values.\n\n This function works as same as convert_variables_to_constants_v2, but it\n returns the intermediate `GraphDef` as well. 
This `GraphDef` contains all the\n debug information after all the transformations in the frozen phase.\n\n Args:\n func: ConcreteFunction.\n lower_control_flow: Boolean indicating whether or not to lower control flow\n ops such as If and While. (default True)\n\n Returns:\n ConcreteFunction containing a simplified version of the original, and also\n the intermediate GraphDef containing the node debug information for the\n transformations in the frozen phase.\n \"\"\"\n graph_def, converted_inputs = _convert_variables_to_constants_v2_impl(\n func, lower_control_flow)\n frozen_func = _construct_concrete_function(func, graph_def, converted_inputs)\n return frozen_func, graph_def\n", "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Test configs for constant ops.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.lite.testing.zip_test_utils import create_tensor_data\nfrom tensorflow.lite.testing.zip_test_utils import make_zip_of_tests\nfrom tensorflow.lite.testing.zip_test_utils import register_make_test_function\nfrom tensorflow.lite.testing.zip_test_utils import TF_TYPE_INFO\n\n\n# This function tests various TensorFLow functions that generates Const op,\n# including `tf.ones`, `tf.zeros` and random functions.\n@register_make_test_function()\ndef make_constant_tests(options):\n \"\"\"Make a set of tests to do constant ops.\"\"\"\n\n test_parameters = [{\n \"dtype\": [tf.float32, tf.int32],\n \"input_shape\": [[], [1], [2], [1, 1, 1, 1], [2, 2, 2, 2]],\n \"constant_is_also_output\": [True, False],\n # This is a regression test for a bug where Toco rejects models with\n # unread inputs.\n \"has_unread_input\": [True, False],\n }]\n\n def build_graph(parameters):\n \"\"\"Build a constant graph given `parameters`.\"\"\"\n dummy_input = tf.compat.v1.placeholder(\n dtype=parameters[\"dtype\"],\n name=\"input1\",\n shape=parameters[\"input_shape\"])\n constant = tf.constant(\n create_tensor_data(parameters[\"dtype\"], parameters[\"input_shape\"]))\n outputs = [tf.maximum(dummy_input, constant)]\n if parameters[\"constant_is_also_output\"]:\n outputs.append(constant)\n inputs = [dummy_input]\n if parameters[\"has_unread_input\"]:\n unread_input = tf.compat.v1.placeholder(\n dtype=parameters[\"dtype\"],\n name=\"unread_input\",\n shape=parameters[\"input_shape\"])\n inputs.append(unread_input)\n\n return inputs, outputs\n\n def build_inputs(parameters, sess, inputs, outputs):\n dummy_input = np.zeros(\n parameters[\"input_shape\"], dtype=TF_TYPE_INFO[parameters[\"dtype\"]][0])\n return [dummy_input], sess.run(outputs, feed_dict={inputs[0]: dummy_input})\n\n make_zip_of_tests(options, test_parameters, build_graph, build_inputs)\n", "# Copyright 2019 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Test configs for cast.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom tensorflow.lite.testing.zip_test_utils import create_tensor_data\nfrom tensorflow.lite.testing.zip_test_utils import make_zip_of_tests\nfrom tensorflow.lite.testing.zip_test_utils import register_make_test_function\n\n\n@register_make_test_function()\ndef make_cast_tests(options):\n \"\"\"Generate examples for cast.\"\"\"\n test_parameters = [{\n \"input_dtype\": [tf.int32],\n \"output_dtype\": [tf.float32],\n \"input_shape\": [[], [1], [1, 2], [5, 6, 7, 8], [3, 4, 5, 6]],\n }]\n\n def build_graph(parameters):\n \"\"\"Build the cast testing graph.\"\"\"\n input_value = tf.compat.v1.placeholder(\n dtype=parameters[\"input_dtype\"],\n name=\"input\",\n shape=parameters[\"input_shape\"])\n out = tf.cast(input_value, parameters[\"output_dtype\"])\n return [input_value], [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n input_value = create_tensor_data(parameters[\"input_dtype\"],\n parameters[\"input_shape\"])\n return [input_value], sess.run(\n outputs, feed_dict=dict(zip(inputs, [input_value])))\n\n make_zip_of_tests(options, test_parameters, build_graph, build_inputs)\n", "# Copyright 2019 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Contains testing utilities related to mixed precision.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.keras import regularizers\nfrom tensorflow.python.keras.engine import base_layer\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import check_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import custom_gradient\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.util import nest\n\n\ndef create_identity_with_grad_check_fn(expected_gradient, expected_dtype=None):\n \"\"\"Returns a function that asserts it's gradient has a certain value.\n\n This serves as a hook to assert intermediate gradients have a certain value.\n This returns an identity function. The identity's gradient function is also\n the identity function, except it asserts that the gradient equals\n `expected_gradient` and has dtype `expected_dtype`.\n\n Args:\n expected_gradient: The gradient function asserts that the gradient is this\n value.\n expected_dtype: The gradient function asserts the gradient has this dtype.\n\n Returns:\n An identity function whose gradient function asserts the gradient has a\n certain value.\n \"\"\"\n @custom_gradient.custom_gradient\n def _identity_with_grad_check(x):\n \"\"\"Function that asserts it's gradient has a certain value.\"\"\"\n x = array_ops.identity(x)\n def grad(dx):\n if expected_dtype:\n assert dx.dtype == expected_dtype, (\n 'dx.dtype should be %s but is: %s' % (expected_dtype, dx.dtype))\n expected_tensor = ops.convert_to_tensor(expected_gradient, dtype=dx.dtype,\n name='expected_gradient')\n assert_op = check_ops.assert_equal(dx, expected_tensor)\n with ops.control_dependencies([assert_op]):\n dx = array_ops.identity(dx)\n return dx\n return x, grad\n # Keras sometimes has trouble serializing Lambda layers with a decorated\n # function. So we define and return a non-decorated function.\n def identity_with_grad_check(x):\n return _identity_with_grad_check(x)\n return identity_with_grad_check\n\n\ndef create_identity_with_nan_gradients_fn(have_nan_gradients):\n \"\"\"Returns a function that optionally has NaN gradients.\n\n This serves as a hook to introduce NaN gradients to a model. This returns an\n identity function. The identity's gradient function will check if the boolean\n tensor `have_nan_gradients` is True. If so, the gradient will be NaN.\n Otherwise, the gradient will also be the identity.\n\n Args:\n have_nan_gradients: A scalar boolean tensor. 
If True, gradients will be NaN.\n Otherwise, the gradient function is the identity function.\n\n Returns:\n An identity function whose gradient function will return NaNs, if\n `have_nan_gradients` is True.\n \"\"\"\n @custom_gradient.custom_gradient\n def _identity_with_nan_gradients(x):\n \"\"\"Function whose gradient is NaN iff `have_nan_gradients` is True.\"\"\"\n x = array_ops.identity(x)\n def grad(dx):\n # We need this control dependency, because otherwise the NaN could be\n # produced before `dx`. This in turn could cause the final gradient to be\n # produced before `dx`, causing the loss scale to be updated before `dx`,\n # which can cause `tf.assert_equal`s to fail.\n with ops.control_dependencies([dx]):\n nan_scalar = constant_op.constant(float('NaN'), dtype=dx.dtype)\n return control_flow_ops.cond(\n have_nan_gradients,\n lambda: array_ops.fill(array_ops.shape(dx), nan_scalar),\n lambda: dx\n )\n return x, grad\n # Keras sometimes has trouble serializing Lambda layers with a decorated\n # function. So we define and return a non-decorated function.\n def identity_with_nan_gradients(x):\n return _identity_with_nan_gradients(x)\n return identity_with_nan_gradients\n\n\nclass AssertTypeLayer(base_layer.Layer):\n \"\"\"A layer which asserts its inputs are a certain type.\"\"\"\n\n def __init__(self, assert_type=None, **kwargs):\n self._assert_type = (dtypes.as_dtype(assert_type).name if assert_type\n else None)\n super(AssertTypeLayer, self).__init__(**kwargs)\n\n def assert_input_types(self, inputs):\n \"\"\"Asserts `inputs` are of the correct type. Should be called in call().\"\"\"\n if self._assert_type:\n inputs_flattened = nest.flatten(inputs)\n for inp in inputs_flattened:\n assert inp.dtype.base_dtype == self._assert_type, (\n 'Input tensor has type %s which does not match assert type %s' %\n (inp.dtype.name, self._assert_type))\n\n\nclass AddLayer(AssertTypeLayer):\n \"\"\"A layer which adds its input to a scalar variable.\"\"\"\n\n def __init__(self,\n regularizer=None,\n use_operator=False,\n var_name='v',\n **kwargs):\n \"\"\"Initializes the AddLayer.\n\n Args:\n regularizer: The regularizer on the scalar variable.\n use_operator: If True, add using the + operator. If False, add using\n tf.add.\n var_name: The name of the variable. 
It can be useful to pass a name other\n than 'v', to test having the attribute name (self.v) being different\n from the variable name.\n **kwargs: Passed to AssertTypeLayer constructor.\n \"\"\"\n self._regularizer = regularizer\n if isinstance(regularizer, dict):\n self._regularizer = regularizers.deserialize(regularizer,\n custom_objects=globals())\n self._use_operator = use_operator\n self._var_name = var_name\n super(AddLayer, self).__init__(**kwargs)\n\n def build(self, _):\n self.v = self.add_weight(\n self._var_name, (), initializer='ones', regularizer=self._regularizer)\n self.built = True\n\n def call(self, inputs):\n self.assert_input_types(inputs)\n assert inputs.dtype == self.v.dtype\n return self._add(inputs, self.v)\n\n def _add(self, x, y):\n if self._use_operator:\n return x + y\n else:\n return math_ops.add(x, y)\n\n def get_config(self):\n config = super(AddLayer, self).get_config()\n config['regularizer'] = regularizers.serialize(self._regularizer)\n config['use_operator'] = self._use_operator\n config['var_name'] = self._var_name\n config['assert_type'] = self._assert_type\n return config\n\n\nclass IdentityRegularizer(regularizers.Regularizer):\n\n def __call__(self, x):\n assert x.dtype == dtypes.float32\n return array_ops.identity(x)\n\n def get_config(self):\n return {}\n", "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Test configs for not_equal.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom tensorflow.lite.testing.zip_test_utils import create_tensor_data\nfrom tensorflow.lite.testing.zip_test_utils import make_zip_of_tests\nfrom tensorflow.lite.testing.zip_test_utils import register_make_test_function\n\n\n@register_make_test_function()\ndef make_not_equal_tests(options):\n \"\"\"Make a set of tests to do not equal.\"\"\"\n\n test_parameters = [{\n \"input_dtype\": [tf.float32, tf.int32, tf.int64],\n \"input_shape_pair\": [([1, 1, 1, 3], [1, 1, 1, 3]),\n ([2, 3, 4, 5], [2, 3, 4, 5]), ([2, 3, 3], [2, 3]),\n ([5, 5], [1]), ([10], [2, 4, 10])],\n }]\n\n def build_graph(parameters):\n \"\"\"Build the not euqal op testing graph.\"\"\"\n input_value1 = tf.compat.v1.placeholder(\n dtype=parameters[\"input_dtype\"],\n name=\"input1\",\n shape=parameters[\"input_shape_pair\"][0])\n input_value2 = tf.compat.v1.placeholder(\n dtype=parameters[\"input_dtype\"],\n name=\"input2\",\n shape=parameters[\"input_shape_pair\"][1])\n out = tf.not_equal(input_value1, input_value2)\n return [input_value1, input_value2], [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n input_value1 = create_tensor_data(parameters[\"input_dtype\"],\n parameters[\"input_shape_pair\"][0])\n input_value2 = create_tensor_data(parameters[\"input_dtype\"],\n parameters[\"input_shape_pair\"][1])\n return [input_value1, input_value2], 
sess.run(\n outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))\n\n make_zip_of_tests(\n options,\n test_parameters,\n build_graph,\n build_inputs,\n expected_tf_failures=3)\n", "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Correctness tests for tf.keras using DistributionStrategy.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\n\nfrom absl.testing import parameterized\nimport numpy as np\nimport six\nfrom tensorflow.python import keras\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.distribute import combinations\nfrom tensorflow.python.distribute import distribute_lib\nfrom tensorflow.python.distribute import mirrored_strategy\nfrom tensorflow.python.distribute import strategy_combinations\nfrom tensorflow.python.distribute import tpu_strategy\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import test\nfrom tensorflow.python.framework import random_seed\nfrom tensorflow.python.keras.distribute import distributed_training_utils\nfrom tensorflow.python.keras.mixed_precision.experimental import policy\nfrom tensorflow.python.keras.preprocessing import sequence\nfrom tensorflow.python.util import nest\n\n_RANDOM_SEED = 1337\n_EVAL_STEPS = 20\n_GLOBAL_BATCH_SIZE = 64\n\n# Note: Please make sure the tests in this file are also covered in\n# keras_backward_compat_test for features that are supported with both APIs.\n\nall_strategies = [\n strategy_combinations.default_strategy,\n strategy_combinations.one_device_strategy,\n strategy_combinations.mirrored_strategy_with_gpu_and_cpu,\n strategy_combinations.mirrored_strategy_with_two_gpus,\n strategy_combinations.tpu_strategy, # steps_per_run=2\n strategy_combinations.tpu_strategy_one_step,\n]\n\n\ndef eager_mode_test_configuration():\n return combinations.combine(\n mode='eager', use_numpy=[True, False], use_validation_data=[True, False])\n\n\ndef graph_mode_test_configuration():\n return combinations.combine(\n mode='graph', use_numpy=[True, False], use_validation_data=[True, False])\n\n\ndef all_strategy_and_input_config_combinations():\n return (combinations.times(\n combinations.combine(\n distribution=all_strategies,\n experimental_run_tf_function=[True, False]),\n eager_mode_test_configuration() + graph_mode_test_configuration()))\n\n\ndef strategy_minus_tpu_and_input_config_combinations_eager():\n return (combinations.times(\n combinations.combine(\n distribution=strategy_combinations.strategies_minus_tpu),\n eager_mode_test_configuration()))\n\n\ndef strategies_for_embedding_models():\n \"\"\"Returns distribution strategies to test for embedding models.\n\n Since embedding models take longer to train, we disregard DefaultStrategy\n in order to prevent testing timeouts.\n \"\"\"\n\n return [\n s for s in all_strategies if 
s.required_tpu or s.required_gpus or\n s is strategy_combinations.one_device_strategy\n ]\n\n\ndef test_combinations_for_embedding_model():\n # TODO(sourabhbajaj): Enable tests for eager mode\n eager_mode_strategies = [\n s for s in strategies_for_embedding_models() if not s.required_tpu\n ]\n\n return (combinations.times(\n combinations.combine(\n distribution=strategies_for_embedding_models(),\n experimental_run_tf_function=[True, False]),\n (graph_mode_test_configuration())) + combinations.times(\n combinations.combine(\n distribution=eager_mode_strategies,\n experimental_run_tf_function=[False]),\n (eager_mode_test_configuration())))\n\n\ndef test_combinations_with_tpu_strategies():\n tpu_strategies = [\n strategy_combinations.tpu_strategy,\n strategy_combinations.tpu_strategy_one_step\n ]\n\n return (combinations.times(\n combinations.combine(distribution=tpu_strategies),\n graph_mode_test_configuration()))\n\n\nclass MaybeDistributionScope(object):\n \"\"\"Provides a context allowing no distribution strategy.\"\"\"\n\n def __init__(self, distribution):\n self._distribution = distribution\n self._scope = None\n\n def __enter__(self):\n if self._distribution:\n self._scope = self._distribution.scope()\n self._scope.__enter__()\n\n def __exit__(self, exc_type, value, traceback):\n if self._distribution:\n self._scope.__exit__(exc_type, value, traceback)\n self._scope = None\n\n\ndef batch_wrapper(dataset, batch_size, repeat=None):\n if repeat:\n dataset = dataset.repeat(repeat)\n return dataset.batch(batch_size)\n\n\ndef get_batch_size(global_batch_size, distribution):\n batch_size = global_batch_size\n # TODO(b/118776054): Use global batch size for Keras/DS support.\n use_per_core_batch_size = (\n distribution and\n not distributed_training_utils.global_batch_size_supported(distribution))\n if use_per_core_batch_size:\n batch_size //= distribution.num_replicas_in_sync\n return batch_size\n\n\ndef get_data_size(data):\n \"\"\"Gets the size of data in list, tuple, dict, or a numpy array.\"\"\"\n assert isinstance(data, (np.ndarray, list, dict, tuple))\n\n if isinstance(data, np.ndarray):\n return len(data)\n\n if isinstance(data, (list, tuple)):\n return len(data[0])\n\n return len(six.next(six.itervalues(data)))\n\n\ndef get_shapes(data):\n shapes = None\n if all(hasattr(x, 'shape') for x in nest.flatten(data)):\n shapes = nest.map_structure(lambda x: x.shape, data)\n return shapes\n\n\ndef get_correctness_test_inputs(use_numpy, use_validation_data,\n with_distribution, x_train, y_train, x_eval,\n y_eval, x_predict, training_epochs):\n \"\"\"Generates the inputs for correctness check when enable Keras with DS.\"\"\"\n global_batch_size = _GLOBAL_BATCH_SIZE\n batch_size = get_batch_size(global_batch_size, with_distribution)\n\n if use_numpy:\n training_inputs = {\n 'batch_size': batch_size,\n 'x': x_train,\n 'y': y_train,\n 'epochs': training_epochs,\n 'shuffle': False,\n }\n\n if use_validation_data:\n eval_inputs = None\n training_inputs['validation_data'] = (x_eval, y_eval)\n else:\n eval_inputs = {\n 'batch_size': batch_size,\n 'x': x_eval,\n 'y': y_eval,\n }\n predict_inputs = {'x': x_predict}\n else:\n training_data_size = get_data_size(x_train)\n # For dataset inputs, we do not pass batch_size to\n # keras.fit/evaluate/predict. 
The batch size is part of the dataset.\n train_dataset = dataset_ops.Dataset.from_tensor_slices((x_train, y_train))\n x = batch_wrapper(train_dataset, batch_size, repeat=training_epochs)\n\n steps_per_epoch = int(np.ceil(1.0 * training_data_size / global_batch_size))\n training_inputs = {\n 'batch_size': None,\n 'x': x,\n 'y': None,\n 'epochs': training_epochs,\n 'shuffle': False,\n 'steps_per_epoch': steps_per_epoch\n }\n if use_validation_data:\n eval_inputs = None # Remove the eval_inputs\n eval_dataset = dataset_ops.Dataset.from_tensor_slices((x_eval, y_eval))\n x = batch_wrapper(eval_dataset, batch_size)\n training_inputs['validation_data'] = x\n training_inputs['validation_steps'] = 5\n else:\n eval_dataset = dataset_ops.Dataset.from_tensor_slices((x_eval, y_eval))\n x = batch_wrapper(eval_dataset, batch_size)\n eval_steps = int(np.ceil(1.0 * get_data_size(x_eval) / global_batch_size))\n eval_inputs = {\n 'batch_size': None,\n 'x': x,\n 'y': None,\n 'steps': eval_steps,\n }\n\n predict_batch_size = get_batch_size(\n get_data_size(x_predict), with_distribution)\n predict_dataset = dataset_ops.Dataset.from_tensor_slices(x_predict)\n predict_dataset = batch_wrapper(predict_dataset, predict_batch_size)\n predict_inputs = {\n 'steps': 1,\n 'x': predict_dataset,\n }\n\n return training_inputs, eval_inputs, predict_inputs\n\n\ndef fit_eval_and_predict(initial_weights,\n input_fn,\n model_fn,\n experimental_run_tf_function=None,\n distribution=None,\n is_stateful_model=False):\n \"\"\"Generates results for fit/predict/evaluate for given model.\"\"\"\n training_inputs, eval_inputs, predict_inputs = input_fn()\n model = model_fn(\n experimental_run_tf_function=experimental_run_tf_function,\n initial_weights=initial_weights,\n distribution=distribution,\n input_shapes=get_shapes(training_inputs['x']))\n\n result = {}\n result['training_history_1'] = model.fit(**training_inputs).history\n\n if eval_inputs is not None:\n result['eval_result_1'] = model.evaluate(**eval_inputs)\n\n result['weights_1'] = model.get_weights()\n\n if predict_inputs is not None:\n # Check correctness of the result of predict() invoked\n # multiple times -- as for stateful models, result of\n # predict may differ for each batch.\n predict_length = 1\n if is_stateful_model:\n predict_length = 3\n for i in range(predict_length):\n result_key = 'predict_result_{}'.format(i)\n result[result_key] = model.predict(**predict_inputs)\n\n # Train and eval again to mimic user's flow.\n\n result['training_history_2'] = model.fit(**training_inputs).history\n\n if eval_inputs is not None:\n result['eval_result_2'] = model.evaluate(**eval_inputs)\n\n result['weights_2'] = model.get_weights()\n\n return result\n\n\ndef compare_results(results_with_ds,\n results_without_ds,\n distribution,\n testcase,\n partial_last_batch=None):\n \"\"\"Compares results of model compiled with/without distribution strategy.\"\"\"\n if policy.global_policy().compute_dtype in ('float16', 'bfloat16'):\n default_tolerance = 1e-2\n relaxed_tolerance = 1e-2\n elif partial_last_batch == 'train_and_eval':\n # We relax the tolerence a lot in the partial last batch case as\n # 1. the examples in uneven batches may have different weights when\n # applying the gradients in the distributed case.\n # 2. TF Keras and TF Keras DS have different ways to handle the case when\n # training with epochs > 1 with numpy inputs. In TF Keras, every epoch\n # may have a partial batch. 
While in TF Keras DS, as we convert\n # numpy inputs into dataset, it will do a repeat() first and calculate\n # steps_per_epoch, so it will at most have one partial batch. This\n # makes the 1-CPU result even different.\n default_tolerance = 1e-3\n relaxed_tolerance = 1e-3\n else:\n default_tolerance = 1e-5\n relaxed_tolerance = 1e-4\n\n def _get_compare_result_tolerance(key):\n \"\"\"Returns tolerance to compare results.\"\"\"\n # TODO(b/119257215): For MirroredStrategy, weights are not exactly the same,\n # so use larger tolerance for now. Predict should be related to weights.\n if (isinstance(distribution,\n (mirrored_strategy.MirroredStrategy,\n distribute_lib._DefaultDistributionStrategy)) and # pylint: disable=protected-access\n key.startswith(('weights_1', 'weights_2', 'predict_result'))):\n return relaxed_tolerance\n\n return default_tolerance\n\n for key in sorted(results_with_ds.keys()):\n if (key.startswith('training_history') and\n isinstance(distribution,\n (tpu_strategy.TPUStrategy, tpu_strategy.TPUStrategyV1)) and\n distribution.extended.steps_per_run > 1):\n # TODO(b/119894254): Enable this test for all cases once the\n # underlying bug is fixed.\n continue\n\n tolerance = _get_compare_result_tolerance(key)\n\n # We don't compare the loss as loss is currently not computed as metric\n # in Keras, the loss value is inaccurate for last partial batch due to\n # more weights for the last batch samples.\n if partial_last_batch is not None:\n if key.startswith('eval_result'):\n results_with_ds[key] = results_with_ds[key][1:]\n results_without_ds[key] = results_without_ds[key][1:]\n if key.startswith('training_history'):\n results_with_ds[key]['val_loss'] = 0\n results_without_ds[key]['val_loss'] = 0\n\n testcase.assertAllClose(\n results_with_ds[key],\n results_without_ds[key],\n atol=tolerance,\n rtol=tolerance,\n msg='Fail to assert {}.'.format(key))\n\n\ndef should_skip_tpu_with_eager(distribution):\n return (context.executing_eagerly() and\n isinstance(distribution,\n (tpu_strategy.TPUStrategy, tpu_strategy.TPUStrategyV1)))\n\n\nclass LearningRateBatchScheduler(keras.callbacks.Callback):\n \"\"\"Scheduler that dynamically sets the learning rate of model.\"\"\"\n\n def __init__(self, update_freq=None):\n self._update_freq = update_freq\n\n def on_batch_begin(self, batch, logs=None):\n if self._update_freq and batch % self._update_freq != 0:\n return\n\n # To avoid divergence, limit the value range.\n lr = 0.001 * (batch % 10)\n keras.backend.set_value(self.model.optimizer.lr, lr)\n\n\nclass TestDistributionStrategyCorrectnessBase(test.TestCase,\n parameterized.TestCase):\n \"\"\"Model agnostic testing infra to test correctness of Keras models.\"\"\"\n\n def set_up_test_config(self,\n use_numpy=False,\n use_validation_data=False,\n with_batch_norm=False):\n self.use_numpy = use_numpy\n self.use_validation_data = use_validation_data\n self.with_batch_norm = with_batch_norm\n\n keras.backend.set_image_data_format('channels_last')\n np.random.seed(_RANDOM_SEED)\n random_seed.set_random_seed(_RANDOM_SEED)\n\n def get_data(self):\n num_samples = 10000\n x_train = np.random.randint(0, 2, num_samples)\n x_train = np.reshape(x_train, (num_samples, 1))\n y_train = x_train\n return (x_train.astype('float32'), y_train.astype('float32'), None)\n\n def get_data_with_partial_last_batch(self):\n raise NotImplementedError\n\n def get_data_with_partial_last_batch_eval(self):\n raise NotImplementedError\n\n def get_input_for_correctness_test(self, **kwargs):\n \"\"\"Generates inputs that are 
dictionaries.\n\n We only provide a default implementation of this method here. If you need\n more customized way of providing input to your model, overwrite this method.\n\n Arguments:\n **kwargs: key word arguments about how to create the input dictionaries\n\n Returns:\n Three dictionaries representing the input for fit(), evalutate() and\n predict()\n \"\"\"\n\n return get_correctness_test_inputs(**kwargs)\n\n def get_model(self,\n distribution=None,\n experimental_run_tf_function=None,\n input_shapes=None):\n raise NotImplementedError\n\n def run_correctness_test(self,\n distribution,\n use_numpy,\n use_validation_data,\n experimental_run_tf_function=None,\n with_batch_norm=False,\n is_stateful_model=False,\n partial_last_batch=None,\n training_epochs=2):\n with self.cached_session():\n self.set_up_test_config(use_numpy, use_validation_data, with_batch_norm)\n\n if partial_last_batch == 'eval':\n x_train, y_train, x_eval, y_eval, x_predict = (\n self.get_data_with_partial_last_batch_eval())\n elif partial_last_batch == 'train_and_eval':\n x_train, y_train, x_eval, y_eval, x_predict = (\n self.get_data_with_partial_last_batch())\n else:\n x_train, y_train, x_predict = self.get_data()\n x_eval = x_train\n y_eval = y_train\n\n # The model is built once and the initial weights are saved.\n # This is used to initialize the model for both the distribution and\n # non-distribution run.\n model = self.get_model(\n experimental_run_tf_function=experimental_run_tf_function,\n input_shapes=get_shapes(x_train))\n initial_weights = model.get_weights()\n\n ds_input_fn = functools.partial(\n self.get_input_for_correctness_test,\n use_numpy=use_numpy,\n use_validation_data=use_validation_data,\n with_distribution=distribution,\n x_train=x_train,\n y_train=y_train,\n x_eval=x_eval,\n y_eval=y_eval,\n x_predict=x_predict,\n training_epochs=training_epochs)\n\n nods_input_fn = functools.partial(\n self.get_input_for_correctness_test,\n use_numpy=use_numpy,\n use_validation_data=use_validation_data,\n with_distribution=None,\n x_train=x_train,\n y_train=y_train,\n x_eval=x_eval,\n y_eval=y_eval,\n x_predict=x_predict,\n training_epochs=training_epochs)\n\n results_with_ds = fit_eval_and_predict(\n initial_weights,\n input_fn=ds_input_fn,\n model_fn=self.get_model,\n experimental_run_tf_function=experimental_run_tf_function,\n distribution=distribution,\n is_stateful_model=is_stateful_model)\n results_without_ds = fit_eval_and_predict(\n initial_weights,\n input_fn=nods_input_fn,\n model_fn=self.get_model,\n experimental_run_tf_function=experimental_run_tf_function,\n distribution=None,\n is_stateful_model=is_stateful_model)\n\n # First, special case, for multi-replica distributed training, batch\n # norm is not aggregated globally. So it is expected to have different\n # weights.\n if (self.with_batch_norm and distribution.num_replicas_in_sync > 1):\n with self.assertRaises(AssertionError):\n compare_results(\n results_with_ds,\n results_without_ds,\n distribution,\n testcase=self,\n partial_last_batch=partial_last_batch)\n else:\n compare_results(\n results_with_ds,\n results_without_ds,\n distribution,\n testcase=self,\n partial_last_batch=partial_last_batch)\n\n def get_input_for_dynamic_lr_test(self, **kwargs):\n \"\"\"Generates inputs that are dictionaries.\n\n We only provide a default implementation of this method here. 
If you need\n more customized way of providing input to your model, overwrite this method.\n\n Arguments:\n **kwargs: key word arguments about how to create the input dictionaries\n\n Returns:\n Three dictionaries representing the input for fit(), evalutate() and\n predict()\n \"\"\"\n\n training_input = kwargs\n return training_input, None, None\n\n def run_dynamic_lr_test(self,\n distribution,\n experimental_run_tf_function=None):\n with self.cached_session():\n self.set_up_test_config()\n\n x_train, y_train, _ = self.get_data()\n model = self.get_model(\n experimental_run_tf_function=experimental_run_tf_function,\n input_shapes=get_shapes(x_train))\n initial_weights = model.get_weights()\n update_freq = None\n\n if (isinstance(distribution, tpu_strategy.TPUStrategyV1) and\n distribution.extended.steps_per_run > 1):\n # For TPUStrategy with steps_per_run > 1, the callback is not invoked\n # every step. So, to compare the CPU/TPU, we let the CPU to behave the\n # same as TPU.\n update_freq = distribution.extended.steps_per_run\n\n training_epochs = 2\n global_batch_size = 64\n\n ds_batch_size = get_batch_size(global_batch_size, distribution)\n nods_batch_size = get_batch_size(global_batch_size, None)\n\n ds_input_fn = functools.partial(\n self.get_input_for_dynamic_lr_test,\n x=x_train,\n y=y_train,\n batch_size=ds_batch_size,\n shuffle=False,\n epochs=training_epochs,\n callbacks=[LearningRateBatchScheduler(update_freq)],\n validation_data=(x_train, y_train))\n\n nods_input_fn = functools.partial(\n self.get_input_for_dynamic_lr_test,\n x=x_train,\n y=y_train,\n batch_size=nods_batch_size,\n shuffle=False,\n epochs=training_epochs,\n callbacks=[LearningRateBatchScheduler(update_freq)],\n validation_data=(x_train, y_train))\n\n results_with_ds = fit_eval_and_predict(\n initial_weights,\n input_fn=ds_input_fn,\n model_fn=self.get_model,\n experimental_run_tf_function=experimental_run_tf_function,\n distribution=distribution)\n results_without_ds = fit_eval_and_predict(\n initial_weights,\n input_fn=nods_input_fn,\n model_fn=self.get_model,\n experimental_run_tf_function=experimental_run_tf_function,\n distribution=None)\n compare_results(\n results_with_ds, results_without_ds, distribution, testcase=self)\n\n\nclass TestDistributionStrategyEmbeddingModelCorrectnessBase(\n TestDistributionStrategyCorrectnessBase):\n \"\"\"Base class to test correctness of Keras models with embedding layers.\"\"\"\n\n def get_data(self,\n count=(_GLOBAL_BATCH_SIZE * _EVAL_STEPS),\n min_words=5,\n max_words=10,\n max_word_id=19,\n num_classes=2):\n distribution = []\n for _ in range(num_classes):\n dist = np.abs(np.random.randn(max_word_id))\n dist /= np.sum(dist)\n distribution.append(dist)\n\n features = []\n labels = []\n for _ in range(count):\n label = np.random.randint(0, num_classes, size=1)[0]\n num_words = np.random.randint(min_words, max_words, size=1)[0]\n word_ids = np.random.choice(\n max_word_id, size=num_words, replace=True, p=distribution[label])\n word_ids = word_ids\n labels.append(label)\n features.append(word_ids)\n\n features = sequence.pad_sequences(\n features, maxlen=max_words)\n x_train = np.asarray(features, dtype=np.float32)\n y_train = np.asarray(labels, dtype=np.int32).reshape((count, 1))\n x_predict = x_train[:_GLOBAL_BATCH_SIZE]\n return x_train, y_train, x_predict\n\n\nif __name__ == '__main__':\n test.main()\n", "# Copyright 2019 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for XLA matrix diag ops.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.compiler.tests import xla_test\nfrom tensorflow.python.compat import compat\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.platform import googletest\n\n\n# LINT.IfChange\nmatrix_diag_v3_forward_compat_date = (2019, 12, 6)\n# LINT.ThenChange(\n# //tensorflow/python/kernel_tests/diag_op_test.py,\n# //tensorflow/python/ops/array_ops.py,\n# //tensorflow/python/ops/parallel_for/array_test.py\n# )\n\ndefault_v2_alignment = \"LEFT_LEFT\"\nalignment_list = [\"RIGHT_LEFT\", \"LEFT_RIGHT\"]\n\n\ndef zip_to_first_list_length(a, b):\n if len(b) > len(a):\n return zip(a, b[:len(a)])\n return zip(a, b + [None] * (len(a) - len(b)))\n\n\n# Routines to convert test cases to have diagonals in a specified alignment.\n# Copied from //third_party/tensorflow/python/kernel_tests/diag_op_test.py\ndef repack_diagonals(packed_diagonals,\n diag_index,\n num_rows,\n num_cols,\n align=None):\n # The original test cases are LEFT_LEFT aligned.\n if align == default_v2_alignment or align is None:\n return packed_diagonals\n\n align = align.split(\"_\")\n d_lower, d_upper = diag_index\n batch_dims = packed_diagonals.ndim - (2 if d_lower < d_upper else 1)\n max_diag_len = packed_diagonals.shape[-1]\n index = (slice(None),) * batch_dims\n repacked_diagonals = np.zeros_like(packed_diagonals)\n\n # Aligns each diagonal row-by-row.\n for diag_index in range(d_lower, d_upper + 1):\n diag_len = min(num_rows + min(0, diag_index), num_cols - max(0, diag_index))\n row_index = d_upper - diag_index\n padding_len = max_diag_len - diag_len\n left_align = (diag_index >= 0 and\n align[0] == \"LEFT\") or (diag_index <= 0 and\n align[1] == \"LEFT\")\n # Prepares index tuples.\n extra_dim = tuple() if d_lower == d_upper else (row_index,)\n packed_last_dim = (slice(None),) if left_align else (slice(0, diag_len, 1),)\n repacked_last_dim = (slice(None),) if left_align else (slice(\n padding_len, max_diag_len, 1),)\n packed_index = index + extra_dim + packed_last_dim\n repacked_index = index + extra_dim + repacked_last_dim\n\n # Repacks the diagonal.\n repacked_diagonals[repacked_index] = packed_diagonals[packed_index]\n return repacked_diagonals\n\n\ndef repack_diagonals_in_tests(tests, align=None):\n # The original test cases are LEFT_LEFT aligned.\n if align == default_v2_alignment or align is None:\n return tests\n\n new_tests = dict()\n # Loops through each case.\n for diag_index, (packed_diagonals, padded_diagonals) in tests.items():\n num_rows, num_cols = padded_diagonals.shape[-2:]\n repacked_diagonals = repack_diagonals(\n packed_diagonals, diag_index, num_rows, num_cols, align=align)\n new_tests[diag_index] = (repacked_diagonals, padded_diagonals)\n\n return 
new_tests\n\n\n# Test cases shared by MatrixDiagV2, MatrixDiagPartV2, and MatrixSetDiagV2.\n# Copied from //third_party/tensorflow/python/kernel_tests/diag_op_test.py\ndef square_cases(align=None):\n # pyformat: disable\n mat = np.array([[[1, 2, 3, 4, 5],\n [6, 7, 8, 9, 1],\n [3, 4, 5, 6, 7],\n [8, 9, 1, 2, 3],\n [4, 5, 6, 7, 8]],\n [[9, 1, 2, 3, 4],\n [5, 6, 7, 8, 9],\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 1],\n [2, 3, 4, 5, 6]]])\n tests = dict()\n # tests[d_lower, d_upper] = (compact_diagonals, padded_diagonals)\n tests[-1, -1] = (np.array([[6, 4, 1, 7],\n [5, 2, 8, 5]]),\n np.array([[[0, 0, 0, 0, 0],\n [6, 0, 0, 0, 0],\n [0, 4, 0, 0, 0],\n [0, 0, 1, 0, 0],\n [0, 0, 0, 7, 0]],\n [[0, 0, 0, 0, 0],\n [5, 0, 0, 0, 0],\n [0, 2, 0, 0, 0],\n [0, 0, 8, 0, 0],\n [0, 0, 0, 5, 0]]]))\n tests[-4, -3] = (np.array([[[8, 5],\n [4, 0]],\n [[6, 3],\n [2, 0]]]),\n np.array([[[0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [8, 0, 0, 0, 0],\n [4, 5, 0, 0, 0]],\n [[0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [6, 0, 0, 0, 0],\n [2, 3, 0, 0, 0]]]))\n tests[-2, 1] = (np.array([[[2, 8, 6, 3, 0],\n [1, 7, 5, 2, 8],\n [6, 4, 1, 7, 0],\n [3, 9, 6, 0, 0]],\n [[1, 7, 4, 1, 0],\n [9, 6, 3, 9, 6],\n [5, 2, 8, 5, 0],\n [1, 7, 4, 0, 0]]]),\n np.array([[[1, 2, 0, 0, 0],\n [6, 7, 8, 0, 0],\n [3, 4, 5, 6, 0],\n [0, 9, 1, 2, 3],\n [0, 0, 6, 7, 8]],\n [[9, 1, 0, 0, 0],\n [5, 6, 7, 0, 0],\n [1, 2, 3, 4, 0],\n [0, 7, 8, 9, 1],\n [0, 0, 4, 5, 6]]]))\n tests[2, 4] = (np.array([[[5, 0, 0],\n [4, 1, 0],\n [3, 9, 7]],\n [[4, 0, 0],\n [3, 9, 0],\n [2, 8, 5]]]),\n np.array([[[0, 0, 3, 4, 5],\n [0, 0, 0, 9, 1],\n [0, 0, 0, 0, 7],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0]],\n [[0, 0, 2, 3, 4],\n [0, 0, 0, 8, 9],\n [0, 0, 0, 0, 5],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0]]]))\n # pyformat: enable\n return (mat, repack_diagonals_in_tests(tests, align))\n\n\ndef tall_cases(align=None):\n # pyformat: disable\n mat = np.array([[[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9],\n [9, 8, 7],\n [6, 5, 4]],\n [[3, 2, 1],\n [1, 2, 3],\n [4, 5, 6],\n [7, 8, 9],\n [9, 8, 7]]])\n tests = dict()\n # tests[d_lower, d_upper] = (compact_diagonals, padded_diagonals)\n tests[0, 0] = (np.array([[1, 5, 9],\n [3, 2, 6]]),\n np.array([[[1, 0, 0],\n [0, 5, 0],\n [0, 0, 9],\n [0, 0, 0]],\n [[3, 0, 0],\n [0, 2, 0],\n [0, 0, 6],\n [0, 0, 0]]]))\n tests[-4, -3] = (np.array([[[9, 5],\n [6, 0]],\n [[7, 8],\n [9, 0]]]),\n np.array([[[0, 0, 0],\n [0, 0, 0],\n [0, 0, 0],\n [9, 0, 0],\n [6, 5, 0]],\n [[0, 0, 0],\n [0, 0, 0],\n [0, 0, 0],\n [7, 0, 0],\n [9, 8, 0]]]))\n tests[-2, -1] = (np.array([[[4, 8, 7],\n [7, 8, 4]],\n [[1, 5, 9],\n [4, 8, 7]]]),\n np.array([[[0, 0, 0],\n [4, 0, 0],\n [7, 8, 0],\n [0, 8, 7],\n [0, 0, 4]],\n [[0, 0, 0],\n [1, 0, 0],\n [4, 5, 0],\n [0, 8, 9],\n [0, 0, 7]]]))\n tests[-2, 1] = (np.array([[[2, 6, 0],\n [1, 5, 9],\n [4, 8, 7],\n [7, 8, 4]],\n [[2, 3, 0],\n [3, 2, 6],\n [1, 5, 9],\n [4, 8, 7]]]),\n np.array([[[1, 2, 0],\n [4, 5, 6],\n [7, 8, 9],\n [0, 8, 7],\n [0, 0, 4]],\n [[3, 2, 0],\n [1, 2, 3],\n [4, 5, 6],\n [0, 8, 9],\n [0, 0, 7]]]))\n tests[1, 2] = (np.array([[[3, 0],\n [2, 6]],\n [[1, 0],\n [2, 3]]]),\n np.array([[[0, 2, 3],\n [0, 0, 6],\n [0, 0, 0],\n [0, 0, 0],\n [0, 0, 0]],\n [[0, 2, 1],\n [0, 0, 3],\n [0, 0, 0],\n [0, 0, 0],\n [0, 0, 0]]]))\n # pyformat: enable\n return (mat, repack_diagonals_in_tests(tests, align))\n\n\ndef fat_cases(align=None):\n # pyformat: disable\n mat = np.array([[[1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 1, 2, 3]],\n [[4, 5, 6, 7],\n [8, 9, 1, 2],\n [3, 4, 5, 6]]])\n tests = dict()\n # tests[d_lower, d_upper] = 
(compact_diagonals, padded_diagonals)\n tests[0, 0] = (np.array([[1, 6, 2],\n [4, 9, 5]]),\n np.array([[[1, 0, 0, 0],\n [0, 6, 0, 0],\n [0, 0, 2, 0]],\n [[4, 0, 0, 0],\n [0, 9, 0, 0],\n [0, 0, 5, 0]]]))\n tests[2, 2] = (np.array([[3, 8],\n [6, 2]]),\n np.array([[[0, 0, 3, 0],\n [0, 0, 0, 8],\n [0, 0, 0, 0]],\n [[0, 0, 6, 0],\n [0, 0, 0, 2],\n [0, 0, 0, 0]]]))\n tests[-2, 0] = (np.array([[[1, 6, 2],\n [5, 1, 0],\n [9, 0, 0]],\n [[4, 9, 5],\n [8, 4, 0],\n [3, 0, 0]]]),\n np.array([[[1, 0, 0, 0],\n [5, 6, 0, 0],\n [9, 1, 2, 0]],\n [[4, 0, 0, 0],\n [8, 9, 0, 0],\n [3, 4, 5, 0]]]))\n tests[-1, 1] = (np.array([[[2, 7, 3],\n [1, 6, 2],\n [5, 1, 0]],\n [[5, 1, 6],\n [4, 9, 5],\n [8, 4, 0]]]),\n np.array([[[1, 2, 0, 0],\n [5, 6, 7, 0],\n [0, 1, 2, 3]],\n [[4, 5, 0, 0],\n [8, 9, 1, 0],\n [0, 4, 5, 6]]]))\n tests[0, 3] = (np.array([[[4, 0, 0],\n [3, 8, 0],\n [2, 7, 3],\n [1, 6, 2]],\n [[7, 0, 0],\n [6, 2, 0],\n [5, 1, 6],\n [4, 9, 5]]]),\n np.array([[[1, 2, 3, 4],\n [0, 6, 7, 8],\n [0, 0, 2, 3]],\n [[4, 5, 6, 7],\n [0, 9, 1, 2],\n [0, 0, 5, 6]]]))\n # pyformat: enable\n return (mat, repack_diagonals_in_tests(tests, align))\n\n\ndef all_tests(align=None):\n return [square_cases(align), tall_cases(align), fat_cases(align)]\n\n\nclass MatrixDiagTest(xla_test.XLATestCase):\n\n def _assertOpOutputMatchesExpected(self,\n params,\n solution,\n rtol=1e-3,\n atol=1e-5):\n \"\"\"Verifies that matrix_diag produces `solution` when fed `params`.\n\n Args:\n params: dictionary containing input parameters to matrix_diag.\n solution: numpy array representing the expected output of matrix_diag.\n rtol: relative tolerance for equality test.\n atol: absolute tolerance for equality test.\n \"\"\"\n diagonal = params[\"diagonal\"]\n with self.session() as session:\n for dtype in self.numeric_types - {np.int8, np.uint8}:\n expected = solution.astype(dtype)\n with self.test_scope():\n params[\"diagonal\"] = array_ops.placeholder(\n dtype, diagonal.shape, name=\"diagonal\")\n output = array_ops.matrix_diag(**params)\n result = session.run(output,\n {params[\"diagonal\"]: diagonal.astype(dtype)})\n self.assertEqual(output.dtype, expected.dtype)\n self.assertAllCloseAccordingToType(\n expected, result, rtol=rtol, atol=atol, bfloat16_rtol=0.03)\n\n # Generic tests applicable to both v1 and v2 ops.\n # Originally from unary_ops_tests.py.\n def testV1(self):\n # pyformat: disable\n vecs1 = np.array([[1, 2],\n [3, 4]])\n solution1 = np.array([[[1, 0], [0, 2]],\n [[3, 0], [0, 4]]])\n vecs2 = np.array([1, 2, 3, 4])\n solution2 = np.array([[1, 0, 0, 0],\n [0, 2, 0, 0],\n [0, 0, 3, 0],\n [0, 0, 0, 4]])\n vecs3 = np.array([[[1, 2, 3],\n [4, 5, 6]],\n [[7, 8, 9], # pylint: disable=bad-whitespace\n [10, 11, 12]]])\n solution3 = np.array([[[[1, 0, 0],\n [0, 2, 0],\n [0, 0, 3]],\n [[4, 0, 0],\n [0, 5, 0],\n [0, 0, 6]]],\n [[[7, 0, 0],\n [0, 8, 0],\n [0, 0, 9]],\n [[10, 0, 0],\n [0, 11, 0],\n [0, 0, 12]]]])\n # pyformat: enable\n self._assertOpOutputMatchesExpected({\"diagonal\": vecs1}, solution1)\n self._assertOpOutputMatchesExpected({\"diagonal\": vecs2}, solution2)\n self._assertOpOutputMatchesExpected({\"diagonal\": vecs3}, solution3)\n\n # From here onwards are v2-only tests.\n def testSquare(self):\n if compat.forward_compatible(*matrix_diag_v3_forward_compat_date):\n for align in alignment_list:\n for _, tests in [square_cases(align)]:\n for diag_index, (vecs, solution) in tests.items():\n params = {\"diagonal\": vecs[0], \"k\": diag_index, \"align\": align}\n self._assertOpOutputMatchesExpected(params, solution[0])\n\n def 
testSquareBatch(self):\n if compat.forward_compatible(*matrix_diag_v3_forward_compat_date):\n for align in alignment_list:\n for _, tests in [square_cases(align)]:\n for diag_index, (vecs, solution) in tests.items():\n params = {\"diagonal\": vecs, \"k\": diag_index, \"align\": align}\n self._assertOpOutputMatchesExpected(params, solution)\n\n def testRectangularBatch(self):\n if not compat.forward_compatible(*matrix_diag_v3_forward_compat_date):\n return\n\n # Stores expected num_rows and num_cols (when the other is given).\n # expected[(d_lower, d_upper)] = (expected_num_rows, expected_num_cols)\n test_list = list()\n\n # Do not align the test cases here. Re-alignment needs to happen after the\n # solution shape is updated.\n # Square cases:\n expected = {\n (-1, -1): (5, 4),\n (-4, -3): (5, 2),\n (-2, 1): (5, 5),\n (2, 4): (3, 5),\n }\n test_list.append((expected, square_cases()))\n\n # Tall cases\n expected = {\n (0, 0): (3, 3),\n (-4, -3): (5, 2),\n (-2, -1): (4, 3),\n (-2, 1): (3, 3),\n (1, 2): (2, 3)\n }\n test_list.append((expected, tall_cases()))\n\n # Fat cases\n expected = {\n (2, 2): (2, 4),\n (-2, 0): (3, 3),\n (-1, 1): (3, 3),\n (0, 3): (3, 3)\n }\n test_list.append((expected, fat_cases()))\n\n # Giving both num_rows and num_cols\n align = alignment_list[0]\n for _, tests in [tall_cases(align), fat_cases(align)]:\n for diag_index, (vecs, solution) in tests.items():\n self._assertOpOutputMatchesExpected(\n {\n \"diagonal\": vecs,\n \"k\": diag_index,\n \"num_rows\": solution.shape[-2],\n \"num_cols\": solution.shape[-1],\n \"align\": align\n }, solution)\n\n # We go through each alignment in a round-robin manner.\n align_index = 0\n\n # Giving just num_rows or num_cols.\n for expected, (_, tests) in test_list:\n for diag_index, (new_num_rows, new_num_cols) in expected.items():\n align = alignment_list[align_index]\n align_index = (align_index + 1) % len(alignment_list)\n vecs, solution = tests[diag_index]\n solution_given_num_rows = solution.take(\n indices=range(new_num_cols), axis=-1)\n # Repacks the diagonal input according to the new solution shape.\n vecs_given_num_rows = repack_diagonals(\n vecs,\n diag_index,\n solution_given_num_rows.shape[-2],\n new_num_cols,\n align=align)\n self._assertOpOutputMatchesExpected(\n {\n \"diagonal\": vecs_given_num_rows,\n \"k\": diag_index,\n \"num_rows\": solution_given_num_rows.shape[-2],\n \"align\": align\n }, solution_given_num_rows)\n solution_given_num_cols = solution.take(\n indices=range(new_num_rows), axis=-2)\n # Repacks the diagonal input according to the new solution shape.\n vecs_given_num_cols = repack_diagonals(\n vecs,\n diag_index,\n new_num_rows,\n solution_given_num_cols.shape[-1],\n align=align)\n self._assertOpOutputMatchesExpected(\n {\n \"diagonal\": vecs_given_num_cols,\n \"k\": diag_index,\n \"num_cols\": solution_given_num_cols.shape[-1],\n \"align\": align\n }, solution_given_num_cols)\n\n def testPadding(self):\n if compat.forward_compatible(*matrix_diag_v3_forward_compat_date):\n for padding_value, align in zip_to_first_list_length([555, -11],\n alignment_list):\n for _, tests in all_tests(align):\n for diag_index, (vecs, solution) in tests.items():\n mask = (solution == 0)\n solution = solution + (mask * padding_value)\n self._assertOpOutputMatchesExpected(\n {\n \"diagonal\": vecs,\n \"k\": diag_index,\n \"num_rows\": solution.shape[-2],\n \"num_cols\": solution.shape[-1],\n \"padding_value\": padding_value,\n \"align\": align\n }, solution)\n\n\nclass MatrixSetDiagTest(xla_test.XLATestCase):\n\n def 
_assertOpOutputMatchesExpected(self,\n params,\n solution,\n rtol=1e-3,\n atol=1e-5):\n \"\"\"Verifies that matrix_set_diag produces `solution` when fed `params`.\n\n Args:\n params: dictionary containing input parameters to matrix_set_diag.\n solution: numpy array representing the expected output of matrix_set_diag.\n rtol: relative tolerance for equality test.\n atol: absolute tolerance for equality test.\n \"\"\"\n input = params[\"input\"] # pylint: disable=redefined-builtin\n diagonal = params[\"diagonal\"]\n with self.session() as session:\n for dtype in self.numeric_types - {np.int8, np.uint8}:\n expected = solution.astype(dtype)\n with self.test_scope():\n params[\"input\"] = array_ops.placeholder(\n dtype, input.shape, name=\"input\")\n params[\"diagonal\"] = array_ops.placeholder(\n dtype, diagonal.shape, name=\"diagonal\")\n output = array_ops.matrix_set_diag(**params)\n result = session.run(\n output, {\n params[\"input\"]: input.astype(dtype),\n params[\"diagonal\"]: diagonal.astype(dtype)\n })\n self.assertEqual(output.dtype, expected.dtype)\n self.assertAllCloseAccordingToType(\n expected, result, rtol=rtol, atol=atol, bfloat16_rtol=0.03)\n\n # Generic tests applicable to both v1 and v2 ops.\n # Originally from binary_ops_tests.py.\n def testV1(self):\n test_cases = list()\n\n # pyformat: disable\n # pylint: disable=bad-whitespace\n # Square cases.\n input = np.array([[0, 1, 0], # pylint: disable=redefined-builtin\n [1, 0, 1],\n [1, 1, 1]])\n diag = np.array([1, 2, 3])\n solution = np.array([[1, 1, 0],\n [1, 2, 1],\n [1, 1, 3]])\n test_cases.append(({\"input\": input, \"diagonal\": diag}, solution))\n\n input = np.array([[[1, 0, 3],\n [0, 2, 0],\n [1, 0, 3]],\n [[4, 0, 4],\n [0, 5, 0],\n [2, 0, 6]]])\n diag = np.array([[-1, 0, -3],\n [-4, -5, -6]])\n solution = np.array([[[-1, 0, 3],\n [ 0, 0, 0],\n [ 1, 0, -3]],\n [[-4, 0, 4],\n [ 0, -5, 0],\n [ 2, 0, -6]]])\n test_cases.append(({\"input\": input, \"diagonal\": diag}, solution))\n\n # Rectangular cases.\n input = np.array([[0, 1, 0],\n [1, 0, 1]])\n diag = np.array([3, 4])\n solution = np.array([[3, 1, 0],\n [1, 4, 1]])\n test_cases.append(({\"input\": input, \"diagonal\": diag}, solution))\n\n input = np.array([[0, 1],\n [1, 0],\n [1, 1]])\n diag = np.array([3, 4])\n solution = np.array([[3, 1],\n [1, 4],\n [1, 1]])\n test_cases.append(({\"input\": input, \"diagonal\": diag}, solution))\n\n input = np.array([[[1, 0, 3],\n [0, 2, 0]],\n [[4, 0, 4],\n [0, 5, 0]]])\n diag = np.array([[-1, -2], [-4, -5]])\n solution = np.array([[[-1, 0, 3],\n [ 0, -2, 0]],\n [[-4, 0, 4],\n [ 0, -5, 0]]])\n test_cases.append(({\"input\": input, \"diagonal\": diag}, solution))\n # pylint: enable=bad-whitespace\n # pyformat: enable\n\n for test in test_cases:\n self._assertOpOutputMatchesExpected(test[0], test[1])\n\n # From here onwards are v2-only tests.\n def testSingleMatrix(self):\n if compat.forward_compatible(*matrix_diag_v3_forward_compat_date):\n for align in alignment_list:\n for _, tests in all_tests(align):\n for diag_index, (vecs, banded_mat) in tests.items():\n mask = (banded_mat[0] == 0)\n input_mat = np.random.randint(10, size=mask.shape)\n solution = input_mat * mask + banded_mat[0]\n self._assertOpOutputMatchesExpected(\n {\n \"input\": input_mat,\n \"diagonal\": vecs[0],\n \"k\": diag_index,\n \"align\": align\n }, solution)\n\n def testBatch(self):\n if compat.forward_compatible(*matrix_diag_v3_forward_compat_date):\n for align in alignment_list:\n for _, tests in all_tests(align):\n for diag_index, (vecs, banded_mat) in 
tests.items():\n mask = (banded_mat == 0)\n input_mat = np.random.randint(10, size=mask.shape)\n solution = input_mat * mask + banded_mat\n self._assertOpOutputMatchesExpected(\n {\n \"input\": input_mat,\n \"diagonal\": vecs,\n \"k\": diag_index,\n \"align\": align\n }, solution)\n\n\nclass MatrixDiagPartTest(xla_test.XLATestCase):\n\n def _assertOpOutputMatchesExpected(self,\n params,\n solution,\n rtol=1e-3,\n atol=1e-5):\n \"\"\"Verifies that matrix_diag_part produces `solution` when fed `params`.\n\n Args:\n params: dictionary containing input parameters to matrix_diag_part.\n solution: numpy array representing the expected output.\n rtol: relative tolerance for equality test.\n atol: absolute tolerance for equality test.\n \"\"\"\n input = params[\"input\"] # pylint: disable=redefined-builtin\n with self.session() as session:\n for dtype in self.numeric_types - {np.int8, np.uint8}:\n expected = solution.astype(dtype)\n with self.test_scope():\n params[\"input\"] = array_ops.placeholder(\n dtype, input.shape, name=\"input\")\n output = array_ops.matrix_diag_part(**params)\n result = session.run(output, {\n params[\"input\"]: input.astype(dtype),\n })\n self.assertEqual(output.dtype, expected.dtype)\n self.assertAllCloseAccordingToType(\n expected, result, rtol=rtol, atol=atol, bfloat16_rtol=0.03)\n\n # Generic tests applicable to both v1 and v2 ops.\n # Originally from unary_ops_tests.py.\n def testV1(self):\n matrices = np.arange(3 * 2 * 4).reshape([3, 2, 4])\n solution = np.array([[0, 5], [8, 13], [16, 21]])\n self._assertOpOutputMatchesExpected({\"input\": matrices}, solution)\n\n # From here onwards are v2-only tests.\n def testSingleMatrix(self):\n if compat.forward_compatible(*matrix_diag_v3_forward_compat_date):\n for align in alignment_list:\n test_list = [square_cases(align), tall_cases(align), fat_cases(align)]\n for mat, tests in test_list:\n for diag_index, (solution, _) in tests.items():\n self._assertOpOutputMatchesExpected(\n {\n \"input\": mat[0],\n \"k\": diag_index,\n \"align\": align\n }, solution[0])\n\n def testBatch(self):\n if compat.forward_compatible(*matrix_diag_v3_forward_compat_date):\n for align in alignment_list:\n for mat, tests in all_tests(align):\n for diag_index, (solution, _) in tests.items():\n self._assertOpOutputMatchesExpected(\n {\n \"input\": mat,\n \"k\": diag_index,\n \"align\": align\n }, solution)\n\n def testPadding(self):\n if compat.forward_compatible(*matrix_diag_v3_forward_compat_date):\n for padding_value, align in zip_to_first_list_length([555, -11],\n alignment_list):\n for mat, tests in all_tests(align):\n for diag_index, (solution, _) in tests.items():\n mask = (solution == 0)\n solution = solution + (mask * padding_value)\n self._assertOpOutputMatchesExpected(\n {\n \"input\": mat,\n \"k\": diag_index,\n \"padding_value\": padding_value,\n \"align\": align\n }, solution)\n\n\nif __name__ == \"__main__\":\n googletest.main()\n" ]
[ [ "tensorflow.python.keras.regularizers.l1_l2", "tensorflow.python.keras.layers.Dense", "tensorflow.python.keras.utils.np_utils.to_categorical", "tensorflow.python.eager.context.executing_eagerly", "tensorflow.python.ops.math_ops.abs", "tensorflow.python.keras.regularizers.l1", "tensorflow.python.platform.test.main", "tensorflow.python.keras.Model", "tensorflow.python.keras.testing_utils.should_run_tf_function", "tensorflow.python.keras.models.Sequential", "tensorflow.python.keras.regularizers.l2", "tensorflow.python.keras.layers.Input", "tensorflow.python.keras.testing_utils.should_run_eagerly", "tensorflow.python.keras.testing_utils.get_test_data", "tensorflow.python.keras.Input", "numpy.ones", "tensorflow.python.keras.layers.Average", "tensorflow.python.keras.models.Model", "tensorflow.python.keras.backend.mean", "tensorflow.python.ops.math_ops.reduce_sum" ], [ "tensorflow.python.ops.math_ops.imag", "tensorflow.python.ops.gradient_checker.compute_gradient", "numpy.minimum", "numpy.imag", "numpy.linspace", "tensorflow.python.ops.math_ops.ceil", "numpy.asarray", "tensorflow.python.ops.array_ops.split", "numpy.sqrt", "tensorflow.python.compat.compat.forward_compatible", "tensorflow.python.ops.array_ops.placeholder", "tensorflow.python.ops.math_ops.is_nan", "tensorflow.python.ops.math_ops.sqrt", "tensorflow.python.ops.math_ops.floor", "tensorflow.python.ops.gradients_impl.gradients", "numpy.polyval", "numpy.where", "tensorflow.python.ops.math_ops.real", "numpy.random.randint", "tensorflow.python.framework.test_util.force_cpu", "numpy.arange", "tensorflow.python.ops.array_ops.where", "tensorflow.python.ops.math_ops.is_inf", "numpy.finfo", "numpy.full", "numpy.ceil", "numpy.real", "tensorflow.python.framework.sparse_tensor.SparseTensor", "tensorflow.python.platform.test.main", "tensorflow.python.framework.test_util.device", "numpy.zeros", "tensorflow.python.ops.math_ops.polyval", "numpy.logical_not", "tensorflow.python.ops.math_ops.minimum", "numpy.isnan", "numpy.rint", "tensorflow.python.ops.math_ops.complex", "tensorflow.python.ops.math_ops.is_finite", "numpy.random.rand", "tensorflow.python.framework.ops.convert_to_tensor", "numpy.floor", "numpy.array", "tensorflow.python.ops.math_ops.rint", "tensorflow.python.ops.math_ops.conj", "numpy.maximum", "numpy.conj", "numpy.isfinite", "tensorflow.python.ops.math_ops.angle", "tensorflow.python.ops.array_ops.reshape", "numpy.shape", "numpy.broadcast_to", "numpy.prod", "tensorflow.python.framework.test_util.use_gpu", "numpy.angle", "tensorflow.python.ops.math_ops.maximum", "numpy.isinf", "numpy.vstack", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.data.experimental.ops.stats_aggregator.StatsAggregator", "tensorflow.python.data.experimental.ops.stats_ops.latency_stats", "tensorflow.python.data.experimental.ops.stats_ops.bytes_produced_stats", "tensorflow.python.ops.math_ops.mod", "tensorflow.python.platform.test.main", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.data.ops.dataset_ops.Dataset.range", "numpy.array", "tensorflow.python.data.kernel_tests.test_base.eager_only_combinations" ], [ "tensorflow.lite.testing.zip_test_utils.make_zip_of_tests", "tensorflow.compat.v1.nn.conv2d_backprop_input", "tensorflow.lite.testing.zip_test_utils.create_tensor_data", "tensorflow.nn.conv2d_transpose", "tensorflow.compat.v1.placeholder", "tensorflow.lite.testing.zip_test_utils.register_make_test_function", "tensorflow.nn.conv2d" ], [ "tensorflow.python.ops.array_ops.shape", 
"tensorflow.python.ops.array_ops.split", "tensorflow.python.compat.compat.forward_compatible", "tensorflow.python.ops.array_ops.gather_nd", "tensorflow.python.ops.array_ops.squeeze", "tensorflow.python.eager.backprop.GradientTape", "tensorflow.python.ops.array_ops.shape_n", "tensorflow.python.ops.array_ops.transpose", "tensorflow.python.ops.array_ops.rank", "tensorflow.python.ops.array_ops.unstack", "tensorflow.python.ops.array_ops.gather", "tensorflow.python.platform.test.main", "tensorflow.python.ops.array_ops.size", "tensorflow.python.ops.array_ops.searchsorted", "tensorflow.python.ops.array_ops.expand_dims", "tensorflow.python.ops.array_ops.matrix_diag_part", "tensorflow.python.ops.math_ops.cast", "tensorflow.python.ops.array_ops.matrix_set_diag", "tensorflow.python.ops.array_ops.tile", "tensorflow.python.ops.array_ops.slice", "tensorflow.python.ops.parallel_for.control_flow_ops.pfor", "tensorflow.python.ops.array_ops.zeros_like", "tensorflow.python.ops.array_ops.one_hot", "tensorflow.python.ops.nn.l2_loss", "tensorflow.python.ops.array_ops.stack", "tensorflow.python.ops.array_ops.broadcast_to", "tensorflow.python.ops.array_ops.concat", "tensorflow.python.ops.array_ops.reshape", "tensorflow.python.ops.array_ops.matrix_diag", "tensorflow.python.ops.array_ops.pad", "tensorflow.python.ops.array_ops.placeholder_with_default", "tensorflow.python.ops.random_ops.random_uniform", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.grappler.tf_optimizer.OptimizeGraph", "tensorflow.core.framework.tensor_shape_pb2.TensorShapeProto.Dim", "tensorflow.python.util.object_identity.ObjectIdentitySet", "tensorflow.python.eager.wrap_function.function_from_graph_def", "tensorflow.python.framework.dtypes.as_dtype", "tensorflow.core.framework.attr_value_pb2.AttrValue.ListValue", "tensorflow.python.framework.tensor_util.make_tensor_proto", "tensorflow.core.protobuf.config_pb2.ConfigProto", "tensorflow.core.framework.attr_value_pb2.AttrValue", "tensorflow.python.training.saver.export_meta_graph", "numpy.array", "tensorflow.core.framework.variable_pb2.VariableDef", "tensorflow.core.protobuf.meta_graph_pb2.CollectionDef", "tensorflow.core.framework.graph_pb2.GraphDef" ], [ "tensorflow.lite.testing.zip_test_utils.make_zip_of_tests", "tensorflow.lite.testing.zip_test_utils.create_tensor_data", "tensorflow.maximum", "tensorflow.compat.v1.placeholder", "tensorflow.lite.testing.zip_test_utils.register_make_test_function", "numpy.zeros" ], [ "tensorflow.lite.testing.zip_test_utils.make_zip_of_tests", "tensorflow.lite.testing.zip_test_utils.create_tensor_data", "tensorflow.cast", "tensorflow.compat.v1.placeholder", "tensorflow.lite.testing.zip_test_utils.register_make_test_function" ], [ "tensorflow.python.ops.array_ops.shape", "tensorflow.python.ops.check_ops.assert_equal", "tensorflow.python.framework.dtypes.as_dtype", "tensorflow.python.ops.math_ops.add", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.ops.array_ops.identity", "tensorflow.python.keras.regularizers.serialize", "tensorflow.python.util.nest.flatten" ], [ "tensorflow.not_equal", "tensorflow.lite.testing.zip_test_utils.make_zip_of_tests", "tensorflow.lite.testing.zip_test_utils.create_tensor_data", "tensorflow.compat.v1.placeholder", "tensorflow.lite.testing.zip_test_utils.register_make_test_function" ], [ "tensorflow.python.keras.distribute.distributed_training_utils.global_batch_size_supported", "numpy.asarray", "numpy.random.randn", 
"tensorflow.python.eager.context.executing_eagerly", "numpy.random.randint", "tensorflow.python.keras.mixed_precision.experimental.policy.global_policy", "numpy.reshape", "tensorflow.python.distribute.combinations.combine", "numpy.ceil", "tensorflow.python.util.nest.map_structure", "tensorflow.python.data.ops.dataset_ops.Dataset.from_tensor_slices", "tensorflow.python.eager.test.main", "numpy.random.choice", "tensorflow.python.keras.preprocessing.sequence.pad_sequences", "tensorflow.python.framework.random_seed.set_random_seed", "numpy.sum", "numpy.random.seed", "tensorflow.python.keras.backend.set_image_data_format", "tensorflow.python.keras.backend.set_value", "tensorflow.python.util.nest.flatten" ], [ "tensorflow.python.ops.array_ops.matrix_set_diag", "numpy.arange", "tensorflow.python.compat.compat.forward_compatible", "tensorflow.python.ops.array_ops.placeholder", "tensorflow.python.ops.array_ops.matrix_diag", "numpy.zeros_like", "numpy.random.randint", "tensorflow.python.platform.googletest.main", "numpy.array", "tensorflow.python.ops.array_ops.matrix_diag_part" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.4", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "2.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "2.7", "2.6", "1.13", "2.3", "2.4", "2.9", "2.5", "2.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.3", "2.5", "2.2", "2.4" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "2.9", "2.5", "2.8", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.4", "2.6", "2.2", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "2.8", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.5", "1.7", "1.10", "1.4" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.13", "2.3", "2.4", "2.9", "2.5", "2.2", "2.10" ] } ]
ersilia-os/osm-series4-candidates
[ "2d06ae0a5c26efea70d2a21f06a376625977b8b7" ]
[ "postprocess/_5_1_chemprop.py" ]
[ "from tqdm import tqdm\nimport pandas as pd\nfrom __init__ import FILE\n\ndf = pd.read_csv(FILE)\nsmiles = list(df[\"Smiles\"])\n\nwith open(\"_chemprop.csv\", \"w\") as f:\n f.write(\"smiles\\n\")\n for smi in smiles:\n f.write(\"{0}\\n\".format(smi))\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
jaketae/pytorch
[ "e5282c3cb8bf6ad8c5161f9d0cc271edb9abed25", "5654e6339879e438efb7cf50e88e356472eb0545" ]
[ "torch/distributed/_shard/sharded_tensor/__init__.py", "test/test_public_bindings.py" ]
[ "# coding=utf-8\n\nimport copy\nimport functools\nfrom typing import List\n\nimport torch\nimport torch.distributed._shard.sharding_spec as shard_spec\n\nfrom .api import (\n _register_sharded_op,\n Shard,\n ShardedTensor,\n ShardedTensorMetadata,\n TensorProperties,\n)\nfrom .metadata import ShardMetadata # noqa: F401\nfrom .partial_tensor import _PartialTensor\n\n\ndef empty(sharding_spec: shard_spec.ShardingSpec,\n *size,\n dtype=None,\n layout=torch.strided,\n requires_grad=False,\n pin_memory=False,\n memory_format=torch.contiguous_format,\n process_group=None,\n init_rrefs=False) -> ShardedTensor:\n \"\"\"\n Returns a :class:`ShardedTensor` filled with uninitialized data.\n Needs to be called on all ranks in an SPMD fashion.\n\n Args:\n sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification\n describing how to shard the Tensor.\n size (int...): a sequence of integers defining the shape of the output\n tensor. Can be a variable number of arguments or a collection like a list or tuple.\n\n Keyword args:\n dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.\n Default: if ``None``, uses a global default (see :func:`torch.set_default_tensor_type`).\n layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.\n Default: ``torch.strided``.\n requires_grad (bool, optional): If autograd should record operations on the\n returned tensor. Default: ``False``.\n pin_memory (bool, optional): If set, returned tensor would be allocated in\n the pinned memory. Works only for CPU tensors. Default: ``False``.\n memory_format (:class:`torch.memory_format`, optional): the desired memory format of\n returned Tensor. Default: ``torch.contiguous_format``.\n process_group (ProcessGroup, optional): The process group to work on. If None,\n the default process group will be used.\n init_rrefs (bool, optional): Whether or not to initialize\n :class:`torch.distributed.rpc.RRef`s pointing to remote shards.\n Need to initialize the RPC Framework if specified as ``True``.\n Default: ``False``.\n\n Returns:\n A :class:`ShardedTensor` object on each rank\n \"\"\"\n return ShardedTensor(\n sharding_spec,\n *size,\n dtype=dtype,\n layout=layout,\n requires_grad=requires_grad,\n pin_memory=pin_memory,\n memory_format=memory_format,\n process_group=process_group,\n init_rrefs=init_rrefs,\n )\n\ndef ones(sharding_spec: shard_spec.ShardingSpec,\n *size,\n dtype=None,\n layout=torch.strided,\n requires_grad=False,\n pin_memory=False,\n memory_format=torch.contiguous_format,\n process_group=None,\n init_rrefs=False) -> ShardedTensor:\n \"\"\"\n Returns a :class:`ShardedTensor` with the scalar value 1.\n Needs to be called on all ranks in an SPMD fashion.\n\n Args:\n sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification\n describing how to shard the Tensor.\n size (int...): a sequence of integers defining the shape of the output\n tensor. Can be a variable number of arguments or a collection like a list or tuple.\n\n Keyword args:\n dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.\n Default: if ``None``, uses a global default (see :func:`torch.set_default_tensor_type`).\n layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.\n Default: ``torch.strided``.\n requires_grad (bool, optional): If autograd should record operations on the\n returned tensor. 
Default: ``False``.\n pin_memory (bool, optional): If set, returned tensor would be allocated in\n the pinned memory. Works only for CPU tensors. Default: ``False``.\n process_group (ProcessGroup, optional): The process group to work on. If None,\n the default process group will be used.\n init_rrefs (bool, optional): Whether or not to initialize\n :class:`torch.distributed.rpc.RRef`s pointing to remote shards.\n Need to initialize the RPC Framework if specified as ``True``.\n Default: ``False``.\n\n Returns:\n A :class:`ShardedTensor` object on each rank\n \"\"\"\n return full(\n sharding_spec,\n size,\n fill_value=1,\n dtype=dtype,\n layout=layout,\n requires_grad=requires_grad,\n pin_memory=pin_memory,\n memory_format=memory_format,\n process_group=process_group,\n init_rrefs=init_rrefs\n )\n\ndef zeros(sharding_spec: shard_spec.ShardingSpec,\n *size,\n dtype=None,\n layout=torch.strided,\n requires_grad=False,\n pin_memory=False,\n memory_format=torch.contiguous_format,\n process_group=None,\n init_rrefs=False) -> ShardedTensor:\n \"\"\"\n Returns a :class:`ShardedTensor` filled with the scalar value 0.\n Needs to be called on all ranks in an SPMD fashion.\n\n Args:\n sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification\n describing how to shard the Tensor.\n size (int...): a sequence of integers defining the shape of the output\n tensor. Can be a variable number of arguments or a collection like a list or tuple.\n\n Keyword args:\n dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.\n Default: if ``None``, uses a global default (see :func:`torch.set_default_tensor_type`).\n layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.\n Default: ``torch.strided``.\n requires_grad (bool, optional): If autograd should record operations on the\n returned tensor. Default: ``False``.\n pin_memory (bool, optional): If set, returned tensor would be allocated in\n the pinned memory. Works only for CPU tensors. Default: ``False``.\n process_group (ProcessGroup, optional): The process group to work on. If None,\n the default process group will be used.\n init_rrefs (bool, optional): Whether or not to initialize\n :class:`torch.distributed.rpc.RRef`s pointing to remote shards.\n Need to initialize the RPC Framework if specified as ``True``.\n Default: ``False``.\n\n Returns:\n A :class:`ShardedTensor` object on each rank\n \"\"\"\n return full(\n sharding_spec,\n size,\n fill_value=0,\n dtype=dtype,\n layout=layout,\n requires_grad=requires_grad,\n pin_memory=pin_memory,\n memory_format=memory_format,\n process_group=process_group,\n init_rrefs=init_rrefs\n )\n\ndef full(sharding_spec: shard_spec.ShardingSpec,\n size,\n fill_value=torch.types.Number,\n dtype=None,\n layout=torch.strided,\n requires_grad=False,\n pin_memory=False,\n memory_format=torch.contiguous_format,\n process_group=None,\n init_rrefs=False) -> ShardedTensor:\n \"\"\"\n Creates a :class:`ShardedTensor` filled with fill_value. The tensor’s dtype\n is inferred from fill_value. If dtype is specified, it will override the\n inferred type from fill_value. 
Needs to be called on all ranks in an SPMD fashion.\n Args:\n sharding_spec (:class:`torch.distributed._sharding_spec.ShardingSpec`): The specification\n describing how to shard the Tensor.\n size (int...): a list, tuple, or `torch.Size` of integers defining the shape of the\n output tensor.\n fill_value (Scalar) – the value to fill the output tensor with.\n Keyword args:\n dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.\n Default: if ``None``, uses a global default (see :func:`torch.set_default_tensor_type`).\n layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.\n Default: ``torch.strided``.\n requires_grad (bool, optional): If autograd should record operations on the\n returned tensor. Default: ``False``.\n pin_memory (bool, optional): If set, returned tensor would be allocated in\n the pinned memory. Works only for CPU tensors. Default: ``False``.\n process_group (ProcessGroup, optional): The process group to work on. If None,\n the default process group will be used.\n init_rrefs (bool, optional): Whether or not to initialize\n :class:`torch.distributed.rpc.RRef`s pointing to remote shards.\n Need to initialize the RPC Framework if specified as ``True``.\n Default: ``False``.\n Returns:\n A :class:`ShardedTensor` object on each rank\n \"\"\"\n sharded_tensor = ShardedTensor(\n sharding_spec,\n *size,\n dtype=dtype,\n layout=layout,\n requires_grad=requires_grad,\n pin_memory=pin_memory,\n memory_format=memory_format,\n process_group=process_group,\n init_rrefs=init_rrefs,\n )\n torch.nn.init.constant_(sharded_tensor, fill_value) # type: ignore[arg-type]\n return sharded_tensor\n\ndef rand(sharding_spec: shard_spec.ShardingSpec,\n *size,\n dtype=None,\n layout=torch.strided,\n requires_grad=False,\n pin_memory=False,\n memory_format=torch.contiguous_format,\n process_group=None,\n init_rrefs=False) -> ShardedTensor:\n \"\"\"\n Creates a :class:`ShardedTensor` filled with fill_value. The tensor’s dtype\n is inferred from fill_value. If dtype is specified, it will override the\n inferred type from fill_value. Needs to be called on all ranks in an SPMD fashion.\n\n Args:\n sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification\n describing how to shard the Tensor.\n size (int...): a list, tuple, or `torch.Size` of integers defining the shape of the\n output tensor.\n fill_value (Scalar) – the value to fill the output tensor with.\n\n Keyword args:\n dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.\n Default: if ``None``, uses a global default (see :func:`torch.set_default_tensor_type`).\n layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.\n Default: ``torch.strided``.\n requires_grad (bool, optional): If autograd should record operations on the\n returned tensor. Default: ``False``.\n pin_memory (bool, optional): If set, returned tensor would be allocated in\n the pinned memory. Works only for CPU tensors. Default: ``False``.\n process_group (ProcessGroup, optional): The process group to work on. 
If None,\n the default process group will be used.\n init_rrefs (bool, optional): Whether or not to initialize\n :class:`torch.distributed.rpc.RRef`s pointing to remote shards.\n Need to initialize the RPC Framework if specified as ``True``.\n Default: ``False``.\n\n Returns:\n A :class:`ShardedTensor` object on each rank\n \"\"\"\n sharded_tensor = ShardedTensor(\n sharding_spec,\n *size,\n dtype=dtype,\n layout=layout,\n requires_grad=requires_grad,\n pin_memory=pin_memory,\n memory_format=memory_format,\n process_group=process_group,\n init_rrefs=init_rrefs,\n )\n torch.nn.init.uniform_(sharded_tensor, 0, 1) # type: ignore[arg-type]\n return sharded_tensor\n\ndef init_from_local_shards(\n local_shards: List[Shard],\n *global_size,\n process_group=None,\n init_rrefs=False) -> ShardedTensor:\n \"\"\"\n Creates an :class:`ShardedTensor` from local shards and the global metadata.\n Needs to be called on all ranks in an SPMD fashion.\n\n Args:\n local_shards (List[:class `torch.distributed._shard.sharded_tensor.Shard`]): A list\n of shards that represent the local shards on this rank.\n global_size (int...): a list, tuple, or `torch.Size` of integers defining the\n shape of the overall sharded tensor.\n\n Keyword args:\n process_group (ProcessGroup, optional): The process group to work on. If None,\n the default process group will be used.\n init_rrefs (bool, optional): Whether or not to initialize\n :class:`torch.distributed.rpc.RRef`s pointing to remote shards.\n Need to initialize the RPC Framework if specified as ``True``.\n Default: ``False``.\n\n Returns:\n A :class:`ShardedTensor` object handle on this rank\n\n\n Examples:\n Suppose we want construct a sharded tensor on two ranks, global size = (10, 5),\n each shard have a (5, 5) local tensor, we can do it like below:\n\n on rank 0:\n >>> local_shard_metadata = ShardMetadata(\n >>> shard_offsets=[0, 0]\n >>> shard_lengths=[5, 5]\n >>> placement=\"rank:0/cuda:0\"\n >>> )\n >>> local_shards = [Shard(torch.randn(5, 5), local_shard_metadata)]\n >>> sharded_tensor = init_from_local_shards(local_shards, [10, 5])\n\n on rank 1:\n >>> local_shard_metadata = ShardMetadata(\n >>> shard_offsets=[5, 0]\n >>> shard_lengths=[5, 5]\n >>> placement=\"rank:1/cuda:1\"\n >>> )\n >>> local_shards = [Shard(torch.randn(5, 5), local_shard_metadata)]\n >>> sharded_tensor = init_from_local_shards(local_shards, [10, 5])\n \"\"\"\n return ShardedTensor._init_from_local_shards(\n local_shards,\n *global_size,\n process_group=process_group,\n init_rrefs=init_rrefs\n )\n\ndef state_dict_hook(module, destination, prefix, local_metadata):\n \"\"\"\n Hook to add ShardedTensor to Module's ``state_dict``. Needs to be\n registered to the Module using\n :meth:`torch.nn.Module._register_state_dict_hook`.\n \"\"\"\n for submodule_name, submodule in module.named_modules():\n for attr_name, attr in submodule.__dict__.items():\n if isinstance(attr, ShardedTensor):\n destination[prefix + submodule_name + '.' + attr_name] = attr\n\ndef pre_load_state_dict_hook(module, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):\n \"\"\"\n Pre-load state dict hook to add ShardedTensor to the module.\n \"\"\"\n for submodule_name, submodule in module.named_modules():\n for attr_name, attr in submodule.__dict__.items():\n key = prefix + submodule_name + '.' 
+ attr_name\n if key in state_dict:\n if isinstance(state_dict[key], ShardedTensor):\n setattr(submodule, attr_name, state_dict[key])\n\ndef sharded_op_impl(func):\n \"\"\"\n Provides a way for users to write their own custom sharded operator. This\n can be used to override existing ShardedTensor operators or write a new\n one not supported by ShardedTensor. If the operator in question is covered\n by ``__torch_function__`` dispatch and has a ShardedTensor as any of its\n parameters, the function provided will be invoked for that operator.\n\n Example::\n >>> @sharded_op_impl(torch.nn.functional.linear)\n >>> def my_custom_sharded_linear(types, args, kwargs, process_group):\n >>> ....\n >>>\n >>> input = torch.rand(10, 32)\n >>> weight = sharded_tensor.rand(32, 16)\n >>> bias = torch.rand(16)\n >>> # This will call 'my_custom_sharded_linear'\n >>> torch.nn.functional.linear(input, weight, bias)\n\n The types, args and kwargs parameters are the same parameters that are\n passed to ``__torch_function__`` dispatch API\n (https://pytorch.org/docs/stable/notes/extending.html#extending-torch).\n There is an additional ``process_group`` parameter which is the\n process_group used for the ShardedTensor and can be used by\n implementations for communications within a sharded implementation.\n\n Args:\n func(Callable): Torch function for which we want to provide a sharded\n implementation (ex: torch.nn.functional.linear)\n \"\"\"\n def decorator_sharded_func(wrapped_func):\n _register_sharded_op(func, wrapped_func)\n\n @functools.wraps(wrapped_func)\n def wrapper(*args, **kwargs):\n return wrapped_func(*args, **kwargs)\n return wrapper\n return decorator_sharded_func\n\n# Import all builtin sharded ops\nfrom ._ops import * # noqa: F403\n\ndef _reshard_output(\n module: torch.nn.Module,\n resharding_spec: shard_spec.ShardingSpec) -> torch.nn.Module:\n \"\"\"\n Hook a module with local shards collection in the forward pass according\n to the given ``resharding_spec``.\n\n Args:\n module (:class:`torch.nn.Module`): Module whose output needs to be resharded.\n resharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`):\n The specification describing how the output of the module will be resharded.\n\n Returns:\n A :class:`torch.nn.Module` object with collection API hooked.\n \"\"\"\n def hook_func(_module, _input, output):\n if isinstance(output, ShardedTensor) or isinstance(output, _PartialTensor):\n return output.reshard(resharding_spec)\n return output\n module.register_forward_hook(hook_func)\n return module\n\n\ndef _collect_local_shard(module: torch.nn.Module) -> torch.nn.Module:\n \"\"\"\n Hook a module with local shards collection in the forward pass.\n\n This API is typically used to convert a sharded representation back to data parallel\n representation. In particular, it returns the local tensor for this Shard. If the\n size along the sharding dimension for the local tensor is 1, this dimension is removed\n from the final result. 
For example a [4, 16] ShardedTensor across 4 ranks is typically\n a local Tensor of size [16] across each rank and not [1, 16] across each rank.\n\n Args:\n module (:class:`torch.nn.Module`): Module whose output needs to be resharded.\n\n Returns:\n A :class:`torch.nn.Module` object with collection API hooked.\n \"\"\"\n\n def hook_func(_module, _input, output):\n if isinstance(output, ShardedTensor):\n local_tensor = output.local_tensor()\n # Squeeze the # of dimensions manually.\n if local_tensor.size(output._sharding_spec.dim) == 1: # type: ignore[attr-defined]\n local_tensor = local_tensor.squeeze(\n output._sharding_spec.dim # type: ignore[attr-defined]\n )\n return local_tensor\n module.register_forward_hook(hook_func)\n return module\n", "# -*- coding: utf-8 -*-\n# Owner(s): [\"module: autograd\"]\n\nfrom torch.testing._internal.common_utils import TestCase, run_tests, IS_WINDOWS\nimport pkgutil\nimport torch\nimport sys\nfrom typing import Callable\nimport inspect\nimport json\nimport os\nimport unittest\n\nclass TestPublicBindings(TestCase):\n def test_no_new_bindings(self):\n \"\"\"\n This test aims to stop the introduction of new JIT bindings into torch._C\n whose names do not start with _. Such bindings are made available as\n torch.XXX, which may not be desirable.\n\n If your change causes this test to fail, add your new binding to a relevant\n submodule of torch._C, such as torch._C._jit (or other relevant submodule of\n torch._C). If your binding really needs to be available as torch.XXX, add it\n to torch._C and add it to the allowlist below.\n\n If you have removed a binding, remove it from the allowlist as well.\n \"\"\"\n # This allowlist contains every binding in torch._C that is copied into torch at\n # the time of writing. It was generated with\n #\n # {elem for elem in dir(torch._C) if not elem.startswith(\"_\")}\n #\n torch_C_allowlist_superset = {\n \"AggregationType\",\n \"AliasDb\",\n \"AnyType\",\n \"Argument\",\n \"ArgumentSpec\",\n \"autocast_decrement_nesting\",\n \"autocast_increment_nesting\",\n \"AVG\",\n \"BenchmarkConfig\",\n \"BenchmarkExecutionStats\",\n \"BFloat16StorageBase\",\n \"Block\",\n \"BoolStorageBase\",\n \"BoolType\",\n \"BufferDict\",\n \"ByteStorageBase\",\n \"CallStack\",\n \"Capsule\",\n \"CharStorageBase\",\n \"ClassType\",\n \"clear_autocast_cache\",\n \"Code\",\n \"CompilationUnit\",\n \"CompleteArgumentSpec\",\n \"ComplexDoubleStorageBase\",\n \"ComplexFloatStorageBase\",\n \"ComplexType\",\n \"ConcreteModuleType\",\n \"ConcreteModuleTypeBuilder\",\n \"CONV_BN_FUSION\",\n \"cpp\",\n \"CudaBFloat16StorageBase\",\n \"CudaBFloat16TensorBase\",\n \"CudaBFloat16TensorBase\",\n \"CudaBoolStorageBase\",\n \"CudaBoolTensorBase\",\n \"CudaBoolTensorBase\",\n \"CudaByteStorageBase\",\n \"CudaByteTensorBase\",\n \"CudaByteTensorBase\",\n \"CudaCharStorageBase\",\n \"CudaCharTensorBase\",\n \"CudaCharTensorBase\",\n \"CudaComplexDoubleStorageBase\",\n \"CudaComplexDoubleTensorBase\",\n \"CudaComplexDoubleTensorBase\",\n \"CudaComplexFloatStorageBase\",\n \"CudaComplexFloatTensorBase\",\n \"CudaComplexFloatTensorBase\",\n \"CudaDoubleStorageBase\",\n \"CudaDoubleTensorBase\",\n \"CudaDoubleTensorBase\",\n \"CudaFloatStorageBase\",\n \"CudaFloatTensorBase\",\n \"CudaHalfStorageBase\",\n \"CudaHalfTensorBase\",\n \"CudaIntStorageBase\",\n \"CudaIntTensorBase\",\n \"CudaIntTensorBase\",\n \"CudaLongStorageBase\",\n \"CudaLongTensorBase\",\n \"CudaLongTensorBase\",\n \"CudaShortStorageBase\",\n \"CudaShortTensorBase\",\n \"CudaShortTensorBase\",\n 
\"DeepCopyMemoTable\",\n \"default_generator\",\n \"DeserializationStorageContext\",\n \"device\",\n \"DeviceObjType\",\n \"DictType\",\n \"DisableTorchFunction\",\n \"DoubleStorageBase\",\n \"dtype\",\n \"EnumType\",\n \"ErrorReport\",\n \"ExecutionPlan\",\n \"FatalError\",\n \"FileCheck\",\n \"finfo\",\n \"FloatStorageBase\",\n \"FloatType\",\n \"fork\",\n \"FunctionSchema\",\n \"FUSE_ADD_RELU\",\n \"Future\",\n \"FutureType\",\n \"Generator\",\n \"get_autocast_cpu_dtype\",\n \"get_default_dtype\",\n \"get_num_interop_threads\",\n \"get_num_threads\",\n \"Gradient\",\n \"Graph\",\n \"GraphExecutorState\",\n \"HalfStorageBase\",\n \"has_cuda\",\n \"has_cudnn\",\n \"has_lapack\",\n \"has_mkl\",\n \"has_mkldnn\",\n \"has_mlc\",\n \"has_openmp\",\n \"has_spectral\",\n \"HOIST_CONV_PACKED_PARAMS\",\n \"iinfo\",\n \"import_ir_module_from_buffer\",\n \"import_ir_module\",\n \"InferredType\",\n \"init_num_threads\",\n \"INSERT_FOLD_PREPACK_OPS\",\n \"InterfaceType\",\n \"IntStorageBase\",\n \"IntType\",\n \"SymIntType\",\n \"IODescriptor\",\n \"is_anomaly_enabled\",\n \"is_autocast_cache_enabled\",\n \"is_autocast_cpu_enabled\",\n \"is_autocast_enabled\",\n \"is_grad_enabled\",\n \"is_inference_mode_enabled\",\n \"JITException\",\n \"layout\",\n \"ListType\",\n \"LiteScriptModule\",\n \"LockingLogger\",\n \"LoggerBase\",\n \"LongStorageBase\",\n \"memory_format\",\n \"merge_type_from_type_comment\",\n \"MobileOptimizerType\",\n \"ModuleDict\",\n \"Node\",\n \"NoneType\",\n \"NoopLogger\",\n \"NumberType\",\n \"OperatorInfo\",\n \"OptionalType\",\n \"ParameterDict\",\n \"parse_ir\",\n \"parse_schema\",\n \"parse_type_comment\",\n \"PyObjectType\",\n \"PyTorchFileReader\",\n \"PyTorchFileWriter\",\n \"QInt32StorageBase\",\n \"QInt8StorageBase\",\n \"qscheme\",\n \"QUInt4x2StorageBase\",\n \"QUInt2x4StorageBase\",\n \"QUInt8StorageBase\",\n \"read_vitals\",\n \"REMOVE_DROPOUT\",\n \"RRefType\",\n \"ScriptClass\",\n \"ScriptClassFunction\",\n \"ScriptDict\",\n \"ScriptDictIterator\",\n \"ScriptDictKeyIterator\",\n \"ScriptList\",\n \"ScriptListIterator\",\n \"ScriptFunction\",\n \"ScriptMethod\",\n \"ScriptModule\",\n \"ScriptModuleSerializer\",\n \"ScriptObject\",\n \"ScriptObjectProperty\",\n \"SerializationStorageContext\",\n \"set_anomaly_enabled\",\n \"set_autocast_cache_enabled\",\n \"set_autocast_cpu_dtype\",\n \"set_autocast_cpu_enabled\",\n \"set_autocast_enabled\",\n \"set_flush_denormal\",\n \"set_num_interop_threads\",\n \"set_num_threads\",\n \"set_vital\",\n \"ShortStorageBase\",\n \"Size\",\n \"StaticModule\",\n \"Stream\",\n \"StreamObjType\",\n \"StringType\",\n \"SUM\",\n \"TensorType\",\n \"ThroughputBenchmark\",\n \"TracingState\",\n \"TupleType\",\n \"Type\",\n \"unify_type_list\",\n \"UnionType\",\n \"Use\",\n \"Value\",\n \"autocast_decrement_nesting\",\n \"autocast_increment_nesting\",\n \"clear_autocast_cache\",\n \"cpp\",\n \"default_generator\",\n \"device\",\n \"dtype\",\n \"finfo\",\n \"fork\",\n \"get_default_dtype\",\n \"get_num_interop_threads\",\n \"get_num_threads\",\n \"has_cuda\",\n \"has_cudnn\",\n \"has_lapack\",\n \"has_mkl\",\n \"has_mkldnn\",\n \"has_mlc\",\n \"has_openmp\",\n \"iinfo\",\n \"import_ir_module\",\n \"import_ir_module_from_buffer\",\n \"init_num_threads\",\n \"is_anomaly_enabled\",\n \"is_autocast_enabled\",\n \"is_grad_enabled\",\n \"layout\",\n \"memory_format\",\n \"merge_type_from_type_comment\",\n \"parse_ir\",\n \"parse_schema\",\n \"parse_type_comment\",\n \"qscheme\",\n \"set_anomaly_enabled\",\n \"set_autocast_enabled\",\n 
'set_autocast_gpu_dtype',\n 'get_autocast_gpu_dtype',\n \"set_flush_denormal\",\n \"set_num_interop_threads\",\n \"set_num_threads\",\n \"unify_type_list\",\n \"vitals_enabled\",\n\n \"wait\",\n }\n torch_C_bindings = {elem for elem in dir(torch._C) if not elem.startswith(\"_\")}\n\n # Check that the torch._C bindings are all in the allowlist. Since\n # bindings can change based on how PyTorch was compiled (e.g. with/without\n # CUDA), the two may not be an exact match but the bindings should be\n # a subset of the allowlist.\n difference = torch_C_bindings.difference(torch_C_allowlist_superset)\n msg = f\"torch._C had bindings that are not present in the allowlist:\\n{difference}\"\n self.assertTrue(torch_C_bindings.issubset(torch_C_allowlist_superset), msg)\n\n # AttributeError: module 'torch.distributed' has no attribute '_shard'\n @unittest.skipIf(IS_WINDOWS, \"Distributed Attribute Error\")\n def test_correct_module_names(self):\n '''\n An API is considered public, if its `__module__` starts with `torch.`\n and there is no name in `__module__` or the object itself that starts with “_”.\n Each public package should either:\n - (preferred) Define `__all__` and all callables and classes in there must have their\n `__module__` start with the current submodule's path. Things not in `__all__` should\n NOT have their `__module__` start with the current submodule.\n - (for simple python-only modules) Not define `__all__` and all the elements in `dir(submod)` must have their\n `__module__` that start with the current submodule.\n '''\n failure_list = []\n with open(os.path.join(os.path.dirname(__file__), 'allowlist_for_publicAPI.json')) as json_file:\n # no new entries should be added to this allow_dict.\n # New APIs must follow the public API guidelines.\n allow_dict = json.load(json_file)\n\n def test_module(modname):\n split_strs = modname.split('.')\n mod = sys.modules.get(modname)\n for elem in split_strs:\n if elem.startswith(\"_\"):\n return\n\n def add_to_failure_list_if_not_in_allow_dict(modname, elem, elem_module):\n if modname in allow_dict and elem in allow_dict[modname]:\n return\n failure_list.append((modname, elem, elem_module))\n\n # verifies that each public API has the correct module name and naming semantics\n def looks_public_or_not(elem, modname, mod, is_public=True):\n obj = getattr(mod, elem)\n if not (isinstance(obj, Callable) or inspect.isclass(obj)):\n return\n elem_module = getattr(obj, '__module__', None)\n elem_modname_starts_with_mod = elem_module is not None and \\\n elem_module.startswith(modname) and '._' not in elem_module\n # elem's name must NOT begin with an `_` and it's module name\n # SHOULD start with it's current module since it's a public API\n looks_public = not elem.startswith('_') and elem_modname_starts_with_mod\n if is_public != looks_public:\n add_to_failure_list_if_not_in_allow_dict(modname, elem, elem_module)\n\n if hasattr(modname, '__all__'):\n public_api = mod.__all__\n all_api = dir(modname)\n for elem in all_api:\n looks_public_or_not(elem, modname, is_public=elem in public_api)\n\n else:\n all_api = dir(mod)\n for elem in all_api:\n if not elem.startswith('_'):\n looks_public_or_not(elem, modname, mod, is_public=True)\n\n for _, modname, ispkg in pkgutil.walk_packages(path=torch.__path__, prefix=torch.__name__ + '.'):\n test_module(modname)\n\n test_module('torch')\n msg = \"Following new APIs ( displayed in the form (module, element, element module) )\" \\\n \" were added that do not meet our guidelines for public API\" \\\n \" Please 
review https://docs.google.com/document/d/10yx2-4gs0gTMOimVS403MnoAWkqitS8TUHX73PN8EjE/edit?pli=1#\" \\\n \" for more information:\\n\" + \"\\n\".join(map(str, failure_list))\n\n # empty lists are considered false in python\n self.assertTrue(not failure_list, msg)\n\nif __name__ == '__main__':\n run_tests()\n" ]
[ [ "torch.nn.init.constant_", "torch.nn.init.uniform_" ], [ "torch.testing._internal.common_utils.run_tests" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ofirpress/shortformer
[ "0281f7618fb3833c8ac99f3e8e0512aed95fa2a1" ]
[ "fairseq/data/iterators.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport itertools\nimport logging\nimport math\nimport operator\nimport os\nimport queue\nimport time\nfrom threading import Thread\n\nimport numpy as np\nimport torch\n\nfrom fairseq.data import data_utils\n\n\nlogger = logging.getLogger(__name__)\n\n# Object used by _background_consumer to signal the source is exhausted\n# to the main thread.\n_sentinel = object()\n\n\nclass CountingIterator(object):\n \"\"\"Wrapper around an iterable that maintains the iteration count.\n\n Args:\n iterable (iterable): iterable to wrap\n start (int): starting iteration count. Note that this doesn't\n actually advance the iterator.\n total (int): override the iterator length returned by\n ``__len__``. This can be used to truncate *iterator*.\n\n Attributes:\n n (int): number of elements consumed from this iterator\n \"\"\"\n\n def __init__(self, iterable, start=None, total=None):\n self.iterable = iterable\n self.itr = iter(self)\n\n if start is None:\n self.n = getattr(iterable, 'n', 0)\n else:\n self.n = start\n\n if total is None:\n self.total = self.n + len(iterable)\n else:\n self.total = total\n\n def __len__(self):\n return self.total\n\n def __iter__(self):\n for x in self.iterable:\n if self.n >= self.total:\n raise RuntimeError(\n 'Mismatch between actual and expected iterable length. '\n 'Please report this to the fairseq developers.'\n )\n self.n += 1\n yield x\n\n def __next__(self):\n return next(self.itr)\n\n def has_next(self):\n \"\"\"Whether the iterator has been exhausted.\"\"\"\n return self.n < len(self)\n\n def skip(self, num_to_skip):\n \"\"\"Fast-forward the iterator by skipping *num_to_skip* elements.\"\"\"\n next(itertools.islice(self.itr, num_to_skip, num_to_skip), None)\n return self\n\n def take(self, n):\n \"\"\"\n Truncates the iterator to n elements at most.\n \"\"\"\n self.total = min(self.total, n)\n\n # Propagate this change to the underlying iterator\n # Only take after what we have already consumed (i.e. after restarting\n # from checkpoint mid epoch, we have to subtract self.n which is the\n # starting point)\n #\n # This to maintain the invariant self.total = self.n + len(iterable),\n # before calling __next__ or __iter__\n propagated_take = max(n - self.n, 0)\n if hasattr(self.iterable, \"take\"):\n self.iterable.take(propagated_take)\n else:\n self.iterable = itertools.islice(self.iterable, propagated_take)\n\n\nclass EpochBatchIterating(object):\n def __len__(self) -> int:\n raise NotImplementedError\n\n @property\n def next_epoch_idx(self):\n raise NotImplementedError\n\n def next_epoch_itr(self, shuffle=True, fix_batches_to_gpus=False):\n \"\"\"Return a new iterator over the dataset.\n\n Args:\n shuffle (bool, optional): shuffle batches before returning the\n iterator (default: True).\n fix_batches_to_gpus: ensure that batches are always\n allocated to the same shards across epochs. 
Requires\n that :attr:`dataset` supports prefetching (default: False).\n \"\"\"\n raise NotImplementedError\n\n def end_of_epoch(self) -> bool:\n \"\"\"Returns whether the most recent epoch iterator has been exhausted\"\"\"\n raise NotImplementedError\n\n @property\n def iterations_in_epoch(self) -> int:\n \"\"\"The number of consumed batches in the current epoch.\"\"\"\n raise NotImplementedError\n\n def state_dict(self):\n \"\"\"Returns a dictionary containing a whole state of the iterator.\"\"\"\n raise NotImplementedError\n\n def load_state_dict(self, state_dict):\n \"\"\"Copies the state of the iterator from the given *state_dict*.\"\"\"\n raise NotImplementedError\n\n\nclass StreamingEpochBatchIterator(EpochBatchIterating):\n def __init__(\n self, dataset, epoch=1, num_shards=1, shard_id=0,\n ):\n assert isinstance(dataset, torch.utils.data.IterableDataset)\n self.dataset = dataset\n self.epoch = max(epoch, 1) # we use 1-based indexing for epochs\n self._current_epoch_iterator = None\n self.num_shards = num_shards\n self.shard_id = shard_id\n\n @property\n def next_epoch_idx(self):\n \"\"\"Return the epoch index after *next_epoch_itr* is called.\"\"\"\n if self._current_epoch_iterator is not None and self.end_of_epoch():\n return self.epoch + 1\n else:\n return self.epoch\n\n def next_epoch_itr(self, shuffle=True, fix_batches_to_gpus=False):\n self.epoch = self.next_epoch_idx\n self.dataset.set_epoch(self.epoch)\n self._current_epoch_iterator = CountingIterator(\n iterable=ShardedIterator(\n iterable=self.dataset,\n num_shards=self.num_shards,\n shard_id=self.shard_id,\n ),\n )\n return self._current_epoch_iterator\n\n def end_of_epoch(self) -> bool:\n return not self._current_epoch_iterator.has_next()\n\n @property\n def iterations_in_epoch(self) -> int:\n if self._current_epoch_iterator is not None:\n return self._current_epoch_iterator.n\n return 0\n\n def state_dict(self):\n return {\n 'epoch': self.epoch,\n }\n\n def load_state_dict(self, state_dict):\n self.epoch = state_dict['epoch']\n\n\nclass EpochBatchIterator(EpochBatchIterating):\n \"\"\"A multi-epoch iterator over a :class:`torch.utils.data.Dataset`.\n\n Compared to :class:`torch.utils.data.DataLoader`, this iterator:\n\n - can be reused across multiple epochs with the :func:`next_epoch_itr`\n method (optionally shuffled between epochs)\n - can be serialized/deserialized with the :func:`state_dict` and\n :func:`load_state_dict` methods\n - supports sharding with the *num_shards* and *shard_id* arguments\n\n Args:\n dataset (~torch.utils.data.Dataset): dataset from which to load the data\n collate_fn (callable): merges a list of samples to form a mini-batch\n batch_sampler (~torch.utils.data.Sampler or a callable): an iterator over batches of\n indices, or a callable to create such an iterator (~torch.utils.data.Sampler).\n A callable batch_sampler will be called for each epoch to enable per epoch dynamic\n batch iterators defined by this callable batch_sampler.\n seed (int, optional): seed for random number generator for\n reproducibility (default: 1).\n num_shards (int, optional): shard the data iterator into N\n shards (default: 1).\n shard_id (int, optional): which shard of the data iterator to\n return (default: 0).\n num_workers (int, optional): how many subprocesses to use for data\n loading. 
0 means the data will be loaded in the main process\n (default: 0).\n epoch (int, optional): the epoch to start the iterator from\n (default: 1).\n buffer_size (int, optional): the number of batches to keep ready in the\n queue. Helps speeding up dataloading. When buffer_size is zero, the\n default torch.utils.data.DataLoader preloading is used.\n timeout (int, optional): if positive, the timeout value for collecting a batch\n from workers. Should always be non-negative. (default: ``0``)\n \"\"\"\n\n def __init__(\n self, dataset, collate_fn, batch_sampler, seed=1, num_shards=1, shard_id=0,\n num_workers=0, epoch=1, buffer_size=0, timeout=0,\n ):\n assert isinstance(dataset, torch.utils.data.Dataset)\n self.dataset = dataset\n self.collate_fn = collate_fn\n self.batch_sampler = batch_sampler\n self._frozen_batches = tuple(batch_sampler) if not callable(batch_sampler) else None\n self.seed = seed\n self.num_shards = num_shards\n self.shard_id = shard_id\n self.num_workers = num_workers\n # This upper limit here is to prevent people from abusing this feature\n # in a shared computing environment.\n self.buffer_size = min(buffer_size, 20)\n self.timeout = timeout\n\n self.epoch = max(epoch, 1) # we use 1-based indexing for epochs\n self.shuffle = True\n self._cur_epoch_itr = None\n self._next_epoch_itr = None\n self._supports_prefetch = getattr(dataset, 'supports_prefetch', False)\n\n @property\n def frozen_batches(self):\n if self._frozen_batches is None:\n self._frozen_batches = tuple(self.batch_sampler(self.dataset, self.epoch))\n return self._frozen_batches\n\n def __len__(self):\n return int(math.ceil(len(self.frozen_batches) / float(self.num_shards)))\n\n @property\n def n(self):\n return self.iterations_in_epoch\n\n @property\n def next_epoch_idx(self):\n \"\"\"Return the epoch index after *next_epoch_itr* is called.\"\"\"\n if self._next_epoch_itr is not None:\n return self.epoch\n elif self._cur_epoch_itr is not None and self.end_of_epoch():\n return self.epoch + 1\n else:\n return self.epoch\n\n def next_epoch_itr(self, shuffle=True, fix_batches_to_gpus=False):\n \"\"\"Return a new iterator over the dataset.\n\n Args:\n shuffle (bool, optional): shuffle batches before returning the\n iterator (default: True).\n fix_batches_to_gpus: ensure that batches are always\n allocated to the same shards across epochs. 
Requires\n that :attr:`dataset` supports prefetching (default: False).\n \"\"\"\n self.epoch = self.next_epoch_idx\n self.dataset.set_epoch(self.epoch)\n if self._next_epoch_itr is not None:\n self._cur_epoch_itr = self._next_epoch_itr\n self._next_epoch_itr = None\n else:\n if callable(self.batch_sampler):\n # reset _frozen_batches to refresh the next epoch\n self._frozen_batches = None\n self._cur_epoch_itr = self._get_iterator_for_epoch(\n self.epoch, shuffle, fix_batches_to_gpus=fix_batches_to_gpus,\n )\n self.shuffle = shuffle\n return self._cur_epoch_itr\n\n def end_of_epoch(self) -> bool:\n \"\"\"Returns whether the most recent epoch iterator has been exhausted\"\"\"\n return not self._cur_epoch_itr.has_next()\n\n @property\n def iterations_in_epoch(self):\n \"\"\"The number of consumed batches in the current epoch.\"\"\"\n if self._cur_epoch_itr is not None:\n return self._cur_epoch_itr.n\n elif self._next_epoch_itr is not None:\n return self._next_epoch_itr.n\n return 0\n\n def state_dict(self):\n \"\"\"Returns a dictionary containing a whole state of the iterator.\"\"\"\n if self.end_of_epoch():\n epoch = self.epoch + 1\n iter_in_epoch = 0\n else:\n epoch = self.epoch\n iter_in_epoch = self.iterations_in_epoch\n return {\n 'version': 2,\n 'epoch': epoch,\n 'iterations_in_epoch': iter_in_epoch,\n 'shuffle': self.shuffle,\n }\n\n def load_state_dict(self, state_dict):\n \"\"\"Copies the state of the iterator from the given *state_dict*.\"\"\"\n self.epoch = state_dict['epoch']\n itr_pos = state_dict.get('iterations_in_epoch', 0)\n version = state_dict.get('version', 1)\n if itr_pos > 0:\n # fast-forward epoch iterator\n self._next_epoch_itr = self._get_iterator_for_epoch(\n self.epoch,\n shuffle=state_dict.get('shuffle', True),\n offset=itr_pos,\n )\n if self._next_epoch_itr is None:\n if version == 1:\n # legacy behavior: we finished the epoch, increment epoch counter\n self.epoch += 1\n else:\n raise RuntimeError(\n 'Cannot resume training due to dataloader mismatch, please '\n 'report this to the fairseq developers. 
You can relaunch '\n 'training with `--reset-dataloader` and it should work.'\n )\n else:\n self._next_epoch_itr = None\n\n def _get_iterator_for_epoch(self, epoch, shuffle, fix_batches_to_gpus=False, offset=0):\n\n def shuffle_batches(batches, seed):\n with data_utils.numpy_seed(seed):\n np.random.shuffle(batches)\n return batches\n\n if self._supports_prefetch:\n batches = self.frozen_batches\n\n if shuffle and not fix_batches_to_gpus:\n batches = shuffle_batches(list(batches), self.seed + epoch)\n\n batches = list(ShardedIterator(\n batches, self.num_shards, self.shard_id, fill_value=[]\n ))\n self.dataset.prefetch([i for s in batches for i in s])\n\n if shuffle and fix_batches_to_gpus:\n batches = shuffle_batches(batches, self.seed + epoch + self.shard_id)\n else:\n if shuffle:\n batches = shuffle_batches(list(self.frozen_batches), self.seed + epoch)\n else:\n batches = self.frozen_batches\n batches = list(ShardedIterator(\n batches, self.num_shards, self.shard_id, fill_value=[]\n ))\n\n if offset > 0 and offset >= len(batches):\n return None\n\n if self.num_workers > 0:\n os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning'\n\n # Create data loader\n itr = torch.utils.data.DataLoader(\n self.dataset,\n collate_fn=self.collate_fn,\n batch_sampler=batches[offset:],\n num_workers=self.num_workers,\n timeout=self.timeout,\n )\n\n # Wrap with a BufferedIterator if needed\n if self.buffer_size > 0:\n itr = BufferedIterator(self.buffer_size, itr)\n\n # Wrap with CoutingIterator\n itr = CountingIterator(itr, start=offset)\n return itr\n\n\nclass GroupedIterator(CountingIterator):\n \"\"\"Wrapper around an iterable that returns groups (chunks) of items.\n\n Args:\n iterable (iterable): iterable to wrap\n chunk_size (int): size of each chunk\n\n Attributes:\n n (int): number of elements consumed from this iterator\n \"\"\"\n\n def __init__(self, iterable, chunk_size):\n itr = _chunk_iterator(iterable, chunk_size)\n super().__init__(\n itr,\n start=int(math.ceil(getattr(iterable, 'n', 0) / float(chunk_size))),\n total=int(math.ceil(len(iterable) / float(chunk_size))),\n )\n self.chunk_size = chunk_size\n\n\ndef _chunk_iterator(itr, chunk_size):\n chunk = []\n for x in itr:\n chunk.append(x)\n if len(chunk) == chunk_size:\n yield chunk\n chunk = []\n if len(chunk) > 0:\n yield chunk\n\n\nclass ShardedIterator(CountingIterator):\n \"\"\"A sharded wrapper around an iterable, padded to length.\n\n Args:\n iterable (iterable): iterable to wrap\n num_shards (int): number of shards to split the iterable into\n shard_id (int): which shard to iterator over\n fill_value (Any, optional): padding value when the iterable doesn't\n evenly divide *num_shards* (default: None).\n\n Attributes:\n n (int): number of elements consumed from this iterator\n \"\"\"\n\n def __init__(self, iterable, num_shards, shard_id, fill_value=None):\n if shard_id < 0 or shard_id >= num_shards:\n raise ValueError('shard_id must be between 0 and num_shards')\n sharded_len = int(math.ceil(len(iterable) / float(num_shards)))\n\n batch_size = len(list(iterable)[0])\n last = max( list(map(max, *list(iterable))))\n\n # This function receives a list [1,2,3,...., last] where each number represents one of the input subsequences\n # In the unmodified fairseq, if you have 4 GPUS, fairseq will give the first GPU subsequences [1,5,9,13,...],\n # the second GPU will get [2,6,10,14,..], the third GPU will get [3,7,11,15] and so on...\n # If we want to do caching, we can't use that. 
We need each GPU to get a continuous list of input subsequences (like [1,2,3,4,5,...]).\n # So what the following code does, is it splits the input into *continuous* chunks of subsequences. For example, if we have\n # 4 GPUs and 100,000 input subsequences, the first GPU will get [1,2,3,...,25000], the second GPU will get [25001,25002,25003,...],\n # and so on.\n # The above description was written with the assumption that batch_size is 1. This function also works when batch_size is greater than 1.\n\n iterable = range(0, last)\n all_itrs = []\n for i in range(shard_id*batch_size, (shard_id+1)*batch_size):\n itr = list(itertools.islice(iterable, i * sharded_len,\n (i +1 )* sharded_len ))\n\n\n all_itrs.append(itr)\n\n itr = [x for x in itertools.chain(*itertools.zip_longest(*all_itrs)) if x is not None]\n itr = [itr[i:i+batch_size] for i in range(0, len(itr), batch_size)] #split to batches\n\n\n if len(itr) != sharded_len: #this makes sure that we don't miss any input subsequences\n to_add = sharded_len - len(itr)\n to_add = [[e] for e in range(sharded_len-to_add, sharded_len)]\n itr = itr + to_add\n\n\n\n super().__init__(\n itr,\n start=int(math.ceil(getattr(iterable, 'n', 0) / float(num_shards))),\n total=sharded_len,\n )\n\n\nclass BackgroundConsumer(Thread):\n def __init__(self, queue, source, max_len):\n Thread.__init__(self)\n\n self._queue = queue\n self._source = source\n self._max_len = max_len\n self.count = 0\n\n def run(self):\n try:\n for item in self._source:\n self._queue.put(item)\n\n # Stop if we reached the maximum length\n self.count += 1\n if self._max_len is not None and self.count >= self._max_len:\n break\n\n # Signal the consumer we are done.\n self._queue.put(_sentinel)\n except Exception as e:\n self._queue.put(e)\n\n\nclass BufferedIterator(object):\n def __init__(self, size, iterable):\n self._queue = queue.Queue(size)\n self._iterable = iterable\n self._consumer = None\n\n self.start_time = time.time()\n self.warning_time = None\n\n self.total = len(iterable)\n\n def _create_consumer(self):\n self._consumer = BackgroundConsumer(\n self._queue,\n self._iterable,\n self.total,\n )\n self._consumer.daemon = True\n self._consumer.start()\n\n def __iter__(self):\n return self\n\n def __len__(self):\n return self.total\n\n def take(self, n):\n self.total = min(self.total, n)\n\n # Propagate this change to the underlying iterator\n if hasattr(self._iterable, \"take\"):\n self._iterable.take(n)\n else:\n self._iterable = itertools.islice(self._iterable, n)\n\n def __next__(self):\n # Create consumer if not created yet\n if self._consumer is None:\n self._create_consumer()\n\n # Notify the user if there is a data loading bottleneck\n if self._queue.qsize() < min(2, max(1, self._queue.maxsize // 2)):\n if time.time() - self.start_time > 5 * 60:\n if self.warning_time is None or time.time() - self.warning_time > 15 * 60:\n logger.debug(\n \"Data loading buffer is empty or nearly empty. This may \"\n \"indicate a data loading bottleneck, and increasing the \"\n \"number of workers (--num-workers) may help.\"\n )\n self.warning_time = time.time()\n\n # Get next example\n item = self._queue.get(True)\n if isinstance(item, Exception):\n raise item\n if item is _sentinel:\n raise StopIteration()\n return item\n" ]
[ [ "torch.utils.data.DataLoader", "numpy.random.shuffle" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
berkott/SpaceInvadersAI
[ "0d1d095f60b06f09b337bd3abf7bb46a08a8ed70" ]
[ "NeuroEvolution/evolution.py" ]
[ "import gym\nimport keras as k\nfrom keras.models import Sequential\nfrom keras.layers import Conv2D, Activation, MaxPooling2D, Flatten, Dense, Dropout\nfrom keras.optimizers import Adam\nimport numpy as np\nfrom datetime import datetime\nfrom matplotlib import pyplot as PLT\nimport time\nimport csv\nimport os\n\n# You can adjust these hyperparameters\nPOPULATION_SIZE = 50\nL1=20\nL2=10\nL3=50\nL4=4\n# L1=2\n# L2=3\n# L3=4\n# L4=5\nPOOLING_SIZE = (2,2)\nFILTER_SIZE_1 = (3,3)\nFILTER_SIZE_2 = (5,5)\nELITE_SET_SIZE = 5\nMUTATION_RATE = 0.5\n\nFRAME_SIZE = 210*160*1\nINPUT_DIM = 2*FRAME_SIZE\nINPUT_SHAPE = (210, 160, 2)\nFINAL_DIMENSION_X = int(((INPUT_SHAPE[0] - 2*int(FILTER_SIZE_1[0]/2))/2 - 2*int(FILTER_SIZE_2[0]/2))/2)\nFINAL_DIMENSION_Y = int(((INPUT_SHAPE[1] - 2*int(FILTER_SIZE_1[0]/2))/2 - 2*int(FILTER_SIZE_2[0]/2))/2)\n\n\nenv = gym.make('SpaceInvaders-v0')\nkeepTraining = True\nslack_logs = np.zeros((6,1))\n\ndef visualize(featureVector):\n regularImage = featureVector[0,:FRAME_SIZE].reshape((210,160))\n differenceImage = featureVector[0,FRAME_SIZE:].reshape((210,160))\n PLT.imshow(regularImage)\n PLT.show()\n PLT.imshow(differenceImage)\n PLT.show()\n\ndef writeCsv(index, data):\n slack_logs[index] = data\n\n # For slack_logs:\n # [0] Generation\n # [1] Highest Score\n # [2] Current Score\n # [3] Games Played\n # [4] Start Time\n # [5] All Time High Score\n\n with open(\"logs.csv\", \"w\", newline='') as csv_file:\n writer = csv.writer(csv_file, delimiter=',')\n writer.writerows(slack_logs)\n\ndef calculatePolicySize():\n # INPUT_DIM * L1+L1+L1 * L2+L2+L2 * L3+L3+L3 * L4+L4\n # FILTER_SIZE_1[0] * FILTER_SIZE_1[1] * INPUT_SHAPE[2] * L1 + L1 + \n # FILTER_SIZE_1[0] * FILTER_SIZE_1[1] * L1 * L2 + L2 + \n # final_dimension_x*final_dimension_y*L2*L3 + L3 + \n # L3*L4\n return FILTER_SIZE_1[0] * FILTER_SIZE_1[1] * INPUT_SHAPE[2] * L1 + L1 + FILTER_SIZE_1[0] * FILTER_SIZE_1[1] * L1 * L2 + L2 + FINAL_DIMENSION_X*FINAL_DIMENSION_Y*L2*L3 + L3 + L3 * L4 + L4\n\n# This function is called each time a new memeber of the population is created\ndef initPopulation():\n population = np.random.rand(POPULATION_SIZE, calculatePolicySize())\n population = population*2-1\n return population\n\ndef convert_prediction_to_action(prediction):\n index = np.argmax(prediction[0])\n # NOOP\n if (index == 0):\n return 0\n # FIRE\n elif (index == 1):\n return 1\n # RIGHT\n elif (index == 2):\n return 3\n # LEFT\n elif (index == 3):\n return 4\n return 0\n\ndef playGame(model):\n score=0\n done=False\n action=0\n frame = np.zeros((1,FRAME_SIZE))\n previous_frame = np.zeros((1,FRAME_SIZE))\n env.reset()\n observation_dim = list(INPUT_SHAPE)\n observation_dim.insert(0,1)\n observation_dim = tuple(observation_dim)\n while not done:\n env.render()\n observation, reward, done, _ = env.step(action)\n frame = np.reshape(observation[:,:,0],(1,FRAME_SIZE))\n frame = np.where(frame > 0, 1.0,0)\n difference = frame-previous_frame\n final_observation=np.zeros((1,INPUT_DIM))\n final_observation[0,:FRAME_SIZE]=frame\n final_observation[0,FRAME_SIZE:]=difference\n final_observation = np.reshape(final_observation, observation_dim)\n prediction = model.predict(final_observation)\n action = convert_prediction_to_action(prediction)\n score+=reward\n\n writeCsv(2, score)\n\n previous_frame = np.copy(frame)\n\n # print(\"Score:\",score)\n return score\n\n# This is where the weights are put into the neural net to see how well it goes\ndef evaluate(dnnmodel, population, gamesPlayed):\n scores=np.zeros(POPULATION_SIZE)\n for i in 
range(POPULATION_SIZE):\n nnFormatPolicyVector = applyPolicyVectorToNN(population[i])\n dnnmodel.set_weights(nnFormatPolicyVector)\n scores[i] = playGame(dnnmodel)\n gamesPlayed+=1\n writeCsv(3, gamesPlayed)\n return scores\n\n\n# Constructs the model that is to be used\ndef buildModel():\n model = Sequential()\n # layer1=Dense(L1, activation = 'relu', input_dim = INPUT_DIM, kernel_initializer='uniform')\n layer1=Conv2D(L1, FILTER_SIZE_1, activation='relu', input_shape = INPUT_SHAPE, kernel_initializer='uniform')\n model.add(layer1)\n model.add(MaxPooling2D(pool_size=POOLING_SIZE))\n \n layer2=Conv2D(L2, FILTER_SIZE_2, activation='relu', kernel_initializer='uniform')\n model.add(layer2)\n model.add(MaxPooling2D(pool_size=POOLING_SIZE))\n\n # model.add(Dropout(0.25))\n model.add(Flatten())\n\n layer3=Dense(L3, activation = 'relu', kernel_initializer='uniform')\n model.add(layer3)\n\n layer4=Dense(L4, activation ='softmax', kernel_initializer='uniform')\n model.add(layer4)\n\n adam = Adam(lr=0.01)\n model.compile(loss='mean_squared_error', optimizer=adam)\n weights=model.get_weights()\n print(len(weights))\n print(\"====================================\")\n return model\n\ndef applyPolicyVectorToNN(policyVector):\n # INPUT_DIM * L1+L1+L1 * L2+L2+L2 * L3+L3+L3 * L4+L4\n # FILTER_SIZE_1[0] * FILTER_SIZE_1[1] * INPUT_SHAPE[2] * L1 + L1 + \n # FILTER_SIZE_1[0] * FILTER_SIZE_1[1] * L1 * L2 + L2 + \n # final_dimension_x*final_dimension_y*L2*L3 + L3 + \n # L3*L4\n\n offset=FILTER_SIZE_1[0] * FILTER_SIZE_1[1] * INPUT_SHAPE[2] * L1\n sec1 = policyVector[:offset].reshape(FILTER_SIZE_1[0], FILTER_SIZE_1[1], INPUT_SHAPE[2], L1)\n sec2 = policyVector[offset:offset+L1]\n offset+=L1\n sec3 = policyVector[offset:offset+FILTER_SIZE_2[0] * FILTER_SIZE_2[1] * L1 * L2].reshape(FILTER_SIZE_2[0], FILTER_SIZE_2[1], L1, L2)\n offset+=FILTER_SIZE_1[0] * FILTER_SIZE_1[1] * L1 * L2\n sec4 = policyVector[offset:offset+L2]\n offset+=L2\n sec5 = policyVector[offset:offset+FINAL_DIMENSION_X*FINAL_DIMENSION_Y*L2*L3].reshape(FINAL_DIMENSION_X*FINAL_DIMENSION_Y*L2, L3)\n offset+=FINAL_DIMENSION_X*FINAL_DIMENSION_Y*L2*L3\n sec6 = policyVector[offset:offset+L3]\n offset+=L3\n sec7 = policyVector[offset:offset+L3*L4].reshape(L3, L4)\n offset+=L3*L4\n sec8 = policyVector[offset:]\n\n nnFormat = []\n nnFormat.append(sec1)\n nnFormat.append(sec2)\n nnFormat.append(sec3)\n nnFormat.append(sec4)\n nnFormat.append(sec5)\n nnFormat.append(sec6)\n nnFormat.append(sec7)\n nnFormat.append(sec8)\n return nnFormat\n\n# This is where the members of the population are ranked\ndef selection(scores, population):\n eliteSet = np.zeros((ELITE_SET_SIZE,calculatePolicySize()))\n scoresTemp=np.copy(scores)\n for i in range(ELITE_SET_SIZE):\n index = np.argmax(scoresTemp)\n scoresTemp[index] = 0\n eliteSet[i] = population[index]\n return eliteSet\n\ndef cross(policy1, policy2):\n newPolicy = policy1.copy()\n mask = np.random.randint(2, size=newPolicy.shape).astype(np.bool)\n newPolicy[mask] = policy2[mask]\n # for i in range(calculatePolicySize()):\n # rand = np.random.uniform()\n # if rand > 0.5:\n # newPolicy[i] = policy2[i]\n return newPolicy\n\n# This is where crossover occurs based on the selection process\ndef crossover(scores, population):\n crossoverSet = np.zeros((POPULATION_SIZE,calculatePolicySize()))\n selectionProbability = np.array(scores)/np.sum(scores)\n for i in range(POPULATION_SIZE - ELITE_SET_SIZE):\n randomIndex = np.random.choice(range(POPULATION_SIZE), p=selectionProbability)\n policy1 = population[randomIndex]\n randomIndex = 
np.random.choice(range(POPULATION_SIZE), p=selectionProbability)\n policy2 = population[randomIndex]\n newPolicy = cross(policy1, policy2)\n crossoverSet[i]=newPolicy\n return crossoverSet\n\n# Lastly, the mutation is a point mutation that sometimes occurs\ndef mutation(crossoverPopulation):\n i = int((POPULATION_SIZE - ELITE_SET_SIZE) * np.random.random_sample())\n j = int(calculatePolicySize() * np.random.random_sample())\n\n for _ in range(int(i*j*MUTATION_RATE)):\n crossoverPopulation[i][j] = np.random.random_sample() * 2 - 1\n # for i in range(POPULATION_SIZE - ELITE_SET_SIZE):\n # for j in range(calculatePolicySize()):\n # rand = np.random.uniform()\n # if(rand < MUTATION_RATE):\n # crossoverPopulation[i][j] = np.random.random_sample() * 2 - 1\n return crossoverPopulation\n\ndef generateNewGeneration(scores, population):\n elitePopulation = selection(scores, population)\n crossoverPopulation = crossover(scores, population)\n mutationPopulation = mutation(crossoverPopulation)\n \n for i in range(ELITE_SET_SIZE):\n mutationPopulation[POPULATION_SIZE-ELITE_SET_SIZE+i] = elitePopulation[i] \n\n return mutationPopulation\n\ndef saveHighestScorePolicy(population, generation, scores):\n if (generation % 10 == 0):\n index = np.argmax(scores)\n filename='generation'+str(generation)+'HS'+str(scores[index])+'.npy'\n np.save(os.path.join('SavedScores', filename) ,population[index])\n print(\"Saved generation to file \"+filename)\n\ndef loadPolicy(filename, population, index):\n policy=np.load(filename)\n print(\"Loaded\\n\",policy)\n population[index]=policy\n\ndef measureTime():\n global lasttime\n currentTime=time.time()\n diff=currentTime-lasttime\n lasttime=currentTime\n return diff\n\n# test_selection()\n# quit()\n\nenv.reset()\npopulation = initPopulation()\n# loadPolicy('generation0.npy',population,0)\ndnnmodel = buildModel()\ngeneration = 0\nlasttime = time.time()\nall_time_high_score = 0\n\nwriteCsv(4, time.time())\n\nwhile (keepTraining):\n scores = evaluate(dnnmodel, population, generation*POPULATION_SIZE)\n print(int(measureTime()),\" sec Generation: \", generation, \" Highest Score: \", np.max(scores), \" Games Played: \", generation*POPULATION_SIZE+POPULATION_SIZE)\n\n writeCsv(0, generation)\n writeCsv(1, np.max(scores))\n if (np.max(scores) > all_time_high_score):\n all_time_high_score = np.max(scores)\n writeCsv(5, all_time_high_score)\n\n saveHighestScorePolicy(population, generation, scores)\n population = generateNewGeneration(scores, population)\n print(int(measureTime()),\" sec New generation created.\")\n generation+=1\n" ]
[ [ "matplotlib.pyplot.imshow", "numpy.sum", "numpy.reshape", "numpy.random.random_sample", "numpy.max", "numpy.copy", "numpy.argmax", "numpy.load", "numpy.array", "matplotlib.pyplot.show", "numpy.zeros", "numpy.where", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
BeibinLi/SSD
[ "2cd30f02c21b0a8731a34dca2a89d6e099ca3442" ]
[ "ssd/modeling/backbone/vgg.py" ]
[ "import torch.nn as nn\nimport torch.nn.functional as F\n\nfrom ssd.layers import L2Norm\nfrom ssd.modeling import registry\nfrom ssd.utils.model_zoo import load_state_dict_from_url\n\nmodel_urls = {\n 'vgg': 'https://s3.amazonaws.com/amdegroot-models/vgg16_reducedfc.pth',\n}\n\n\n# borrowed from https://github.com/amdegroot/ssd.pytorch/blob/master/ssd.py\ndef add_vgg(cfg, batch_norm=False):\n layers = []\n in_channels = 3\n for v in cfg:\n if v == 'M':\n layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n elif v == 'C':\n layers += [nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)]\n else:\n conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)\n if batch_norm:\n layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]\n else:\n layers += [conv2d, nn.ReLU(inplace=True)]\n in_channels = v\n pool5 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)\n conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6)\n conv7 = nn.Conv2d(1024, 1024, kernel_size=1)\n layers += [pool5, conv6,\n nn.ReLU(inplace=True), conv7, nn.ReLU(inplace=True)]\n return layers\n\n\ndef add_extras(cfg, i, size=300):\n # Extra layers added to VGG for feature scaling\n layers = []\n in_channels = i\n flag = False\n for k, v in enumerate(cfg):\n if in_channels != 'S':\n if v == 'S':\n layers += [nn.Conv2d(in_channels, cfg[k + 1], kernel_size=(1, 3)[flag], stride=2, padding=1)]\n else:\n layers += [nn.Conv2d(in_channels, v, kernel_size=(1, 3)[flag])]\n flag = not flag\n in_channels = v\n if size == 512:\n layers.append(nn.Conv2d(in_channels, 128, kernel_size=1, stride=1))\n layers.append(nn.Conv2d(128, 256, kernel_size=4, stride=1, padding=1))\n return layers\n\n\ndef add_header(vgg, extra_layers, boxes_per_location, num_classes):\n regression_headers = []\n classification_headers = []\n vgg_source = [21, -2]\n for k, v in enumerate(vgg_source):\n regression_headers += [nn.Conv2d(vgg[v].out_channels,\n boxes_per_location[k] * 4, kernel_size=3, padding=1)]\n classification_headers += [nn.Conv2d(vgg[v].out_channels,\n boxes_per_location[k] * num_classes, kernel_size=3, padding=1)]\n for k, v in enumerate(extra_layers[1::2], 2):\n regression_headers += [nn.Conv2d(v.out_channels, boxes_per_location[k]\n * 4, kernel_size=3, padding=1)]\n classification_headers += [nn.Conv2d(v.out_channels, boxes_per_location[k]\n * num_classes, kernel_size=3, padding=1)]\n return regression_headers, classification_headers\n\n\nvgg_base = {\n '300': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'C', 512, 512, 512, 'M',\n 512, 512, 512],\n '512': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'C', 512, 512, 512, 'M',\n 512, 512, 512],\n}\nextras_base = {\n '300': [256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256],\n '512': [256, 'S', 512, 128, 'S', 256, 128, 'S', 256, 128, 'S', 256],\n}\n\n\nclass VGG(nn.Module):\n def __init__(self, cfg):\n super().__init__()\n size = cfg.INPUT.IMAGE_SIZE\n vgg_config = vgg_base[str(size)]\n extras_config = extras_base[str(size)]\n\n self.vgg = nn.ModuleList(add_vgg(vgg_config))\n self.extras = nn.ModuleList(add_extras(extras_config, i=1024, size=size))\n self.l2_norm = L2Norm(512, scale=20)\n self.reset_parameters()\n\n def reset_parameters(self):\n for m in self.extras.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.xavier_uniform_(m.weight)\n nn.init.zeros_(m.bias)\n\n def init_from_pretrain(self, state_dict):\n self.vgg.load_state_dict(state_dict)\n\n def forward(self, x):\n features = []\n for i in range(23):\n x = self.vgg[i](x)\n s = self.l2_norm(x) # Conv4_3 L2 normalization\n 
features.append(s)\n\n # apply vgg up to fc7\n for i in range(23, len(self.vgg)):\n x = self.vgg[i](x)\n features.append(x)\n\n for k, v in enumerate(self.extras):\n x = F.relu(v(x), inplace=True)\n if k % 2 == 1:\n features.append(x)\n\n return tuple(features)\n\n\[email protected]('vgg')\ndef vgg(cfg, pretrained=True):\n model = VGG(cfg)\n if pretrained:\n model.init_from_pretrain(load_state_dict_from_url(model_urls['vgg']))\n return model\n" ]
[ [ "torch.nn.Conv2d", "torch.nn.MaxPool2d", "torch.nn.init.xavier_uniform_", "torch.nn.BatchNorm2d", "torch.nn.init.zeros_", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
fedarko/songbird
[ "44827596bc9ca16d8046aeafee24ee1dd74dcc0b" ]
[ "songbird/util.py" ]
[ "import os\nimport tensorflow as tf\nimport numpy as np\nimport pandas as pd\nfrom sklearn.utils import check_random_state\nfrom skbio.stats.composition import clr_inv as softmax\nfrom biom import Table\nfrom patsy import dmatrix\n\n\ndef random_multinomial_model(num_samples, num_features,\n reps=1,\n low=2, high=10,\n beta_mean=0,\n beta_scale=5,\n mu=1,\n sigma=1,\n seed=0):\n \"\"\" Generates a table using a random poisson regression model.\n\n Here we will be simulating microbial counts given the model, and the\n corresponding model priors.\n\n Parameters\n ----------\n num_samples : int\n Number of samples\n num_features : int\n Number of features\n tree : np.array\n Tree specifying orthonormal contrast matrix.\n low : float\n Smallest gradient value.\n high : float\n Largest gradient value.\n beta_mean : float\n Mean of beta prior (for regression coefficients)\n beta_scale : float\n Scale of beta prior (for regression coefficients)\n mu : float\n Mean sequencing depth (in log units)\n sigma : float\n Variance for sequencing depth\n\n Returns\n -------\n table : biom.Table\n Biom representation of the count table.\n metadata : pd.DataFrame\n DataFrame containing relevant metadata.\n beta : np.array\n Regression parameter estimates.\n \"\"\"\n N = num_samples\n\n # generate all of the coefficient using the random poisson model\n state = check_random_state(seed)\n beta = state.normal(beta_mean, beta_scale, size=(2, num_features-1))\n\n X = np.hstack([np.linspace(low, high, num_samples // reps)]\n for _ in range(reps))\n X = np.vstack((np.ones(N), X)).T\n phi = np.hstack((np.zeros((N, 1)), X @ beta))\n probs = softmax(phi)\n n = [mu] * N\n\n table = np.vstack(\n state.multinomial(n[i], probs[i, :])\n for i in range(N)\n ).T\n\n samp_ids = pd.Index(['S%d' % i for i in range(num_samples)],\n name='sampleid')\n feat_ids = ['F%d' % i for i in range(num_features)]\n balance_ids = ['L%d' % i for i in range(num_features-1)]\n\n table = Table(table, feat_ids, samp_ids)\n metadata = pd.DataFrame(X, columns=['Ones', 'X'], index=samp_ids)\n beta = pd.DataFrame(beta.T, columns=['Intercept', 'beta'],\n index=balance_ids)\n\n return table, metadata, beta\n\n\ndef _type_cast_to_float(df):\n \"\"\" Attempt to cast all of the values in dataframe to float.\n\n This will try to type cast all of the series within the\n dataframe into floats. 
If a column cannot be type casted,\n it will be kept as is.\n\n Parameters\n ----------\n df : pd.DataFrame\n\n Returns\n -------\n pd.DataFrame\n \"\"\"\n # TODO: Will need to improve this, as this is a very hacky solution.\n for c in df.columns:\n s = df[c]\n try:\n df[c] = s.astype(np.float64)\n except Exception:\n continue\n return df\n\n\ndef read_metadata(filepath):\n \"\"\" Reads in a sample metadata file\n\n Parameters\n ----------\n filepath: str\n The file path location of the sample metadata file\n\n Returns\n -------\n pd.DataFrame :\n The metadata table with inferred types.\n \"\"\"\n metadata = pd.read_table(\n filepath, dtype=object)\n cols = metadata.columns\n metadata = metadata.set_index(cols[0])\n metadata = _type_cast_to_float(metadata.copy())\n\n return metadata\n\n\ndef match_and_filter(table, metadata, formula,\n min_sample_count, min_feature_count):\n \"\"\" Matches and aligns biom and metadata tables.\n\n This will also return the patsy representation.\n\n Parameters\n ----------\n table : biom.Table\n Table of abundances\n metadata : pd.DataFrame\n Sample metadata\n\n Returns\n -------\n table : biom.Table\n Filtered biom table\n metadata : pd.DataFrame\n Sample metadata\n \"\"\"\n # match them\n\n def sample_filter(val, id_, md):\n return id_ in metadata.index and np.sum(val) > min_sample_count\n\n def read_filter(val, id_, md):\n return np.sum(val > 0) > min_feature_count\n\n table = table.filter(sample_filter, axis='sample', inplace=False)\n table = table.filter(read_filter, axis='observation', inplace=False)\n\n metadata = metadata.loc[table.ids(axis='sample')]\n metadata = metadata.loc[~metadata.index.duplicated(keep='first')]\n\n def sort_f(xs):\n return [xs[metadata.index.get_loc(x)] for x in xs]\n\n table = table.sort(sort_f=sort_f, axis='sample')\n design = dmatrix(formula, metadata, return_type='dataframe')\n design = design.dropna()\n\n def design_filter(val, id_, md):\n return id_ in design.index\n\n table = table.filter(design_filter, axis='sample')\n return table, metadata, design\n\n\ndef split_training(dense_table, metadata, design, training_column=None,\n num_random_test_examples=10, seed=None):\n\n if training_column is None:\n np.random.seed(seed)\n idx = np.random.random(design.shape[0])\n i = np.argsort(idx)[num_random_test_examples]\n\n threshold = idx[i]\n train_idx = ~(idx < threshold)\n else:\n train_idx = metadata.loc[design.index, training_column] == \"Train\"\n\n trainX = design.loc[train_idx].values\n testX = design.loc[~train_idx].values\n\n trainY = dense_table.loc[train_idx].values\n testY = dense_table.loc[~train_idx].values\n\n return trainX, testX, trainY, testY\n\n\ndef silence_output():\n # suppress profiling messages & compilation warnings\n # taken from:\n # https://stackoverflow.com/questions/47068709/your-cpu-supports-\n # instructions-that-this-tensorflow-binary-was-not-compiled-to-u\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\n # suppress deprecation warnings\n # taken from https://github.com/tensorflow/tensorflow/issues/27023\n tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)\n" ]
[ [ "numpy.random.random", "numpy.sum", "numpy.random.seed", "numpy.linspace", "pandas.DataFrame", "tensorflow.compat.v1.logging.set_verbosity", "numpy.ones", "pandas.read_table", "numpy.argsort", "numpy.zeros", "sklearn.utils.check_random_state" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
lsst-sitcom/spot_motion_monitor
[ "3d0242276198126240667ba13e95b7bdf901d053", "3d0242276198126240667ba13e95b7bdf901d053" ]
[ "tests/models/test_full_frame_model.py", "spot_motion_monitor/views/centroid_1d_plot_widget.py" ]
[ "# This file is part of spot_motion_monitor.\n#\n# Developed for LSST System Integration, Test and Commissioning.\n#\n# See the LICENSE file at the top-level directory of this distribution\n# for details of code ownership.\n#\n# Use of this source code is governed by a 3-clause BSD-style\n# license that can be found in the LICENSE file.\n\nimport numpy as np\nimport pytest\n\nfrom spot_motion_monitor.camera.gaussian_camera import GaussianCamera\nfrom spot_motion_monitor.models import FullFrameModel\nfrom spot_motion_monitor.utils import FrameRejected, TimeHandler\n\nclass TestFullFrameModel():\n\n def setup_class(cls):\n cls.model = FullFrameModel()\n cls.model.timeHandler = TimeHandler()\n\n def checkFrame(self, flux, maxAdc, comX, comY):\n return flux > 4000 and maxAdc > 130 and comX > 0 and comY > 0\n\n def test_parametersAfterConstruction(self):\n assert self.model.sigmaScale == 5.0\n assert self.model.minimumNumPixels == 10\n assert self.model.timeHandler is not None\n\n def test_frameCalculations(self):\n # This test requires the generation of a CCD frame which will be\n # provided by the GaussianCamera\n camera = GaussianCamera()\n camera.seed = 1000\n camera.startup()\n frame = camera.getFullFrame()\n info = self.model.calculateCentroid(frame)\n assert info.centerX == 288.47687644439395\n assert info.centerY == 224.45394404821826\n assert info.flux == 3235.9182163661176\n assert info.maxAdc == 135.83703259361937\n assert info.fwhm == 5.749039360993981\n assert info.stdNoObjects is None\n\n def test_badFrameCalculation(self):\n frame = np.ones((480, 640))\n with pytest.raises(FrameRejected):\n self.model.calculateCentroid(frame)\n\n def test_failedFrameCheck(self):\n # This test requires the generation of a CCD frame which will be\n # provided by the GaussianCamera\n self.model.frameCheck = self.checkFrame\n camera = GaussianCamera()\n camera.seed = 1000\n camera.startup()\n frame = camera.getFullFrame()\n with pytest.raises(FrameRejected):\n self.model.calculateCentroid(frame)\n self.model.frameCheck = None\n", "# This file is part of spot_motion_monitor.\n#\n# Developed for LSST System Integration, Test and Commissioning.\n#\n# See the LICENSE file at the top-level directory of this distribution\n# for details of code ownership.\n#\n# Use of this source code is governed by a 3-clause BSD-style\n# license that can be found in the LICENSE file.\n\nimport numpy as np\nfrom pyqtgraph import GraphicsLayoutWidget\n\nfrom ..utils import AutoscaleState, noneToDefaultOrValue\n\n__all__ = ['Centroid1dPlotWidget']\n\nclass Centroid1dPlotWidget(GraphicsLayoutWidget):\n\n \"\"\"This class handles managing the centroid plots for both x and y\n coordinates.\n\n Attributes\n ----------\n autoscale : `utils.AutoscaleState`\n State of plot auto scaling.\n axis : str\n Component axis (X or Y).\n curve : pyqtgraph.PlotDataItem\n Instance of the line in the plot.\n data : numpy.array\n Container for the centroid data.\n dataCounter : int\n Number of times data array has been appended to up until array size.\n dataSize : int\n The requested size of the data array.\n numAccumFrames : int\n The number of frames to accumulate before calculating y range.\n pixelRangeAddition : int\n The value to subtract and add to the mean of the accumulated data.\n plot : pyqtgraph.PlotItem\n Instance of the graphics plot.\n roiFps : float\n The camera ROI FPS.\n rollArray : bool\n Flag as to when to start rolling the data array of centroid values.\n timeRange : numpy.array\n The values for the accumulation time 
range.\n yRange : list\n The bounds for the y axis of the plot when disabling auto range.\n \"\"\"\n\n def __init__(self, parent=None):\n \"\"\"Initialize the class.\n\n Parameters\n ----------\n parent : None, optional\n Top-level widget.\n \"\"\"\n super().__init__(parent)\n self.plot = None\n self.curve = None\n self.dataSize = None\n self.data = None\n self.timeRange = None\n self.rollArray = False\n self.dataCounter = 0\n self.roiFps = None\n self.autoscale = AutoscaleState.PARTIAL\n self.yRange = None\n self.pixelRangeAddition = 10\n self.numAccumFrames = 15\n self.axis = None\n\n def clearPlot(self):\n \"\"\"Reset all data and clear the plot.\n \"\"\"\n self.rollArray = False\n self.dataCounter = 0\n self.data = np.zeros(self.dataSize)\n self.curve.clear()\n self.plot.enableAutoRange()\n self.yRange = None\n self.plot.setRange(yRange=(-0.5, 0.5))\n\n def getConfiguration(self):\n \"\"\"Get the current plot configuration.\n\n Returns\n -------\n bool, tuple, int\n The set of current configuration parameters.\n \"\"\"\n if self.yRange is not None:\n yRange = [self.yRange[0], self.yRange[1]]\n else:\n yRange = [None, None]\n return self.autoscale, yRange, self.pixelRangeAddition\n\n def setConfiguration(self, config):\n \"\"\"Set the new parameters into the widget.\n\n Parameters\n ----------\n config : `config.CentroidPlotConfig`\n The new parameters to apply.\n \"\"\"\n self.autoscale = getattr(config, f'autoscale{self.axis}')\n if self.autoscale == AutoscaleState.ON:\n self.plot.enableAutoRange()\n self.yRange = None\n elif self.autoscale == AutoscaleState.PARTIAL:\n self.yRange = None\n self.pixelRangeAddition = getattr(config, f'pixelRangeAddition{self.axis}')\n else:\n minimum = noneToDefaultOrValue(getattr(config, f'minimum{self.axis}'), default=0)\n maximum = noneToDefaultOrValue(getattr(config, f'maximum{self.axis}'), default=1000)\n self.yRange = [minimum, maximum]\n self.plot.setRange(yRange=self.yRange)\n self.plot.disableAutoRange()\n\n def setup(self, arraySize, axisLabel, roiFps):\n \"\"\"Provide information for setting up the plot.\n\n Parameters\n ----------\n arraySize : int\n The size for the plot data array.\n axisLabel : str\n The label for the axis.\n roiFps : float\n The camera ROI FPS.\n \"\"\"\n self.axis = axisLabel\n self.dataSize = arraySize\n self.data = np.zeros(self.dataSize)\n self.roiFps = roiFps\n self.timeRange = np.arange(self.dataSize) / self.roiFps\n self.plot = self.addPlot()\n self.curve = self.plot.plot(self.timeRange, self.data)\n self.plot.setLabel('bottom', 'Time', units='s')\n self.plot.setLabel('left', axisLabel, units='pixel')\n\n def setArraySize(self, arraySize):\n \"\"\"Update the stored array size and adjust arrays.\n\n Parameters\n ----------\n arraySize : int\n The new array size to use.\n \"\"\"\n self.dataSize = arraySize\n self.data = np.zeros(self.dataSize)\n self.timeRange = np.arange(self.dataSize) / self.roiFps\n self.curve.setData(self.timeRange, self.data)\n self.rollArray = False\n\n def setRoiFps(self, roiFps):\n \"\"\"Update the stored ROI FPS and adjust arrays.\n\n Parameters\n ----------\n roiFps : int\n The new ROI FPS.\n \"\"\"\n self.roiFps = roiFps\n self.timeRange = np.arange(self.dataSize) / self.roiFps\n self.curve.setData(self.timeRange, self.data)\n\n def updatePlot(self, centroid):\n \"\"\"Update the plot with a new centroid.\n\n Parameters\n ----------\n centroid : float\n The current centroid value to plot.\n \"\"\"\n if self.rollArray:\n self.data[:-1] = self.data[1:]\n self.data[-1] = centroid\n else:\n 
self.data[self.dataCounter] = centroid\n\n if self.dataCounter < self.dataSize:\n self.dataCounter += 1\n if self.dataCounter == self.dataSize:\n self.rollArray = True\n\n if self.autoscale == AutoscaleState.PARTIAL:\n if self.dataCounter == self.numAccumFrames and self.yRange is None:\n cmean = int(np.mean(self.data[0:self.numAccumFrames]))\n self.yRange = [cmean - self.pixelRangeAddition, cmean + self.pixelRangeAddition]\n self.plot.setRange(yRange=self.yRange)\n self.plot.disableAutoRange()\n\n self.curve.setData(self.timeRange, self.data)\n" ]
[ [ "numpy.ones" ], [ "numpy.arange", "numpy.zeros", "numpy.mean" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jmendozais/SDSSDepth
[ "7a4d0c5affef3eda7056876ccb2365ac883c08eb" ]
[ "loss/general_adaptive_loss.py" ]
[ "import sys\nimport math\nimport os\n\nimport torch\nimport torchvision\nimport numpy as np\n\nfrom pkg_resources import resource_stream\n\ndef interpolate1d(x, values, tangents):\n '''\n Returns:\n Returns the interpolated or extrapolated values for each query point,\n depending on whether or not the query lies within the span of the spline.\n '''\n assert torch.is_tensor(x)\n assert torch.is_tensor(values)\n assert torch.is_tensor(tangents)\n float_dtype = x.dtype\n assert values.dtype == float_dtype\n assert tangents.dtype == float_dtype\n assert len(values.shape) == 1\n assert len(tangents.shape) == 1\n assert values.shape[0] == tangents.shape[0]\n\n x_lo = torch.floor(torch.clamp(x, torch.as_tensor(0),\n values.shape[0] - 2)).type(torch.int64)\n x_hi = x_lo + 1\n\n # Compute the relative distance between each `x` and the knot below it.\n t = x - x_lo.type(float_dtype)\n\n # Compute the cubic hermite expansion of `t`.\n t_sq = t**2\n t_cu = t * t_sq\n h01 = -2. * t_cu + 3. * t_sq\n h00 = 1. - h01\n h11 = t_cu - t_sq\n h10 = h11 - t_sq + t\n\n # Linearly extrapolate above and below the extents of the spline for all\n # values.\n value_before = tangents[0] * t + values[0]\n value_after = tangents[-1] * (t - 1.) + values[-1]\n\n # Cubically interpolate between the knots below and above each query point.\n neighbor_values_lo = values[x_lo]\n neighbor_values_hi = values[x_hi]\n neighbor_tangents_lo = tangents[x_lo]\n neighbor_tangents_hi = tangents[x_hi]\n value_mid = (\n neighbor_values_lo * h00 + neighbor_values_hi * h01 +\n neighbor_tangents_lo * h10 + neighbor_tangents_hi * h11)\n\n return torch.where(t < 0., value_before,\n torch.where(t > 1., value_after, value_mid))\n\n\ndef log_safe(x):\n x = torch.as_tensor(x)\n return torch.log(torch.min(x, torch.tensor(33e37).to(x)))\n\n\ndef load_spline_params():\n dirname = os.path.dirname(__file__)\n with open(os.path.join(dirname, '../misc/partition_spline.npz'), \"rb\") as spline_file:\n with np.load(spline_file, allow_pickle=False) as f:\n spline_x_scale = torch.tensor(f['x_scale'])\n spline_values = torch.tensor(f['values'])\n spline_tangents = torch.tensor(f['tangents'])\n\n return spline_x_scale, spline_values, spline_tangents\n\n\ndef get_partition_init(shape):\n shape = torch.as_tensor(shape)\n\n base1 = (2.25 * shape - 4.5) / (torch.abs(shape - 2) + 0.25) + shape + 2\n base2 = 5. / 18. * log_safe(4 * shape - 15) + 8\n\n return torch.where(shape < 4, base1, base2)\n\n\ndef get_partition(shape):\n shape = torch.as_tensor(shape)\n assert (shape >= 0).all()\n\n init = get_partition_init(shape)\n\n x_scale, values, tangents = load_spline_params()\n\n return interpolate1d(init * x_scale.to(init), values.to(init), tangents.to(init))\n\n\ndef general_adaptive_loss(x, shape, bowl=1.):\n input_shape = x.shape\n shape = torch.as_tensor(shape).to(x.device)\n bowl = torch.as_tensor(bowl).to(x.device)\n\n b = x.size(0)\n x = x.view(b, -1)\n\n if len(shape.shape) == 0:\n shape = shape.unsqueeze(dim=0).expand([b, ]).unsqueeze(dim=1)\n else:\n shape = shape.view(b, -1)\n\n if len(bowl.shape) == 0:\n bowl = bowl.unsqueeze(dim=0).expand([b, ]).unsqueeze(dim=1)\n else:\n bowl = bowl.view(b, -1)\n\n partition = get_partition(shape)\n ans = (torch.abs(shape - 2)/shape) * (torch.pow((torch.square(x/bowl) /\n torch.abs(shape - 2) + 1), shape/2) - 1) + log_safe(bowl) + log_safe(partition)\n\n return ans.view(input_shape)\n" ]
[ [ "torch.abs", "torch.is_tensor", "torch.tensor", "torch.square", "torch.where", "numpy.load", "torch.as_tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dzungcamlang/noise_adversarial_tacotron
[ "7a7fda49eb8bf82f5139743d55639d48ff204e9e" ]
[ "dataset/cut_chime.py" ]
[ "import hp\nfrom pathlib import Path\nimport numpy as np\nfrom tqdm import tqdm\nimport librosa\nimport torch\nimport librosa.filters\nimport numpy as np\nimport scipy\nfrom random import randint\nfrom os import makedirs\n\n\ndef load_wav(path, sample_rate):\n return librosa.core.load(path, sr=sample_rate)[0]\n\n\ndef save_wav(wav, path, sample_rate):\n wav *= 32767 / max(0.01, np.max(np.abs(wav)))\n scipy.io.wavfile.write(path, sample_rate, wav.astype(np.int16))\n\n\ndef get_segments(source, length, count):\n begins = []\n l = len(source)\n for _ in range(count):\n begins.append(randint(0, l - length - 1))\n segments = []\n for begin in begins:\n segments.append(source[begin: begin + length])\n return segments\n\n\ndef process_chime(\n source=hp.whole_chime_path,\n target=hp.part_chime_path,\n sr=16000,\n duration=30,\n count=10\n):\n \"\"\"\n Randomly picking segments from CHiME dataset, since full dataset is not necessary in our case.\n :param source:\n :param target:\n :param sr:\n :param duration:\n :param count:\n :return:\n \"\"\"\n makedirs(str(target), exist_ok=True)\n for path in tqdm(source.glob(\"*.wav\")):\n wave = load_wav(path, sr)\n if len(wave) < sr * 30: continue\n waves = get_segments(wave, duration * sr, count)\n for i, wave in enumerate(waves, 1):\n save_wav(wave, str(target / f\"{path.stem}_{i}.wav\"), sr)\n\n\nif __name__ == '__main__':\n print(\"Beginning segmenting CHiME4 noises.\")\n process_chime()\n print(\"Processing Finished\")\n" ]
[ [ "numpy.abs" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
JaeyoonSSim/Design-Project
[ "8a0037bec50b44b3f5d92da5254e79964fdaf9cf" ]
[ "Detector_1/fusion_detecting.py" ]
[ "import cv2\r\nimport sys\r\nimport os\r\nimport numpy as np\r\nimport time\r\n\r\n# Initialize the parameters\r\nconfThreshold = 0.5 # Confidence threshold\r\nnmsThreshold = 0.4 # Non-maximum suppression threshold\r\ninpWidth = 416 # Width of network's input image\r\ninpHeight = 416 # Height of network's input image\r\nstarting_time = 0\r\nframe_id = 0\r\nfont = cv2.FONT_HERSHEY_PLAIN\r\n\r\n# Load names of classes\r\nclassesFile = \"coco.names\"\r\nclasses = None\r\nwith open(classesFile, 'rt') as f:\r\n classes = f.read().rstrip('\\n').split('\\n')\r\n\r\n# Give the configuration and weight files for the model and load the network using them.\r\nmodelConfiguration = \"yolov3.cfg\"\r\nmodelWeights = \"yolov3.weights\"\r\nnet = cv2.dnn.readNetFromDarknet(modelConfiguration, modelWeights)\r\nnet.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)\r\nnet.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)\r\n\r\ninputFile = \"presen_T.mp4\"\r\ninputFile2 = \"presen_R.mp4\"\r\noutputFile = \"yolo_out_py.avi\"\r\n\r\n# Open the video file\r\nif not os.path.isfile(inputFile):\r\n print(\"Input video file \", inputFile, \" doesn't exist\")\r\n sys.exit(1)\r\ncap = cv2.VideoCapture(inputFile)\r\ncap2 = cv2.VideoCapture(inputFile2)\r\noutputFile = inputFile[:-4] + \"_yolo_out_py.avi\"\r\n\r\n# Get the video writer initialized to save the output video\r\nvid_writer = cv2.VideoWriter(outputFile, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 30,\r\n (round(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), round(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))))\r\n\r\n# Get the names of the output layers\r\ndef getOutputsNames(net):\r\n # Get the names of all the layers in the network\r\n layersNames = net.getLayerNames()\r\n # Get the names of the output layers, i.e. the layers with unconnected outputs\r\n return [layersNames[i[0] - 1] for i in net.getUnconnectedOutLayers()]\r\n\r\n# Draw the predicted bounding box\r\ndef drawPred(classId, conf, left, top, right, bottom):\r\n # Draw a bounding box.\r\n cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 0))\r\n label = '%.2f' % conf\r\n\r\n # Get the label for the class name and its confidence\r\n if classes:\r\n assert (classId < len(classes))\r\n label = '%s:%s' % (classes[classId], label)\r\n\r\n # Display the label at the top of the bounding box\r\n labelSize, baseLine = cv2.getTextSize(label, font, 0.5, 1)\r\n top = max(top, labelSize[1])\r\n cv2.putText(frame, label, (left, top), font, 1, (0, 255, 0), 2)\r\n\r\n# Remove the bounding boxes with low confidence using non-maxima suppression\r\ndef postprocess(frame, outs):\r\n frameHeight = frame.shape[0]\r\n frameWidth = frame.shape[1]\r\n\r\n # Scan through all the bounding boxes output from the network and keep only the\r\n # ones with high confidence scores. 
Assign the box's class label as the class with the highest score.\r\n classIds = []\r\n confidences = []\r\n boxes = []\r\n for out in outs:\r\n for detection in out:\r\n scores = detection[5:]\r\n classId = np.argmax(scores)\r\n confidence = scores[classId]\r\n if confidence > confThreshold:\r\n center_x = int(detection[0] * frameWidth)\r\n center_y = int(detection[1] * frameHeight)\r\n width = int(detection[2] * frameWidth)\r\n height = int(detection[3] * frameHeight)\r\n left = int(center_x - width / 2)\r\n top = int(center_y - height / 2)\r\n classIds.append(classId)\r\n confidences.append(float(confidence))\r\n boxes.append([left, top, width, height])\r\n\r\n # Perform non maximum suppression to eliminate redundant overlapping boxes with\r\n # lower confidences.\r\n indices = cv2.dnn.NMSBoxes(boxes, confidences, confThreshold, nmsThreshold)\r\n for i in indices:\r\n i = i[0]\r\n box = boxes[i]\r\n left = box[0]\r\n top = box[1]\r\n width = box[2]\r\n height = box[3]\r\n drawPred(classIds[i], confidences[i], left, top, left + width, top + height)\r\n\r\n# Main\r\nwhile True:\r\n # get frame from the video\r\n hasFrame, frame = cap.read()\r\n hasFrame2, frame2 = cap2.read()\r\n\r\n frame = cv2.resize(frame, dsize=(600, 402))\r\n frame2 = cv2.resize(frame2, dsize=(600, 402))\r\n\r\n cv2.imshow(\"Camera\", frame)\r\n cv2.imshow(\"Thermal_Camera\", frame2)\r\n # Stop the program if reached end of video\r\n if not hasFrame:\r\n print(\"Done processing !!!\")\r\n cv2.waitKey(3000)\r\n break\r\n\r\n # Create a 4D blob from a frame.\r\n blob = cv2.dnn.blobFromImage(frame, 1 / 255, (inpWidth, inpHeight), [0, 0, 0], 1, crop=False)\r\n\r\n # Sets the input to the network\r\n net.setInput(blob)\r\n\r\n # Runs the forward pass to get output of the output layers\r\n outs = net.forward(getOutputsNames(net))\r\n\r\n # Remove the bounding boxes with low confidence\r\n postprocess(frame, outs)\r\n\r\n # Print the FPS\r\n current_time = time.time()\r\n sec = current_time - starting_time\r\n starting_time = current_time\r\n fps = 1 / (sec)\r\n str2 = \"FPS : %0.1f\" % fps\r\n # cv2.putText(frame, str2, (10, 50), font, 2, (0, 255, 0), 2)\r\n\r\n # Write the frame with the detection boxes\r\n vid_writer.write(frame.astype(np.uint8))\r\n\r\n # CAMERA RESULT\r\n cv2.imshow(\"CAMERA_Detection\", frame)\r\n\r\n\r\n img2 = None\r\n fast = cv2.FastFeatureDetector_create(30)\r\n fast.setNonmaxSuppression(0)\r\n kp = fast.detect(frame2, None)\r\n img2 = cv2.drawKeypoints(frame2, kp, img2, (0, 255, 255))\r\n # cv2.imshow(\"THERMAL\", img2)\r\n\r\n\r\n hsv = cv2.cvtColor(frame2, cv2.COLOR_BGR2HSV)\r\n car_prediction = 30\r\n lower_white = np.array([0, 0, 255 - car_prediction], dtype=np.uint8)\r\n upper_white = np.array([255, car_prediction, 255], dtype=np.uint8)\r\n mask_white = cv2.inRange(hsv, lower_white, upper_white)\r\n res = cv2.bitwise_and(frame2, frame2, mask=mask_white)\r\n # cv2.imshow(\"THERMAL_CAR\", res)\r\n\r\n\r\n res2 = None\r\n res2 = res\r\n igray = cv2.cvtColor(res2, cv2.COLOR_BGR2GRAY)\r\n iret, ibinary = cv2.threshold(igray, 127, 255, cv2.THRESH_BINARY)\r\n contours, hierachy = cv2.findContours(ibinary, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)\r\n for i in range(len(contours)):\r\n cv2.drawContours(res2, [contours[i]], 0, (255, 255, 255), 2)\r\n cv2.putText(res2, \"car\", tuple(contours[i][0][0]), font, 1, (0, 255, 0), 1)\r\n # cv2.imshow(\"THERMAL_CONTOUR\", res2)\r\n\r\n\r\n # THERMAL PROCESSING RESULT\r\n dst = cv2.addWeighted(res2, 1, frame2, 1, 0)\r\n #cv2.imshow('THERMAL_RES',dst)\r\n 
#cv2.imshow(\"THERMAL\",frame2)\r\n\r\n # FINAL RESULT\r\n dst2 = cv2.addWeighted(res2, 1, frame, 1, 0)\r\n cv2.imshow(\"RESULT\",dst2)\r\n\r\n\r\n # End the video with \"Esc\"\r\n key = cv2.waitKey(1)\r\n if key == 27:\r\n break\r\n\r\ncap.release()\r\ncv2.destroyAllWindows()" ]
[ [ "numpy.array", "numpy.argmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
grisoniFr/virtual_libraries
[ "0aac0ce249f6f3bc529abb3cbdf2d3f49be84388" ]
[ "experiments/do_data_generation.py" ]
[ "import os, sys\nimport time\nimport warnings\nimport argparse\nimport configparser\nimport ast\nimport numpy as np\nfrom math import log\nfrom rdkit import Chem\nfrom rdkit import rdBase\nrdBase.DisableLog('rdApp.*')\nfrom rdkit.Chem import Draw\n\nfrom keras.models import load_model\n\nsys.path.append('../src/')\nfrom python import helper as hp\nfrom python import fixed_parameters as FP\n\nparser = argparse.ArgumentParser(description='SMILES generation')\nparser.add_argument('-fn','--filename', type=str, help='Path to the fine-tuning txt file', required=True)\nparser.add_argument('-m','--model_path', type=str, help='Path to a pretrained model', required=True)\nparser.add_argument('-v','--verbose', type=bool, help='Verbose', required=True)\n\n\ndef int_to_smile(array, indices_token, pad_char):\n \"\"\" \n From an array of int, return a list of \n molecules in string smile format\n Note: remove the padding char\n \"\"\"\n all_mols = []\n for seq in array:\n new_mol = [indices_token[str(int(x))] for x in seq]\n all_mols.append(''.join(new_mol).replace(pad_char, ''))\n return all_mols\n\n\ndef one_hot_encode(token_lists, n_chars):\n \n output = np.zeros((len(token_lists), len(token_lists[0]), n_chars))\n for i, token_list in enumerate(token_lists):\n for j, token in enumerate(token_list):\n output[i, j, int(token)] = 1\n return output\n \ndef sample(model, temp, start_char, end_char, max_len, indices_token, token_indices):\n \n n_chars = len(indices_token)\n\n seed_token = [token_indices[start_char]]\n generated = indices_token[str(seed_token[0])]\n \n while generated[-1] != end_char and len(generated) < max_len:\n x_seed = one_hot_encode([seed_token], n_chars)\n full_preds = model.predict(x_seed, verbose=0)[0]\n logits = full_preds[-1]\n \n probas, next_char_ind = get_token_proba(logits, temp)\n \n next_char = indices_token[str(next_char_ind)]\n generated += next_char\n seed_token += [next_char_ind]\n \n return generated\n\ndef get_token_proba(preds, temp):\n \n preds = np.asarray(preds).astype('float64')\n preds = np.log(preds) / temp\n exp_preds = np.exp(preds)\n \n probas = exp_preds / np.sum(exp_preds)\n char_ind = np.argmax(np.random.multinomial(1, probas, 1))\n \n return probas, char_ind\n\ndef softmax(preds):\n return np.exp(preds)/np.sum(np.exp(preds))\n\n\nif __name__ == '__main__':\n \n start = time.time()\n \n ####################################\n # get back parameters\n args = vars(parser.parse_args())\n \n verbose = args['verbose']\n filename = args['filename']\n model_path = args['model_path']\n name_data = filename.split('/')[-1].replace('.txt','')\n config = configparser.ConfigParser()\n config.read('parameters.ini')\n \n if verbose: print('\\nSTART SAMPLING')\n ####################################\n \n \n \n ####################################\n # path to save data\n save_path = f'results/{name_data}/generated_data/'\n os.makedirs(save_path, exist_ok=True)\n \n # path to checkpoints\n dir_ckpts = f'results/{name_data}/models/'\n ####################################\n \n \n \n \n ####################################\n # Parameters to sample novo smiles\n temp = float(config['EXPERIMENTS']['temp'])\n n_sample = int(config['EXPERIMENTS']['n_sample'])\n if n_sample>5000:\n warnings.warn('You will sample more than 5000 SMILES; this will take a while')\n \n max_len = int(config['PROCESSING']['max_len'])\n pad_char = FP.PROCESSING_FIXED['pad_char']\n start_char = FP.PROCESSING_FIXED['start_char']\n end_char = FP.PROCESSING_FIXED['end_char']\n indices_token = FP.INDICES_TOKEN\n 
token_indices = FP.TOKEN_INDICES\n ####################################\n \n \n \n ####################################\n # start the sampling of new SMILES\n epoch = model_path.split('/')[-1].replace('.h5', '')\n if verbose: print(f'Sampling from model saved at epoch {epoch}')\n \n model = load_model(model_path)\n \n generated_smi = []\n for n in range(n_sample):\n generated_smi.append(sample(model, temp, \n start_char, end_char, max_len+1, \n indices_token, token_indices))\n hp.save_obj(generated_smi, f'{save_path}{epoch}_{temp}')\n \n end = time.time()\n if verbose: print(f'SAMPLING DONE for model from epoch {epoch} in {end-start:.2f} seconds') \n ####################################\n " ]
[ [ "numpy.log", "numpy.asarray", "numpy.random.multinomial", "numpy.exp", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
JosmarSuarez/yolact
[ "43b694603638562ffcdc81df7b04783c9990291c" ]
[ "yolact.py" ]
[ "import torch, torchvision\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchvision.models.resnet import Bottleneck\nimport numpy as np\nfrom itertools import product\nfrom math import sqrt\nfrom typing import List\nfrom collections import defaultdict\n\nfrom data.config import cfg, mask_type\nfrom layers import Detect\nfrom layers.interpolate import InterpolateModule\nfrom backbone import construct_backbone\n\nimport torch.backends.cudnn as cudnn\nfrom utils import timer\nfrom utils.functions import MovingAverage, make_net\n\n# This is required for Pytorch 1.0.1 on Windows to initialize Cuda on some driver versions.\n# See the bug report here: https://github.com/pytorch/pytorch/issues/17108\ntorch.cuda.current_device()\n\n# As of March 10, 2019, Pytorch DataParallel still doesn't support JIT Script Modules\nuse_jit = torch.cuda.device_count() <= 1\nif not use_jit:\n print('Multiple GPUs detected! Turning off JIT.')\n\nScriptModuleWrapper = torch.jit.ScriptModule if use_jit else nn.Module\nscript_method_wrapper = torch.jit.script_method if use_jit else lambda fn, _rcn=None: fn\n\n\n\nclass Concat(nn.Module):\n def __init__(self, nets, extra_params):\n super().__init__()\n\n self.nets = nn.ModuleList(nets)\n self.extra_params = extra_params\n \n def forward(self, x):\n # Concat each along the channel dimension\n return torch.cat([net(x) for net in self.nets], dim=1, **self.extra_params)\n\nprior_cache = defaultdict(lambda: None)\n\nclass PredictionModule(nn.Module):\n \"\"\"\n The (c) prediction module adapted from DSSD:\n https://arxiv.org/pdf/1701.06659.pdf\n\n Note that this is slightly different to the module in the paper\n because the Bottleneck block actually has a 3x3 convolution in\n the middle instead of a 1x1 convolution. Though, I really can't\n be arsed to implement it myself, and, who knows, this might be\n better.\n\n Args:\n - in_channels: The input feature size.\n - out_channels: The output feature size (must be a multiple of 4).\n - aspect_ratios: A list of lists of priorbox aspect ratios (one list per scale).\n - scales: A list of priorbox scales relative to this layer's convsize.\n For instance: If this layer has convouts of size 30x30 for\n an image of size 600x600, the 'default' (scale\n of 1) for this layer would produce bounding\n boxes with an area of 20x20px. 
If the scale is\n .5 on the other hand, this layer would consider\n bounding boxes with area 10x10px, etc.\n - parent: If parent is a PredictionModule, this module will use all the layers\n from parent instead of from this module.\n \"\"\"\n \n def __init__(self, in_channels, out_channels=1024, aspect_ratios=[[1]], scales=[1], parent=None, index=0):\n super().__init__()\n\n self.num_classes = cfg.num_classes\n self.mask_dim = cfg.mask_dim # Defined by Yolact\n self.num_priors = sum(len(x)*len(scales) for x in aspect_ratios)\n self.parent = [parent] # Don't include this in the state dict\n self.index = index\n self.num_heads = cfg.num_heads # Defined by Yolact\n\n if cfg.mask_proto_split_prototypes_by_head and cfg.mask_type == mask_type.lincomb:\n self.mask_dim = self.mask_dim // self.num_heads\n\n if cfg.mask_proto_prototypes_as_features:\n in_channels += self.mask_dim\n \n if parent is None:\n if cfg.extra_head_net is None:\n out_channels = in_channels\n else:\n self.upfeature, out_channels = make_net(in_channels, cfg.extra_head_net)\n\n if cfg.use_prediction_module:\n self.block = Bottleneck(out_channels, out_channels // 4)\n self.conv = nn.Conv2d(out_channels, out_channels, kernel_size=1, bias=True)\n self.bn = nn.BatchNorm2d(out_channels)\n\n self.bbox_layer = nn.Conv2d(out_channels, self.num_priors * 4, **cfg.head_layer_params)\n self.conf_layer = nn.Conv2d(out_channels, self.num_priors * self.num_classes, **cfg.head_layer_params)\n self.mask_layer = nn.Conv2d(out_channels, self.num_priors * self.mask_dim, **cfg.head_layer_params)\n \n if cfg.use_mask_scoring:\n self.score_layer = nn.Conv2d(out_channels, self.num_priors, **cfg.head_layer_params)\n\n if cfg.use_instance_coeff:\n self.inst_layer = nn.Conv2d(out_channels, self.num_priors * cfg.num_instance_coeffs, **cfg.head_layer_params)\n \n # What is this ugly lambda doing in the middle of all this clean prediction module code?\n def make_extra(num_layers):\n if num_layers == 0:\n return lambda x: x\n else:\n # Looks more complicated than it is. 
This just creates an array of num_layers alternating conv-relu\n return nn.Sequential(*sum([[\n nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),\n nn.ReLU(inplace=True)\n ] for _ in range(num_layers)], []))\n\n self.bbox_extra, self.conf_extra, self.mask_extra = [make_extra(x) for x in cfg.extra_layers]\n \n if cfg.mask_type == mask_type.lincomb and cfg.mask_proto_coeff_gate:\n self.gate_layer = nn.Conv2d(out_channels, self.num_priors * self.mask_dim, kernel_size=3, padding=1)\n\n self.aspect_ratios = aspect_ratios\n self.scales = scales\n\n self.priors = None\n self.last_conv_size = None\n self.last_img_size = None\n\n def forward(self, x):\n \"\"\"\n Args:\n - x: The convOut from a layer in the backbone network\n Size: [batch_size, in_channels, conv_h, conv_w])\n\n Returns a tuple (bbox_coords, class_confs, mask_output, prior_boxes) with sizes\n - bbox_coords: [batch_size, conv_h*conv_w*num_priors, 4]\n - class_confs: [batch_size, conv_h*conv_w*num_priors, num_classes]\n - mask_output: [batch_size, conv_h*conv_w*num_priors, mask_dim]\n - prior_boxes: [conv_h*conv_w*num_priors, 4]\n \"\"\"\n # In case we want to use another module's layers\n src = self if self.parent[0] is None else self.parent[0]\n \n conv_h = x.size(2)\n conv_w = x.size(3)\n \n if cfg.extra_head_net is not None:\n x = src.upfeature(x)\n \n if cfg.use_prediction_module:\n # The two branches of PM design (c)\n a = src.block(x)\n \n b = src.conv(x)\n b = src.bn(b)\n b = F.relu(b)\n \n # TODO: Possibly switch this out for a product\n x = a + b\n\n bbox_x = src.bbox_extra(x)\n conf_x = src.conf_extra(x)\n mask_x = src.mask_extra(x)\n\n bbox = src.bbox_layer(bbox_x).permute(0, 2, 3, 1).contiguous().view(x.size(0), -1, 4)\n conf = src.conf_layer(conf_x).permute(0, 2, 3, 1).contiguous().view(x.size(0), -1, self.num_classes)\n \n if cfg.eval_mask_branch:\n mask = src.mask_layer(mask_x).permute(0, 2, 3, 1).contiguous().view(x.size(0), -1, self.mask_dim)\n else:\n mask = torch.zeros(x.size(0), bbox.size(1), self.mask_dim, device=bbox.device)\n\n if cfg.use_mask_scoring:\n score = src.score_layer(x).permute(0, 2, 3, 1).contiguous().view(x.size(0), -1, 1)\n\n if cfg.use_instance_coeff:\n inst = src.inst_layer(x).permute(0, 2, 3, 1).contiguous().view(x.size(0), -1, cfg.num_instance_coeffs) \n\n # See box_utils.decode for an explanation of this\n if cfg.use_yolo_regressors:\n bbox[:, :, :2] = torch.sigmoid(bbox[:, :, :2]) - 0.5\n bbox[:, :, 0] /= conv_w\n bbox[:, :, 1] /= conv_h\n\n if cfg.eval_mask_branch:\n if cfg.mask_type == mask_type.direct:\n mask = torch.sigmoid(mask)\n elif cfg.mask_type == mask_type.lincomb:\n mask = cfg.mask_proto_coeff_activation(mask)\n\n if cfg.mask_proto_coeff_gate:\n gate = src.gate_layer(x).permute(0, 2, 3, 1).contiguous().view(x.size(0), -1, self.mask_dim)\n mask = mask * torch.sigmoid(gate)\n\n if cfg.mask_proto_split_prototypes_by_head and cfg.mask_type == mask_type.lincomb:\n mask = F.pad(mask, (self.index * self.mask_dim, (self.num_heads - self.index - 1) * self.mask_dim), mode='constant', value=0)\n \n priors = self.make_priors(conv_h, conv_w, x.device)\n\n preds = { 'loc': bbox, 'conf': conf, 'mask': mask, 'priors': priors }\n\n if cfg.use_mask_scoring:\n preds['score'] = score\n\n if cfg.use_instance_coeff:\n preds['inst'] = inst\n \n return preds\n\n def make_priors(self, conv_h, conv_w, device):\n \"\"\" Note that priors are [x,y,width,height] where (x,y) is the center of the box. 
\"\"\"\n global prior_cache\n size = (conv_h, conv_w)\n\n with timer.env('makepriors'):\n if self.last_img_size != (cfg._tmp_img_w, cfg._tmp_img_h):\n prior_data = []\n\n # Iteration order is important (it has to sync up with the convout)\n for j, i in product(range(conv_h), range(conv_w)):\n # +0.5 because priors are in center-size notation\n x = (i + 0.5) / conv_w\n y = (j + 0.5) / conv_h\n \n for ars in self.aspect_ratios:\n for scale in self.scales:\n for ar in ars:\n if not cfg.backbone.preapply_sqrt:\n ar = sqrt(ar)\n\n if cfg.backbone.use_pixel_scales:\n w = scale * ar / cfg.max_size\n h = scale / ar / cfg.max_size\n else:\n w = scale * ar / conv_w\n h = scale / ar / conv_h\n \n # This is for backward compatability with a bug where I made everything square by accident\n if cfg.backbone.use_square_anchors:\n h = w\n\n prior_data += [x, y, w, h]\n\n self.priors = torch.Tensor(prior_data, device=device).view(-1, 4).detach()\n self.priors.requires_grad = False\n self.last_img_size = (cfg._tmp_img_w, cfg._tmp_img_h)\n self.last_conv_size = (conv_w, conv_h)\n prior_cache[size] = None\n elif self.priors.device != device:\n # This whole weird situation is so that DataParalell doesn't copy the priors each iteration\n if prior_cache[size] is None:\n prior_cache[size] = {}\n \n if device not in prior_cache[size]:\n prior_cache[size][device] = self.priors.to(device)\n\n self.priors = prior_cache[size][device]\n \n return self.priors\n\nclass FPN(ScriptModuleWrapper):\n \"\"\"\n Implements a general version of the FPN introduced in\n https://arxiv.org/pdf/1612.03144.pdf\n\n Parameters (in cfg.fpn):\n - num_features (int): The number of output features in the fpn layers.\n - interpolation_mode (str): The mode to pass to F.interpolate.\n - num_downsample (int): The number of downsampled layers to add onto the selected layers.\n These extra layers are downsampled from the last selected layer.\n\n Args:\n - in_channels (list): For each conv layer you supply in the forward pass,\n how many features will it have?\n \"\"\"\n __constants__ = ['interpolation_mode', 'num_downsample', 'use_conv_downsample', 'relu_pred_layers',\n 'lat_layers', 'pred_layers', 'downsample_layers', 'relu_downsample_layers']\n\n def __init__(self, in_channels):\n super().__init__()\n\n self.lat_layers = nn.ModuleList([\n nn.Conv2d(x, cfg.fpn.num_features, kernel_size=1)\n for x in reversed(in_channels)\n ])\n\n # This is here for backwards compatability\n padding = 1 if cfg.fpn.pad else 0\n self.pred_layers = nn.ModuleList([\n nn.Conv2d(cfg.fpn.num_features, cfg.fpn.num_features, kernel_size=3, padding=padding)\n for _ in in_channels\n ])\n\n if cfg.fpn.use_conv_downsample:\n self.downsample_layers = nn.ModuleList([\n nn.Conv2d(cfg.fpn.num_features, cfg.fpn.num_features, kernel_size=3, padding=1, stride=2)\n for _ in range(cfg.fpn.num_downsample)\n ])\n \n self.interpolation_mode = cfg.fpn.interpolation_mode\n self.num_downsample = cfg.fpn.num_downsample\n self.use_conv_downsample = cfg.fpn.use_conv_downsample\n self.relu_downsample_layers = cfg.fpn.relu_downsample_layers\n self.relu_pred_layers = cfg.fpn.relu_pred_layers\n\n @script_method_wrapper\n def forward(self, convouts:List[torch.Tensor]):\n \"\"\"\n Args:\n - convouts (list): A list of convouts for the corresponding layers in in_channels.\n Returns:\n - A list of FPN convouts in the same order as x with extra downsample layers if requested.\n \"\"\"\n\n out = []\n x = torch.zeros(1, device=convouts[0].device)\n for i in range(len(convouts)):\n out.append(x)\n\n # For 
backward compatability, the conv layers are stored in reverse but the input and output is\n # given in the correct order. Thus, use j=-i-1 for the input and output and i for the conv layers.\n j = len(convouts)\n for lat_layer in self.lat_layers:\n j -= 1\n\n if j < len(convouts) - 1:\n _, _, h, w = convouts[j].size()\n x = F.interpolate(x, size=(h, w), mode=self.interpolation_mode, align_corners=False)\n \n x = x + lat_layer(convouts[j])\n out[j] = x\n \n # This janky second loop is here because TorchScript.\n j = len(convouts)\n for pred_layer in self.pred_layers:\n j -= 1\n out[j] = pred_layer(out[j])\n\n if self.relu_pred_layers:\n F.relu(out[j], inplace=True)\n\n cur_idx = len(out)\n\n # In the original paper, this takes care of P6\n if self.use_conv_downsample:\n for downsample_layer in self.downsample_layers:\n out.append(downsample_layer(out[-1]))\n else:\n for idx in range(self.num_downsample):\n # Note: this is an untested alternative to out.append(out[-1][:, :, ::2, ::2]). Thanks TorchScript.\n out.append(nn.functional.max_pool2d(out[-1], 1, stride=2))\n\n if self.relu_downsample_layers:\n for idx in range(len(out) - cur_idx):\n out[idx] = F.relu(out[idx + cur_idx], inplace=False)\n\n return out\n\nclass FastMaskIoUNet(ScriptModuleWrapper):\n\n def __init__(self):\n super().__init__()\n input_channels = 1\n last_layer = [(cfg.num_classes-1, 1, {})]\n self.maskiou_net, _ = make_net(input_channels, cfg.maskiou_net + last_layer, include_last_relu=True)\n\n def forward(self, x):\n x = self.maskiou_net(x)\n maskiou_p = F.max_pool2d(x, kernel_size=x.size()[2:]).squeeze(-1).squeeze(-1)\n\n return maskiou_p\n\n\n\nclass Yolact(nn.Module):\n \"\"\"\n\n\n ██╗ ██╗ ██████╗ ██╗ █████╗ ██████╗████████╗\n ╚██╗ ██╔╝██╔═══██╗██║ ██╔══██╗██╔════╝╚══██╔══╝\n ╚████╔╝ ██║ ██║██║ ███████║██║ ██║ \n ╚██╔╝ ██║ ██║██║ ██╔══██║██║ ██║ \n ██║ ╚██████╔╝███████╗██║ ██║╚██████╗ ██║ \n ╚═╝ ╚═════╝ ╚══════╝╚═╝ ╚═╝ ╚═════╝ ╚═╝ \n\n\n You can set the arguments by changing them in the backbone config object in config.py.\n\n Parameters (in cfg.backbone):\n - selected_layers: The indices of the conv layers to use for prediction.\n - pred_scales: A list with len(selected_layers) containing tuples of scales (see PredictionModule)\n - pred_aspect_ratios: A list of lists of aspect ratios with len(selected_layers) (see PredictionModule)\n \"\"\"\n\n def __init__(self, only_last_layer=False):\n super().__init__()\n \n self.only_last_layer = only_last_layer\n self.backbone = construct_backbone(cfg.backbone)\n\n if cfg.freeze_bn:\n self.freeze_bn()\n\n # Compute mask_dim here and add it back to the config. 
Make sure Yolact's constructor is called early!\n if cfg.mask_type == mask_type.direct:\n cfg.mask_dim = cfg.mask_size**2\n elif cfg.mask_type == mask_type.lincomb:\n if cfg.mask_proto_use_grid:\n self.grid = torch.Tensor(np.load(cfg.mask_proto_grid_file))\n self.num_grids = self.grid.size(0)\n else:\n self.num_grids = 0\n\n self.proto_src = cfg.mask_proto_src\n \n if self.proto_src is None: in_channels = 3\n elif cfg.fpn is not None: in_channels = cfg.fpn.num_features\n else: in_channels = self.backbone.channels[self.proto_src]\n in_channels += self.num_grids\n\n # The include_last_relu=false here is because we might want to change it to another function\n self.proto_net, cfg.mask_dim = make_net(in_channels, cfg.mask_proto_net, include_last_relu=False)\n\n if cfg.mask_proto_bias:\n cfg.mask_dim += 1\n\n\n self.selected_layers = cfg.backbone.selected_layers\n src_channels = self.backbone.channels\n\n if cfg.use_maskiou:\n self.maskiou_net = FastMaskIoUNet()\n\n if cfg.fpn is not None:\n # Some hacky rewiring to accomodate the FPN\n self.fpn = FPN([src_channels[i] for i in self.selected_layers])\n self.selected_layers = list(range(len(self.selected_layers) + cfg.fpn.num_downsample))\n src_channels = [cfg.fpn.num_features] * len(self.selected_layers)\n\n\n self.prediction_layers = nn.ModuleList()\n cfg.num_heads = len(self.selected_layers)\n\n for idx, layer_idx in enumerate(self.selected_layers):\n # If we're sharing prediction module weights, have every module's parent be the first one\n parent = None\n if cfg.share_prediction_module and idx > 0:\n parent = self.prediction_layers[0]\n\n pred = PredictionModule(src_channels[layer_idx], src_channels[layer_idx],\n aspect_ratios = cfg.backbone.pred_aspect_ratios[idx],\n scales = cfg.backbone.pred_scales[idx],\n parent = parent,\n index = idx)\n self.prediction_layers.append(pred)\n\n # Extra parameters for the extra losses\n if cfg.use_class_existence_loss:\n # This comes from the smallest layer selected\n # Also note that cfg.num_classes includes background\n self.class_existence_fc = nn.Linear(src_channels[-1], cfg.num_classes - 1)\n \n if cfg.use_semantic_segmentation_loss:\n self.semantic_seg_conv = nn.Conv2d(src_channels[0], cfg.num_classes-1, kernel_size=1)\n\n # For use in evaluation\n self.detect = Detect(cfg.num_classes, bkg_label=0, top_k=cfg.nms_top_k,\n conf_thresh=cfg.nms_conf_thresh, nms_thresh=cfg.nms_thresh)\n\n def save_weights(self, path):\n \"\"\" Saves the model's weights using compression because the file sizes were getting too big. \"\"\"\n torch.save(self.state_dict(), path)\n \n def load_weights(self, path):\n \"\"\" Loads weights from a compressed save file. \"\"\"\n state_dict = torch.load(path)\n\n # For backward compatability, remove these (the new variable is called layers)\n for key in list(state_dict.keys()):\n if key.startswith('backbone.layer') and not key.startswith('backbone.layers'):\n del state_dict[key]\n \n # Also for backward compatibility with v1.0 weights, do this check\n if key.startswith('fpn.downsample_layers.'):\n if cfg.fpn is not None and int(key.split('.')[2]) >= cfg.fpn.num_downsample:\n del state_dict[key]\n # Uncomment this in normal conditions\n # self.load_state_dict(state_dict)\n # Added this for fine-tuning. Comment this in normal conditions.\n try:\n self.load_state_dict(state_dict)\n except RuntimeError as e:\n print('Ignoring \"' + str(e) + '\"')\n\n def init_weights(self, backbone_path):\n \"\"\" Initialize weights for training. 
\"\"\"\n # Initialize the backbone with the pretrained weights.\n self.backbone.init_backbone(backbone_path)\n\n conv_constants = getattr(nn.Conv2d(1, 1, 1), '__constants__')\n \n # Quick lambda to test if one list contains the other\n def all_in(x, y):\n for _x in x:\n if _x not in y:\n return False\n return True\n\n # Initialize the rest of the conv layers with xavier\n for name, module in self.named_modules():\n # See issue #127 for why we need such a complicated condition if the module is a WeakScriptModuleProxy\n # Broke in 1.3 (see issue #175), WeakScriptModuleProxy was turned into just ScriptModule.\n # Broke in 1.4 (see issue #292), where RecursiveScriptModule is the new star of the show.\n # Note that this might break with future pytorch updates, so let me know if it does\n is_script_conv = False\n if 'Script' in type(module).__name__:\n # 1.4 workaround: now there's an original_name member so just use that\n if hasattr(module, 'original_name'):\n is_script_conv = 'Conv' in module.original_name\n # 1.3 workaround: check if this has the same constants as a conv module\n else:\n is_script_conv = (\n all_in(module.__dict__['_constants_set'], conv_constants)\n and all_in(conv_constants, module.__dict__['_constants_set']))\n \n is_conv_layer = isinstance(module, nn.Conv2d) or is_script_conv\n\n if is_conv_layer and module not in self.backbone.backbone_modules:\n nn.init.xavier_uniform_(module.weight.data)\n\n if module.bias is not None:\n if cfg.use_focal_loss and 'conf_layer' in name:\n if not cfg.use_sigmoid_focal_loss:\n # Initialize the last layer as in the focal loss paper.\n # Because we use softmax and not sigmoid, I had to derive an alternate expression\n # on a notecard. Define pi to be the probability of outputting a foreground detection.\n # Then let z = sum(exp(x)) - exp(x_0). Finally let c be the number of foreground classes.\n # Chugging through the math, this gives us\n # x_0 = log(z * (1 - pi) / pi) where 0 is the background class\n # x_i = log(z / c) for all i > 0\n # For simplicity (and because we have a degree of freedom here), set z = 1. 
Then we have\n # x_0 = log((1 - pi) / pi) note: don't split up the log for numerical stability\n # x_i = -log(c) for all i > 0\n module.bias.data[0] = np.log((1 - cfg.focal_loss_init_pi) / cfg.focal_loss_init_pi)\n module.bias.data[1:] = -np.log(module.bias.size(0) - 1)\n else:\n module.bias.data[0] = -np.log(cfg.focal_loss_init_pi / (1 - cfg.focal_loss_init_pi))\n module.bias.data[1:] = -np.log((1 - cfg.focal_loss_init_pi) / cfg.focal_loss_init_pi)\n else:\n module.bias.data.zero_()\n \n def train(self, mode=True):\n super().train(mode)\n\n if cfg.freeze_bn:\n self.freeze_bn()\n\n def freeze_bn(self, enable=False):\n \"\"\" Adapted from https://discuss.pytorch.org/t/how-to-train-with-frozen-batchnorm/12106/8 \"\"\"\n for module in self.modules():\n if isinstance(module, nn.BatchNorm2d):\n module.train() if enable else module.eval()\n\n module.weight.requires_grad = enable\n module.bias.requires_grad = enable\n \n def forward(self, x):\n \"\"\" The input should be of size [batch_size, 3, img_h, img_w] \"\"\"\n _, _, img_h, img_w = x.size()\n cfg._tmp_img_h = img_h\n cfg._tmp_img_w = img_w\n \n with timer.env('backbone'):\n outs = self.backbone(x)\n\n if cfg.fpn is not None:\n with timer.env('fpn'):\n # Use backbone.selected_layers because we overwrote self.selected_layers\n outs = [outs[i] for i in cfg.backbone.selected_layers]\n outs = self.fpn(outs)\n\n proto_out = None\n if cfg.mask_type == mask_type.lincomb and cfg.eval_mask_branch:\n with timer.env('proto'):\n proto_x = x if self.proto_src is None else outs[self.proto_src]\n \n if self.num_grids > 0:\n grids = self.grid.repeat(proto_x.size(0), 1, 1, 1)\n proto_x = torch.cat([proto_x, grids], dim=1)\n\n proto_out = self.proto_net(proto_x)\n proto_out = cfg.mask_proto_prototype_activation(proto_out)\n\n if cfg.mask_proto_prototypes_as_features:\n # Clone here because we don't want to permute this, though idk if contiguous makes this unnecessary\n proto_downsampled = proto_out.clone()\n\n if cfg.mask_proto_prototypes_as_features_no_grad:\n proto_downsampled = proto_out.detach()\n \n # Move the features last so the multiplication is easy\n proto_out = proto_out.permute(0, 2, 3, 1).contiguous()\n\n if cfg.mask_proto_bias:\n bias_shape = [x for x in proto_out.size()]\n bias_shape[-1] = 1\n proto_out = torch.cat([proto_out, torch.ones(*bias_shape)], -1)\n\n\n with timer.env('pred_heads'):\n pred_outs = { 'loc': [], 'conf': [], 'mask': [], 'priors': [] }\n\n if cfg.use_mask_scoring:\n pred_outs['score'] = []\n\n if cfg.use_instance_coeff:\n pred_outs['inst'] = []\n \n for idx, pred_layer in zip(self.selected_layers, self.prediction_layers):\n pred_x = outs[idx]\n\n if cfg.mask_type == mask_type.lincomb and cfg.mask_proto_prototypes_as_features:\n # Scale the prototypes down to the current prediction layer's size and add it as inputs\n proto_downsampled = F.interpolate(proto_downsampled, size=outs[idx].size()[2:], mode='bilinear', align_corners=False)\n pred_x = torch.cat([pred_x, proto_downsampled], dim=1)\n\n # A hack for the way dataparallel works\n if cfg.share_prediction_module and pred_layer is not self.prediction_layers[0]:\n pred_layer.parent = [self.prediction_layers[0]]\n \n if self.only_last_layer:\n p = pred_layer(pred_x.detach())\n else:\n p = pred_layer(pred_x)\n \n for k, v in p.items():\n pred_outs[k].append(v)\n\n for k, v in pred_outs.items():\n pred_outs[k] = torch.cat(v, -2)\n\n if proto_out is not None:\n pred_outs['proto'] = proto_out\n\n if self.training:\n # For the extra loss functions\n if 
cfg.use_class_existence_loss:\n pred_outs['classes'] = self.class_existence_fc(outs[-1].mean(dim=(2, 3)))\n\n if cfg.use_semantic_segmentation_loss:\n pred_outs['segm'] = self.semantic_seg_conv(outs[0])\n\n return pred_outs\n else:\n if cfg.use_mask_scoring:\n pred_outs['score'] = torch.sigmoid(pred_outs['score'])\n\n if cfg.use_focal_loss:\n if cfg.use_sigmoid_focal_loss:\n # Note: even though conf[0] exists, this mode doesn't train it so don't use it\n pred_outs['conf'] = torch.sigmoid(pred_outs['conf'])\n if cfg.use_mask_scoring:\n pred_outs['conf'] *= pred_outs['score']\n elif cfg.use_objectness_score:\n # See focal_loss_sigmoid in multibox_loss.py for details\n objectness = torch.sigmoid(pred_outs['conf'][:, :, 0])\n pred_outs['conf'][:, :, 1:] = objectness[:, :, None] * F.softmax(pred_outs['conf'][:, :, 1:], -1)\n pred_outs['conf'][:, :, 0 ] = 1 - objectness\n else:\n pred_outs['conf'] = F.softmax(pred_outs['conf'], -1)\n else:\n\n if cfg.use_objectness_score:\n objectness = torch.sigmoid(pred_outs['conf'][:, :, 0])\n \n pred_outs['conf'][:, :, 1:] = (objectness > 0.10)[..., None] \\\n * F.softmax(pred_outs['conf'][:, :, 1:], dim=-1)\n \n else:\n pred_outs['conf'] = F.softmax(pred_outs['conf'], -1)\n\n return self.detect(pred_outs, self)\n\n\n\n\n# Some testing code\nif __name__ == '__main__':\n from utils.functions import init_console\n init_console()\n\n # Use the first argument to set the config if you want\n import sys\n if len(sys.argv) > 1:\n from data.config import set_cfg\n set_cfg(sys.argv[1])\n\n net = Yolact()\n net.train()\n net.init_weights(backbone_path='weights/' + cfg.backbone.path)\n\n # GPU\n net = net.cuda()\n torch.set_default_tensor_type('torch.cuda.FloatTensor')\n\n x = torch.zeros((1, 3, cfg.max_size, cfg.max_size))\n y = net(x)\n\n for p in net.prediction_layers:\n print(p.last_conv_size)\n\n print()\n for k, a in y.items():\n print(k + ': ', a.size(), torch.sum(a))\n exit()\n \n net(x)\n # timer.disable('pass2')\n avg = MovingAverage()\n try:\n while True:\n timer.reset()\n with timer.env('everything else'):\n net(x)\n avg.add(timer.total_time())\n print('\\033[2J') # Moves console cursor to 0,0\n timer.print_stats()\n print('Avg fps: %.2f\\tAvg ms: %.2f ' % (1/avg.get_avg(), avg.get_avg()*1000))\n except KeyboardInterrupt:\n pass\n" ]
[ [ "torch.set_default_tensor_type", "torch.nn.functional.softmax", "torch.zeros", "torch.load", "torch.cat", "torch.sum", "torch.nn.functional.interpolate", "torch.ones", "torch.nn.functional.relu", "numpy.load", "torch.nn.functional.max_pool2d", "torch.nn.functional.pad", "torch.sigmoid", "numpy.log", "torch.cuda.current_device", "torch.nn.ModuleList", "torch.nn.Conv2d", "torch.nn.Linear", "torch.nn.BatchNorm2d", "torch.cuda.device_count", "torch.Tensor", "torch.nn.init.xavier_uniform_", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
metabacchi/FuzzyClassificator
[ "f59c10364b872edce342403db6ef26e30d7f69b8", "c54661f13857d5bcb0095ba2fb12f5a403a4a70f" ]
[ "pybrain/tools/functions.py", "pybrain/rl/environments/twoplayergames/capturegame.py" ]
[ "__author__ = 'Tom Schaul, [email protected]'\n\nfrom scipy import array, exp, tanh, clip, log, dot, sqrt, power, pi, tan, diag, rand, real_if_close\nfrom scipy.linalg import inv, det, svd, logm, expm2\n\n\ndef semilinear(x):\n \"\"\" This function ensures that the values of the array are always positive. It is\n x+1 for x=>0 and exp(x) for x<0. \"\"\"\n try:\n # assume x is a numpy array\n shape = x.shape\n x.flatten()\n x = x.tolist()\n except AttributeError:\n # no, it wasn't: build shape from length of list\n shape = (1, len(x))\n def f(val):\n if val < 0:\n # exponential function for x<0\n return safeExp(val)\n else:\n # linear function for x>=0\n return val + 1.0\n return array(map(f, x)).reshape(shape)\n\n\ndef semilinearPrime(x):\n \"\"\" This function is the first derivative of the semilinear function (above).\n It is needed for the backward pass of the module. \"\"\"\n try:\n # assume x is a numpy array\n shape = x.shape\n x.flatten()\n x = x.tolist()\n except AttributeError:\n # no, it wasn't: build shape from length of list\n shape = (1, len(x))\n def f(val):\n if val < 0:\n # exponential function for x<0\n return safeExp(val)\n else:\n # linear function for x>=0\n return 1.0\n return array(map(f, x)).reshape(shape)\n\n\ndef safeExp(x):\n \"\"\" Bounded range for the exponential function (won't produce inf or NaN). \"\"\"\n return exp(clip(x, -500, 500))\n\n\ndef sigmoid(x):\n \"\"\" Logistic sigmoid function. \"\"\"\n return 1. / (1. + safeExp(-x))\n\n\ndef sigmoidPrime(x):\n \"\"\" Derivative of logistic sigmoid. \"\"\"\n tmp = sigmoid(x)\n return tmp * (1 - tmp)\n\n\ndef tanhPrime(x):\n \"\"\" Derivative of tanh. \"\"\"\n tmp = tanh(x)\n return 1 - tmp * tmp\n\n\ndef ranking(R):\n \"\"\" Produces a linear ranking of the values in R. \"\"\"\n l = sorted(list(enumerate(R)), cmp=lambda a, b: cmp(a[1], b[1]))\n l = sorted(list(enumerate(l)), cmp=lambda a, b: cmp(a[1], b[1]))\n return array(map(lambda kv: kv[0], l))\n\n\ndef expln(x):\n \"\"\" This continuous function ensures that the values of the array are always positive.\n It is ln(x+1)+1 for x >= 0 and exp(x) for x < 0. \"\"\"\n def f(val):\n if val < 0:\n # exponential function for x < 0\n return exp(val)\n else:\n # natural log function for x >= 0\n return log(val + 1.0) + 1\n try:\n result = array(map(f, x))\n except TypeError:\n result = array(f(x))\n\n return result\n\n\ndef explnPrime(x):\n \"\"\" This function is the first derivative of the expln function (above).\n It is needed for the backward pass of the module. \"\"\"\n def f(val):\n if val < 0:\n # exponential function for x<0\n return exp(val)\n else:\n # linear function for x>=0\n return 1.0 / (val + 1.0)\n try:\n result = array(map(f, x))\n except TypeError:\n result = array(f(x))\n\n return result\n\n\ndef multivariateNormalPdf(z, x, sigma):\n \"\"\" The pdf of a multivariate normal distribution (not in scipy).\n The sample z and the mean x should be 1-dim-arrays, and sigma a square 2-dim-array. \"\"\"\n assert len(z.shape) == 1 and len(x.shape) == 1 and len(x) == len(z) and sigma.shape == (len(x), len(z))\n tmp = -0.5 * dot(dot((z - x), inv(sigma)), (z - x))\n res = (1. / power(2.0 * pi, len(z) / 2.)) * (1. / sqrt(det(sigma))) * exp(tmp)\n return res\n\n\ndef simpleMultivariateNormalPdf(z, detFactorSigma):\n \"\"\" Assuming z has been transformed to a mean of zero and an identity matrix of covariances.\n Needs to provide the determinant of the factorized (real) covariance matrix. 
\"\"\"\n dim = len(z)\n return exp(-0.5 * dot(z, z)) / (power(2.0 * pi, dim / 2.) * detFactorSigma)\n\n\ndef multivariateCauchy(mu, sigma, onlyDiagonal=True):\n \"\"\" Generates a sample according to a given multivariate Cauchy distribution. \"\"\"\n if not onlyDiagonal:\n u, s, d = svd(sigma)\n coeffs = sqrt(s)\n else:\n coeffs = diag(sigma)\n r = rand(len(mu))\n res = coeffs * tan(pi * (r - 0.5))\n if not onlyDiagonal:\n res = dot(d, dot(res, u))\n return res + mu\n\n\ndef approxChiFunction(dim):\n \"\"\" Returns Chi (expectation of the length of a normal random vector)\n approximation according to: Ostermeier 1997. \"\"\"\n dim = float(dim)\n return sqrt(dim) * (1 - 1 / (4 * dim) + 1 / (21 * dim ** 2))\n\n\ndef sqrtm(M):\n \"\"\" Returns the symmetric semi-definite positive square root of a matrix. \"\"\"\n r = real_if_close(expm2(0.5 * logm(M)), 1e-8)\n return (r + r.T) / 2\n\n", "__author__ = 'Tom Schaul, [email protected]'\n\nfrom random import choice\nfrom scipy import zeros\n\nfrom twoplayergame import TwoPlayerGame\n\n\n# TODO: undo operation\n\n\nclass CaptureGame(TwoPlayerGame):\n \"\"\" the capture game is a simplified version of the Go game: the first player to capture a stone wins!\n Pass moves are forbidden.\"\"\"\n # CHECKME: suicide allowed?\n\n BLACK = 1\n WHITE = -1\n EMPTY = 0\n\n startcolor = BLACK\n\n def __init__(self, size, suicideenabled=True):\n \"\"\" the size of the board is generally between 3 and 19. \"\"\"\n self.size = size\n self.suicideenabled = suicideenabled\n self.reset()\n\n def _iterPos(self):\n \"\"\" an iterator over all the positions of the board. \"\"\"\n for i in range(self.size):\n for j in range(self.size):\n yield (i, j)\n\n def reset(self):\n \"\"\" empty the board. \"\"\"\n TwoPlayerGame.reset(self)\n self.movesDone = 0\n self.b = {}\n for p in self._iterPos():\n self.b[p] = self.EMPTY\n # which stone belongs to which group\n self.groups = {}\n # how many liberties does each group have\n self.liberties = {}\n\n @property\n def indim(self):\n return self.size ** 2\n\n @property\n def outdim(self):\n return 2 * self.size ** 2\n\n def getBoardArray(self):\n \"\"\" an array with two boolean values per position, indicating\n 'white stone present' and 'black stone present' respectively. \"\"\"\n a = zeros(self.outdim)\n for i, p in enumerate(self._iterPos()):\n if self.b[p] == self.WHITE:\n a[2 * i] = 1\n elif self.b[p] == self.BLACK:\n a[2 * i + 1] = 1\n return a\n\n def isLegal(self, c, pos):\n if pos not in self.b:\n return False\n elif self.b[pos] != self.EMPTY:\n return False\n elif not self.suicideenabled:\n return not self._suicide(c, pos)\n return True\n\n def doMove(self, c, pos):\n \"\"\" the action is a (color, position) tuple, for the next stone to move.\n returns True if the move was legal. \"\"\"\n self.movesDone += 1\n if pos == 'resign':\n self.winner = -c\n return True\n elif not self.isLegal(c, pos):\n return False\n elif self._suicide(c, pos):\n assert self.suicideenabled\n self.b[pos] = 'y'\n self.winner = -c\n return True\n elif self._capture(c, pos):\n self.winner = c\n self.b[pos] = 'x'\n return True\n else:\n self._setStone(c, pos)\n return True\n\n def getSensors(self):\n \"\"\" just a list of the board position states. 
\"\"\"\n return map(lambda x: x[1], sorted(self.b.items()))\n\n def __str__(self):\n s = ''\n for i in range(self.size):\n for j in range(self.size):\n val = self.b[(i, j)]\n if val == self.EMPTY: s += ' .'\n elif val == self.BLACK: s += ' X'\n elif val == self.WHITE: s += ' O'\n else: s += ' ' + str(val)\n s += '\\n'\n if self.winner:\n if self.winner == self.BLACK:\n w = 'Black (#)'\n elif self.winner == self.WHITE:\n w = 'White (*)'\n else:\n w = self.winner\n s += 'Winner: ' + w\n s += ' (moves done:' + str(self.movesDone) + ')\\n'\n return s\n\n def _neighbors(self, pos):\n \"\"\" the 4 neighboring positions \"\"\"\n res = []\n if pos[1] < self.size - 1: res.append((pos[0], pos[1] + 1))\n if pos[1] > 0: res.append((pos[0], pos[1] - 1))\n if pos[0] < self.size - 1: res.append((pos[0] + 1, pos[1]))\n if pos[0] > 0: res.append((pos[0] - 1, pos[1]))\n return res\n\n def _setStone(self, c, pos):\n \"\"\" set stone, and update liberties and groups. \"\"\"\n self.b[pos] = c\n merge = False\n self.groups[pos] = self.size * pos[0] + pos[1]\n freen = filter(lambda n: self.b[n] == self.EMPTY, self._neighbors(pos))\n self.liberties[self.groups[pos]] = set(freen)\n for n in self._neighbors(pos):\n if self.b[n] == -c:\n self.liberties[self.groups[n]].difference_update([pos])\n elif self.b[n] == c:\n if merge:\n newg = self.groups[pos]\n oldg = self.groups[n]\n if newg == oldg:\n self.liberties[newg].difference_update([pos])\n else:\n # merging 2 groups\n for p in self.groups.keys():\n if self.groups[p] == oldg:\n self.groups[p] = newg\n self.liberties[newg].update(self.liberties[oldg])\n self.liberties[newg].difference_update([pos])\n del self.liberties[oldg]\n else:\n # connect to this group\n del self.liberties[self.groups[pos]]\n self.groups[pos] = self.groups[n]\n self.liberties[self.groups[n]].update(freen)\n self.liberties[self.groups[n]].difference_update([pos])\n merge = True\n\n def _suicide(self, c, pos):\n \"\"\" would putting a stone here be suicide for c? \"\"\"\n # any free neighbors?\n for n in self._neighbors(pos):\n if self.b[n] == self.EMPTY:\n return False\n\n # any friendly neighbor with extra liberties?\n for n in self._neighbors(pos):\n if self.b[n] == c:\n if len(self.liberties[self.groups[n]]) > 1:\n return False\n\n # capture all surrounding ennemies?\n if self._capture(c, pos):\n return False\n\n return True\n\n def _capture(self, c, pos):\n \"\"\" would putting a stone here lead to a capture? \"\"\"\n for n in self._neighbors(pos):\n if self.b[n] == -c:\n if len(self.liberties[self.groups[n]]) == 1:\n return True\n return False\n\n def getLiberties(self, pos):\n \"\"\" how many liberties does the stone at pos have? \"\"\"\n if self.b[pos] == self.EMPTY:\n return None\n return len(self.liberties[self.groups[pos]])\n\n def getGroupSize(self, pos):\n \"\"\" what size is the worm that this stone is part of? \"\"\"\n if self.b[pos] == self.EMPTY:\n return None\n g = self.groups[pos]\n return len(filter(lambda x: x == g, self.groups.values()))\n\n def getLegals(self, c):\n \"\"\" return all the legal positions for a color \"\"\"\n return filter(lambda p: self.b[p] == self.EMPTY, self._iterPos())\n\n def getAcceptable(self, c):\n \"\"\" return all legal positions for a color that don't commit suicide. \"\"\"\n return filter(lambda p: not self._suicide(c, p), self.getLegals(c))\n\n def getKilling(self, c):\n \"\"\" return all legal positions for a color that immediately kill the opponent. 
\"\"\"\n return filter(lambda p: self._capture(c, p), self.getAcceptable(c))\n\n def randomBoard(self, nbmoves):\n \"\"\" produce a random, undecided and legal capture-game board, after at most nbmoves.\n :return: the number of moves actually done. \"\"\"\n c = self.BLACK\n self.reset()\n for i in range(nbmoves):\n l = set(self.getAcceptable(c))\n l.difference_update(self.getKilling(c))\n if len(l) == 0:\n return i\n self._setStone(c, choice(list(l)))\n c = -c\n return nbmoves\n\n def giveHandicap(self, h, color=BLACK):\n i = 0\n for pos in self._handicapIterator():\n i += 1\n if i > h:\n return\n if self.isLegal(color, pos):\n self._setStone(color, pos)\n\n def _handicapIterator(self):\n s = self.size\n assert s > 2\n yield (1, 1)\n if s > 3:\n # 4 corners\n yield (s - 2, s - 2)\n yield (1, s - 2)\n yield (s - 2, 1)\n if s > 4:\n for i in range(2, s - 2):\n yield (i, 1)\n yield (i, s - 2)\n yield (1, i)\n yield (s - 2, i)\n\n def playToTheEnd(self, p1, p2):\n \"\"\" alternate playing moves between players until the game is over. \"\"\"\n assert p1.color == -p2.color\n i = 0\n p1.game = self\n p2.game = self\n players = [p1, p2]\n while not self.gameOver():\n p = players[i]\n self.performAction(p.getAction())\n i = (i + 1) % 2\n\n" ]
[ [ "scipy.tan", "scipy.linalg.svd", "scipy.log", "scipy.dot", "scipy.linalg.det", "scipy.clip", "scipy.diag", "scipy.sqrt", "scipy.linalg.logm", "scipy.linalg.inv", "scipy.tanh", "scipy.power", "scipy.exp" ], [ "scipy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
RQuispeC/opacus
[ "5c83d59fc169e93667946204f7a6859827a38ace", "5c83d59fc169e93667946204f7a6859827a38ace" ]
[ "opacus/tests/ddp_hook_check.py", "opacus/grad_sample/group_norm.py" ]
[ "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport os\nimport sys\nimport unittest\n\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nimport torch.nn as nn\nimport torch.optim as optim\nfrom opacus import PrivacyEngine\nfrom opacus.distributed import DifferentiallyPrivateDistributedDataParallel as DPDDP\nfrom torch.nn.parallel import DistributedDataParallel as DDP\n\n\nPRIVACY_ALPHAS = [1 + x / 10.0 for x in range(1, 100)] + list(range(12, 64))\n\n\ndef setup_and_get_device(rank, world_size, nonce=0):\n \"\"\"\n Initialize the torch.distributed process group.\n If you run multiple groups in parallel or if you have zombie processes, you can add a nonce to avoid errors.\n \"\"\"\n device = 0\n if sys.platform == \"win32\":\n # Distributed package only covers collective communications with Gloo\n # backend and FileStore on Windows platform. Set init_method parameter\n # in init_process_group to a local file.\n # Example init_method=\"file:///f:/libtmp/some_file\"\n init_method = \"file:///{your local file path}\"\n\n # initialize the process group\n dist.init_process_group(\n \"gloo\", init_method=init_method, rank=rank, world_size=world_size\n )\n device = rank\n elif os.environ.get(\"SLURM_NTASKS\") is not None:\n # Running on a Slurm cluster\n os.environ[\"MASTER_ADDR\"] = \"127.0.0.1\"\n os.environ[\"MASTER_PORT\"] = str(7440 + nonce)\n local_rank = int(os.environ.get(\"SLURM_LOCALID\"))\n dist.init_process_group(backend=\"gloo\", rank=rank, world_size=world_size)\n\n # The device is the local rank (if you have 2 nodes with 8 GPUs each, you will have two \"cuda:0\" devices)\n device = local_rank\n else:\n os.environ[\"MASTER_ADDR\"] = \"localhost\"\n os.environ[\"MASTER_PORT\"] = \"12355\"\n\n os.environ[\"RANK\"] = str(rank)\n os.environ[\"WORLD_SIZE\"] = str(world_size)\n dist.init_process_group(\n init_method=\"env://\",\n backend=\"nccl\",\n )\n\n # Single node experiment\n device = rank\n return device\n\n\ndef cleanup():\n dist.destroy_process_group()\n\n\nclass ToyModel(nn.Module):\n def __init__(self):\n super(ToyModel, self).__init__()\n self.net1 = nn.Linear(10, 10)\n self.relu = nn.ReLU()\n self.net2 = nn.Linear(10, 5)\n\n def forward(self, x):\n return self.net2(self.relu(self.net1(x)))\n\n\ndef demo_basic(rank, world_size, weight, dp, noise_multiplier=0, max_grad_norm=1e8):\n # We don't want the 2 GPUs to work on the same examples/labels in parallel\n torch.manual_seed(rank)\n batch_size = 32\n withdp = \"with\" + (\"out \" if not dp else \"\")\n print(f\"Running basic DDP {withdp} differential privacy example on rank {rank}.\")\n\n device = setup_and_get_device(rank, world_size)\n\n # create model and move it to GPU with id rank\n model = ToyModel().to(device)\n print(f\"Initial weight: {model.net1.weight.data}\")\n\n # Freeze all the parameters except one, to ensure that the noise is the same\n # (the DDP hook does not browse the layers in the same order as the naive implementation)\n model.net1.bias.requires_grad = False\n model.net2.bias.requires_grad = False\n model.net2.weight.requires_grad = False\n\n if dp:\n ddp_model = DPDDP(model)\n engine = PrivacyEngine(\n ddp_model,\n batch_size=batch_size,\n sample_size=10 * batch_size,\n alphas=PRIVACY_ALPHAS,\n noise_multiplier=noise_multiplier,\n max_grad_norm=[max_grad_norm],\n )\n engine.random_number_generator = engine._set_seed(0)\n else:\n ddp_model = DDP(model, device_ids=[device])\n\n loss_fn = nn.MSELoss()\n optimizer = 
optim.SGD(ddp_model.parameters(), lr=1)\n if dp:\n engine.attach(optimizer)\n\n optimizer.zero_grad()\n labels = torch.randn(batch_size, 5).to(device)\n\n outputs = ddp_model(torch.randn(batch_size, 10).to(device))\n loss_fn(outputs, labels).backward()\n optimizer.step()\n\n weight.copy_(model.net1.weight.data.cpu())\n\n cleanup()\n\n\ndef demo_ddp_hook(rank, world_size, weight, dp, noise_multiplier, max_grad_norm):\n torch.manual_seed(rank)\n batch_size = 32\n withdp = \"with\" + (\"out \" if not dp else \"\")\n print(f\"Running DDP hook {withdp} differential privacy example on rank {rank}.\")\n\n device = setup_and_get_device(rank, world_size, nonce=1)\n\n # create model and move it to GPU with id rank\n model = ToyModel().to(device)\n\n model.net1.bias.requires_grad = False\n model.net2.bias.requires_grad = False\n model.net2.weight.requires_grad = False\n\n ddp_model = DDP(model, device_ids=[device])\n\n if dp:\n engine = PrivacyEngine(\n ddp_model,\n batch_size=batch_size,\n sample_size=10 * batch_size,\n alphas=PRIVACY_ALPHAS,\n noise_multiplier=noise_multiplier,\n max_grad_norm=[max_grad_norm],\n )\n engine.random_number_generator = engine._set_seed(0)\n\n loss_fn = nn.MSELoss()\n optimizer = optim.SGD(ddp_model.parameters(), lr=1)\n if dp:\n engine.attach(optimizer)\n\n optimizer.zero_grad()\n labels = torch.randn(batch_size, 5).to(device)\n\n outputs = ddp_model(torch.randn(batch_size, 10).to(device))\n loss_fn(outputs, labels).backward()\n optimizer.step()\n\n weight.copy_(model.net1.weight.data.cpu())\n\n del ddp_model\n cleanup()\n\n\ndef add_remove_ddp_hooks(\n rank, world_size, remaining_hooks, dp, noise_multiplier=0, max_grad_norm=1e8\n):\n device = setup_and_get_device(rank, world_size, nonce=2)\n\n model = ToyModel().to(device)\n ddp_model = nn.parallel.DistributedDataParallel(model, device_ids=[device])\n\n engine = PrivacyEngine(\n ddp_model,\n batch_size=1,\n sample_size=10,\n alphas=PRIVACY_ALPHAS,\n noise_multiplier=noise_multiplier,\n max_grad_norm=[max_grad_norm],\n )\n\n optimizer = optim.SGD(ddp_model.parameters(), lr=1)\n\n engine.attach(optimizer)\n\n remaining_hooks[\"attached\"] = {\n p: p._backward_hooks for p in engine.module.parameters() if p._backward_hooks\n }\n engine.detach()\n\n remaining_hooks[\"detached\"] = {\n p: p._backward_hooks for p in engine.module.parameters() if p._backward_hooks\n }\n\n cleanup()\n\n\ndef debug(rank, world_size, tensor, dp, noise_multiplier=0, max_grad_norm=1e8):\n local_rank = setup_and_get_device(rank, world_size)\n print(f\"Rank: {rank},World size: {world_size}, local_rank: {local_rank}\")\n tensor = tensor.to(local_rank)\n print(f\"dp: {dp}\")\n print(tensor)\n\n cleanup()\n\n\ndef run_function(local_function, tensor, dp, noise_multiplier=0, max_grad_norm=1e8):\n if os.environ.get(\"SLURM_NTASKS\") is not None:\n world_size = int(os.environ.get(\"SLURM_NTASKS\"))\n rank = int(os.environ.get(\"SLURM_PROCID\"))\n print(f\"Running on a Slurm cluster with {world_size} tasks.\")\n\n local_function(rank, world_size, tensor, dp, noise_multiplier, max_grad_norm)\n else:\n world_size = torch.cuda.device_count()\n print(f\"Spawning multiple processes on a local machine with {world_size} GPUs\")\n\n # The rank will be passed as the first argument\n mp.spawn(\n local_function,\n args=(\n world_size,\n tensor,\n dp,\n noise_multiplier,\n max_grad_norm,\n ),\n nprocs=world_size,\n join=True,\n )\n return world_size\n\n\nclass GradientComputationTest(unittest.TestCase):\n def test_connection(self):\n tensor = torch.zeros(10, 10)\n 
world_size = run_function(debug, tensor, dp=True)\n\n self.assertTrue(\n world_size >= 2, f\"Need at least 2 gpus but was provided only {world_size}.\"\n )\n\n def test_gradient_noclip_zeronoise(self):\n # Tests that gradient is the same with DP or with DDP\n weight_dp, weight_nodp = torch.zeros(10, 10), torch.zeros(10, 10)\n\n run_function(demo_basic, weight_dp, dp=True)\n run_function(demo_basic, weight_nodp, dp=False)\n\n self.assertTrue(torch.norm(weight_dp - weight_nodp) < 1e-7)\n\n def test_ddp_hook(self):\n # Tests that the DDP hook does the same thing as naive aggregation with per layer clipping\n weight_ddp_naive, weight_ddp_hook = torch.zeros(10, 10), torch.zeros(10, 10)\n\n run_function(\n demo_basic,\n weight_ddp_naive,\n dp=True,\n noise_multiplier=0.1,\n max_grad_norm=1.0,\n )\n\n run_function(\n demo_ddp_hook,\n weight_ddp_hook,\n dp=True,\n noise_multiplier=0.1,\n max_grad_norm=1.0,\n )\n\n self.assertTrue(\n torch.norm(weight_ddp_naive - weight_ddp_hook) < 1e-7,\n f\"DDP naive: {weight_ddp_naive}\\nDDP hook: {weight_ddp_hook}\",\n )\n\n def test_add_remove_ddp_hooks(self):\n\n remaining_hooks = {\n \"attached\": None,\n \"detached\": None,\n }\n\n run_function(\n add_remove_ddp_hooks,\n remaining_hooks,\n dp=True,\n noise_multiplier=0.1,\n max_grad_norm=1.0,\n )\n\n assert remaining_hooks[\"attached\"], \"There are no hooks.\"\n\n assert not remaining_hooks[\n \"detached\"\n ], f\"Some hooks remain after .remove_hooks(): {remaining_hooks}\"\n", "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\n\nfrom typing import Dict\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom .utils import register_grad_sampler\n\n\n@register_grad_sampler(nn.GroupNorm)\ndef compute_group_norm_grad_sample(\n layer: nn.GroupNorm,\n activations: torch.Tensor,\n backprops: torch.Tensor,\n) -> Dict[nn.Parameter, torch.Tensor]:\n \"\"\"\n Computes per sample gradients for GroupNorm\n\n Args:\n layer: Layer\n activations: Activations\n backprops: Backpropagations\n \"\"\"\n gs = F.group_norm(activations, layer.num_groups, eps=layer.eps) * backprops\n ret = {layer.weight: torch.einsum(\"ni...->ni\", gs)}\n if layer.bias is not None:\n ret[layer.bias] = torch.einsum(\"ni...->ni\", backprops)\n return ret\n" ]
[ [ "torch.norm", "torch.distributed.init_process_group", "torch.multiprocessing.spawn", "torch.zeros", "torch.manual_seed", "torch.cuda.device_count", "torch.randn", "torch.nn.Linear", "torch.distributed.destroy_process_group", "torch.nn.ReLU", "torch.nn.MSELoss", "torch.nn.parallel.DistributedDataParallel" ], [ "torch.einsum", "torch.nn.functional.group_norm" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
scpepper69/ml
[ "13ad41dd7b22d3fa152cf3665fc4dc7c1c747917" ]
[ "mnist/app/app/mnist.py" ]
[ "from datetime import datetime\nimport cv2\nimport re\nimport base64\nfrom flask import Flask, render_template, request, jsonify\nfrom flask_cors import CORS\nimport numpy as np\n\nfrom io import BytesIO\nfrom PIL import Image, ImageOps\nimport os,sys\nimport requests\nfrom graphpipe import remote\nfrom matplotlib import pylab as plt\n\n\napp = Flask(__name__)\nCORS(app) # To Post by Ajax\n\[email protected]('/', methods=['GET', 'POST'])\ndef index():\n if request.method == 'POST':\n ans,t1,t2,t3 = get_answer(request)\n return jsonify({'ans': ans, 't1': t1, 't2': t2, 't3': t3})\n else:\n return render_template('index.html')\n\ndef result(img):\n img = img.reshape(1, 784)\n img = img.astype(np.float32)\n img = np.multiply(img, 1.0 / 255.0)\n pred = remote.execute(\"http://localhost:9001\", img)\n r = np.argmax(pred, axis=1)\n pp = pred*100\n top1 = str(np.argsort(-pp)[0][0])+ \" (\" +str(int(np.sort(-pp)[0][0]*-1))+\"%)\"\n top2 = str(np.argsort(-pp)[0][1])+ \" (\" +str(int(np.sort(-pp)[0][1]*-1))+\"%)\"\n top3 = str(np.argsort(-pp)[0][2])+ \" (\" +str(int(np.sort(-pp)[0][2]*-1))+\"%)\"\n# return int(r)\n return r,top1,top2,top3\n\ndef get_answer(req):\n img_str = re.search(r'base64,(.*)', req.form['img']).group(1)\n nparr = np.fromstring(base64.b64decode(img_str), np.uint8)\n img_src = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n img_negaposi = 255 - img_src\n img_gray = cv2.cvtColor(img_negaposi, cv2.COLOR_BGR2GRAY)\n img_resize = cv2.resize(img_gray,(28,28))\n cv2.imwrite(f\"images/{datetime.now().strftime('%s')}.jpg\",img_resize)\n ans,t1,t2,t3 = result(img_resize)\n return int(ans),t1,t2,t3\n\nif __name__ == \"__main__\":\n app.run(debug=False, host='0.0.0.0', port=8001)\n" ]
[ [ "numpy.argsort", "numpy.sort", "numpy.argmax", "numpy.multiply" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Manish-rai21bit/deep_learning_for_camera_trap_images
[ "f9d9fd50824ece4743b39d5136f67235871cc0ef", "f9d9fd50824ece4743b39d5136f67235871cc0ef" ]
[ "phase2_recognition_only/architectures/vgg.py", "phase2_recognition_only/architectures/resnet.py" ]
[ "import tensorflow as tf\nimport common\n\ndef inference(x, num_output, wd, dropout_rate, is_training, transfer_mode= False, model_type= 'A'):\n # Create tables describing VGG configurations A, B, D, E\n if model_type == 'A':\n config = [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M']\n elif model_type == 'B':\n config = [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M']\n elif model_type == 'D':\n config = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M']\n elif model_type == 'E':\n config = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M']\n else:\n print('Unknown model type: ' + model_type + ' | Please specify a modelType A or B or D or E')\n \n network= x\n\n for k,v in enumerate(config):\n if v == 'M':\n network= common.maxPool(network, 2, 2)\n else: \n with tf.variable_scope('conv'+str(k)):\n network = common.spatialConvolution(network, 3, 1, v, wd= wd)\n network = tf.nn.relu(network)\n\n network= common.flatten(network)\n\n with tf.variable_scope('fc1'): \n network = common.fullyConnected(network, 4096, wd= wd)\n network = tf.nn.relu(network)\n network = common.batchNormalization(network, is_training= is_training)\n network = tf.nn.dropout(network, dropout_rate)\n with tf.variable_scope('fc2'):\n network = common.fullyConnected(network, 4096, wd= wd)\n network = tf.nn.relu(network)\n network = common.batchNormalization(network, is_training= is_training)\n network = tf.nn.dropout(network, dropout_rate)\n if not transfer_mode:\n with tf.variable_scope('output'):\n network = common.fullyConnected(network, num_output, wd= wd)\n else:\n with tf.variable_scope('transfer_output'):\n network = common.fullyConnected(network, num_output, wd= wd)\n\n return network\n", "import tensorflow as tf\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.training import moving_averages\nimport common\n\n\ndef inference(x, depth, num_output, wd, is_training):\n num_blockes= []\n bottleneck= False\n if depth == 18:\n num_blocks= [2, 2, 2, 2]\n elif depth == 34:\n num_blocks= [3, 4, 6, 3]\n elif depth == 50:\n num_blocks= [3, 4, 6, 3]\n bottleneck= True\n elif depth == 101:\n num_blocks= [3, 4, 23, 3]\n bottleneck= True\n elif depth == 152:\n num_blocks= [3, 8, 36, 3]\n bottleneck= True\n\n return getModel(x, num_output, wd, is_training, num_blocks= num_blocks, bottleneck= bottleneck)\n\n\ndef getModel(x, num_output, wd, is_training, num_blocks=[3, 4, 6, 3], # defaults to 50-layer network\n bottleneck= True):\n conv_weight_initializer = tf.truncated_normal_initializer(stddev= 0.1)\n fc_weight_initializer = tf.truncated_normal_initializer(stddev= 0.01)\n with tf.variable_scope('scale1'):\n x = common.spatialConvolution(x, 7, 2, 64, weight_initializer= conv_weight_initializer, wd= wd)\n x = common.batchNormalization(x, is_training= is_training)\n x = tf.nn.relu(x)\n\n with tf.variable_scope('scale2'):\n x = common.maxPool(x, 3, 2)\n x = common.resnetStack(x, num_blocks[0], 1, 64, bottleneck, wd= wd, is_training= is_training)\n\n with tf.variable_scope('scale3'):\n x = common.resnetStack(x, num_blocks[1], 2, 128, bottleneck, wd= wd, is_training= is_training)\n\n with tf.variable_scope('scale4'):\n x = common.resnetStack(x, num_blocks[2], 2, 256, bottleneck, wd= wd, is_training= is_training)\n\n with tf.variable_scope('scale5'):\n x = common.resnetStack(x, num_blocks[3], 2, 512, bottleneck, wd= wd, is_training= is_training)\n\n # post-net\n x = 
tf.reduce_mean(x, reduction_indices= [1, 2], name= \"avg_pool\")\n\n with tf.variable_scope('output'):\n x = common.fullyConnected(x, num_output, weight_initializer= fc_weight_initializer, bias_initializer= tf.zeros_initializer, wd= wd)\n\n return x\n" ]
[ [ "tensorflow.variable_scope", "tensorflow.nn.relu", "tensorflow.nn.dropout" ], [ "tensorflow.variable_scope", "tensorflow.nn.relu", "tensorflow.truncated_normal_initializer", "tensorflow.reduce_mean" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.4", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] } ]
innovator-zero/Python
[ "f776eb081c6688c2f5a98b0050b33582c1769391" ]
[ "kmeans/fish.py" ]
[ "import numpy as np\nimport random\nimport matplotlib.pyplot as plt\n\npoints=np.loadtxt('points.txt')\nherring_r = np.loadtxt('distribution.txt')\nherring=np.zeros((802,350))\nfor i in range(350):\n for j in range(802):\n herring[j,349-i]=herring_r[i,j]\n\n# s=np.zeros(10)\n#\n# for i in range(10):\n# x=int(round(points[i,0]))-1\n# y=int(round(points[i,1]))\n#\n# for xx in range(x-11,x+12):\n# for yy in range(y-11,y+12):\n# if herring[xx,yy]>0:\n# s[i]+=herring[xx,yy]\n#\n# f = open('fish_count.txt', 'w')\n# for i in range(10):\n# f.write(str(s[i])+'\\n')\n# f.close()\ns=0\nfor i in range(802):\n for j in range(350):\n if herring[i,j]>0:\n s+=herring[i,j]\n\nprint(s)\n\n\n" ]
[ [ "numpy.zeros", "numpy.loadtxt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
luoyi1hao/ACRN_Chest_X-ray_IA
[ "b2ecaf88e6b1bb59101fd2d611bf9d1e6716367a", "b2ecaf88e6b1bb59101fd2d611bf9d1e6716367a" ]
[ "acregnet/train_acregnet.py", "acregnet/data.py" ]
[ "from data import DataHandler\nfrom models import ACRegNet\nimport tensorflow as tf\nfrom utils import get_random_batch, read_config_file, create_dir\n\n\nRUN_IN_GPU = False\n\n\ndef train_acregnet_model(config):\n tf.reset_default_graph()\n tf_config = tf.ConfigProto()\n\n if RUN_IN_GPU:\n tf_config.gpu_options.allow_growth = True\n\n sess = tf.Session(config=tf_config)\n\n train_ims, _ = DataHandler.load_images(config['train_ims_file'])\n train_lbs, _ = DataHandler.load_labels(config['train_lbs_file'])\n print('Loading training data...done')\n\n acregnet = ACRegNet(sess, config, 'ACRegNet', is_train=True)\n print('Building AC-RegNet model...done')\n\n print('Training...')\n for i in range(config['iterations']):\n batch_ims_x, batch_ims_y, batch_lbs_x, batch_lbs_y = get_random_batch(\n train_ims, config['batch_size'], train_lbs)\n cur_loss = acregnet.fit(\n batch_ims_x, batch_ims_y, batch_lbs_x, batch_lbs_y)\n print('Iteration {:>8d}/{}: Loss: {}'.format(\n i + 1, config['iterations'], cur_loss))\n\n acregnet.save(config['ckpt_dir'])\n print('Saving current AC-RegNet model...done')\n\n print('Training...done')\n\n tf.reset_default_graph()\n sess.close()\n\n\nif __name__ == \"__main__\":\n config = read_config_file('./config/JSRT/ACRegNet.cfg')\n create_dir(config['ckpt_dir'])\n train_acregnet_model(config)\n", "import os\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nimport cv2\n\n\nclass DataHandler(object):\n\n def _load_data(im_fnames, add_channel_dim=True):\n im0 = cv2.imread(im_fnames[0], 0)\n im_batch = np.zeros((len(im_fnames),) + im0.shape)\n im_batch[0] = im0\n for i, fname in enumerate(im_fnames[1:], 1):\n im_batch[i] = cv2.imread(fname, 0)\n\n if add_channel_dim:\n return np.expand_dims(im_batch, axis=-1)\n\n return im_batch\n\n @staticmethod\n def load_images(_file, normalize=True):\n im_fnames = list(np.loadtxt(_file, dtype='str'))\n im_batch = DataHandler._load_data(im_fnames).astype(np.float32)\n\n if normalize:\n im_batch = im_batch / 255.\n\n return im_batch, im_fnames\n\n @staticmethod\n def load_labels(_file):\n lb_fnames = list(np.loadtxt(_file, dtype='str'))\n lb_batch = DataHandler._load_data(lb_fnames).astype(np.int32)\n\n cur_labels = np.unique(lb_batch)\n new_labels = range(np.unique(lb_batch).shape[0])\n if not np.array_equal(cur_labels, new_labels):\n for cur_l, new_l in zip(cur_labels, new_labels):\n lb_batch[lb_batch == cur_l] = new_l\n\n return lb_batch, lb_fnames\n\n @staticmethod\n def train_test_split(data_dir, out_dir,\n test_size=0.2, seed=1):\n data_fnames = [\n os.path.join(data_dir, f) for f in sorted(os.listdir(data_dir))]\n\n train_fnames, test_fnames = train_test_split(\n data_fnames, test_size, True, seed)\n\n np.savetxt(os.path.join(out_dir, 'train_fnames'),\n np.array(train_fnames), fmt='%s')\n np.savetxt(os.path.join(out_dir, 'test_fnames'),\n np.array(test_fnames), fmt='%s')\n\n @staticmethod\n def train_valid_test_split(data_dir, out_dir, valid_size=0.1,\n test_size=0.2, seed=1):\n data_fnames = [\n os.path.join(data_dir, f) for f in sorted(os.listdir(data_dir))]\n\n train_fnames, test_fnames = train_test_split(\n data_fnames, test_size, True, seed)\n train_fnames, valid_fnames = train_test_split(\n train_fnames, valid_size/(1 - test_size), False, seed + 1)\n\n np.savetxt(os.path.join(out_dir, 'train_fnames'),\n np.array(train_fnames), fmt='%s')\n np.savetxt(os.path.join(out_dir, 'valid_fnames'),\n np.array(valid_fnames), fmt='%s')\n np.savetxt(os.path.join(out_dir, 'test_fnames'),\n np.array(test_fnames), 
fmt='%s')\n" ]
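The `load_labels` method in acregnet/data.py above remaps whatever intensity values appear in the label images onto a contiguous 0..K-1 range before returning the batch. Below is a minimal standalone sketch of just that remapping step, using only numpy; the toy array is illustrative and not taken from the actual training labels.

import numpy as np

# Toy label map with arbitrary, non-contiguous class values (illustrative only).
lb_batch = np.array([[0, 0, 85], [170, 255, 255]], dtype=np.int32)

cur_labels = np.unique(lb_batch)           # e.g. [  0  85 170 255]
new_labels = range(cur_labels.shape[0])    # 0..K-1

if not np.array_equal(cur_labels, new_labels):
    for cur_l, new_l in zip(cur_labels, new_labels):
        lb_batch[lb_batch == cur_l] = new_l   # rewrite each old value in place

print(lb_batch)   # now holds only the contiguous classes 0..3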
[ [ "tensorflow.ConfigProto", "tensorflow.reset_default_graph", "tensorflow.Session" ], [ "numpy.expand_dims", "numpy.array_equal", "numpy.unique", "sklearn.model_selection.train_test_split", "numpy.array", "numpy.loadtxt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
NNstorm/MinkowskiEngine
[ "443b37a58c379b2482b5d160d9e874b356b4bf2f", "443b37a58c379b2482b5d160d9e874b356b4bf2f", "443b37a58c379b2482b5d160d9e874b356b4bf2f", "443b37a58c379b2482b5d160d9e874b356b4bf2f", "443b37a58c379b2482b5d160d9e874b356b4bf2f" ]
[ "examples/classification_modelnet40.py", "tests/python/sparse_tensor.py", "tests/cpp/utils.py", "tests/cpp/coordinate_map_gpu_test.py", "tests/python/norm.py" ]
[ "# Copyright (c) 2020 NVIDIA CORPORATION.\n# Copyright (c) 2018-2020 Chris Choy ([email protected]).\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of\n# this software and associated documentation files (the \"Software\"), to deal in\n# the Software without restriction, including without limitation the rights to\n# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies\n# of the Software, and to permit persons to whom the Software is furnished to do\n# so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n# Please cite \"4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural\n# Networks\", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part\n# of the code.\nimport argparse\nimport sklearn.metrics as metrics\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.utils.data\nfrom torch.utils.data import DataLoader\nimport torch.optim as optim\nimport torch.nn.functional as F\n\nimport MinkowskiEngine as ME\nfrom examples.pointnet import (\n PointNet,\n MinkowskiPointNet,\n CoordinateTransformation,\n ModelNet40H5,\n stack_collate_fn,\n minkowski_collate_fn,\n)\nfrom examples.common import seed_all\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--voxel_size\", type=float, default=0.05)\nparser.add_argument(\"--max_steps\", type=int, default=100000)\nparser.add_argument(\"--val_freq\", type=int, default=1000)\nparser.add_argument(\"--batch_size\", default=32, type=int)\nparser.add_argument(\"--lr\", default=1e-1, type=float)\nparser.add_argument(\"--weight_decay\", type=float, default=1e-4)\nparser.add_argument(\"--num_workers\", type=int, default=2)\nparser.add_argument(\"--stat_freq\", type=int, default=100)\nparser.add_argument(\"--weights\", type=str, default=\"modelnet.pth\")\nparser.add_argument(\"--seed\", type=int, default=777)\nparser.add_argument(\"--translation\", type=float, default=0.2)\nparser.add_argument(\"--test_translation\", type=float, default=0.0)\nparser.add_argument(\n \"--network\",\n type=str,\n choices=[\"pointnet\", \"minkpointnet\", \"minkfcnn\", \"minksplatfcnn\"],\n default=\"minkfcnn\",\n)\n\n\nclass MinkowskiFCNN(ME.MinkowskiNetwork):\n def __init__(\n self,\n in_channel,\n out_channel,\n embedding_channel=1024,\n channels=(32, 48, 64, 96, 128),\n D=3,\n ):\n ME.MinkowskiNetwork.__init__(self, D)\n\n self.network_initialization(\n in_channel,\n out_channel,\n channels=channels,\n embedding_channel=embedding_channel,\n kernel_size=3,\n D=D,\n )\n self.weight_initialization()\n\n def get_mlp_block(self, in_channel, out_channel):\n return nn.Sequential(\n ME.MinkowskiLinear(in_channel, out_channel, bias=False),\n ME.MinkowskiBatchNorm(out_channel),\n ME.MinkowskiLeakyReLU(),\n )\n\n def get_conv_block(self, in_channel, out_channel, kernel_size, stride):\n return nn.Sequential(\n ME.MinkowskiConvolution(\n in_channel,\n out_channel,\n 
kernel_size=kernel_size,\n stride=stride,\n dimension=self.D,\n ),\n ME.MinkowskiBatchNorm(out_channel),\n ME.MinkowskiLeakyReLU(),\n )\n\n def network_initialization(\n self,\n in_channel,\n out_channel,\n channels,\n embedding_channel,\n kernel_size,\n D=3,\n ):\n self.mlp1 = self.get_mlp_block(in_channel, channels[0])\n self.conv1 = self.get_conv_block(\n channels[0],\n channels[1],\n kernel_size=kernel_size,\n stride=1,\n )\n self.conv2 = self.get_conv_block(\n channels[1],\n channels[2],\n kernel_size=kernel_size,\n stride=2,\n )\n\n self.conv3 = self.get_conv_block(\n channels[2],\n channels[3],\n kernel_size=kernel_size,\n stride=2,\n )\n\n self.conv4 = self.get_conv_block(\n channels[3],\n channels[4],\n kernel_size=kernel_size,\n stride=2,\n )\n self.conv5 = nn.Sequential(\n self.get_conv_block(\n channels[1] + channels[2] + channels[3] + channels[4],\n embedding_channel // 4,\n kernel_size=3,\n stride=2,\n ),\n self.get_conv_block(\n embedding_channel // 4,\n embedding_channel // 2,\n kernel_size=3,\n stride=2,\n ),\n self.get_conv_block(\n embedding_channel // 2,\n embedding_channel,\n kernel_size=3,\n stride=2,\n ),\n )\n\n self.pool = ME.MinkowskiMaxPooling(kernel_size=3, stride=2, dimension=D)\n\n self.global_max_pool = ME.MinkowskiGlobalMaxPooling()\n self.global_avg_pool = ME.MinkowskiGlobalAvgPooling()\n\n self.final = nn.Sequential(\n self.get_mlp_block(embedding_channel * 2, 512),\n ME.MinkowskiDropout(),\n self.get_mlp_block(512, 512),\n ME.MinkowskiLinear(512, out_channel, bias=True),\n )\n\n # No, Dropout, last 256 linear, AVG_POOLING 92%\n\n def weight_initialization(self):\n for m in self.modules():\n if isinstance(m, ME.MinkowskiConvolution):\n ME.utils.kaiming_normal_(m.kernel, mode=\"fan_out\", nonlinearity=\"relu\")\n\n if isinstance(m, ME.MinkowskiBatchNorm):\n nn.init.constant_(m.bn.weight, 1)\n nn.init.constant_(m.bn.bias, 0)\n\n def forward(self, x: ME.TensorField):\n x = self.mlp1(x)\n y = x.sparse()\n\n y = self.conv1(y)\n y1 = self.pool(y)\n\n y = self.conv2(y1)\n y2 = self.pool(y)\n\n y = self.conv3(y2)\n y3 = self.pool(y)\n\n y = self.conv4(y3)\n y4 = self.pool(y)\n\n x1 = y1.slice(x)\n x2 = y2.slice(x)\n x3 = y3.slice(x)\n x4 = y4.slice(x)\n\n x = ME.cat(x1, x2, x3, x4)\n\n y = self.conv5(x.sparse())\n x1 = self.global_max_pool(y)\n x2 = self.global_avg_pool(y)\n\n return self.final(ME.cat(x1, x2)).F\n\n\nclass GlobalMaxAvgPool(torch.nn.Module):\n def __init__(self):\n torch.nn.Module.__init__(self)\n self.global_max_pool = ME.MinkowskiGlobalMaxPooling()\n self.global_avg_pool = ME.MinkowskiGlobalAvgPooling()\n\n def forward(self, tensor):\n x = self.global_max_pool(tensor)\n y = self.global_avg_pool(tensor)\n return ME.cat(x, y)\n\n\nclass MinkowskiSplatFCNN(MinkowskiFCNN):\n def __init__(\n self,\n in_channel,\n out_channel,\n embedding_channel=1024,\n channels=(32, 48, 64, 96, 128),\n D=3,\n ):\n MinkowskiFCNN.__init__(\n self, in_channel, out_channel, embedding_channel, channels, D\n )\n\n def forward(self, x: ME.TensorField):\n x = self.mlp1(x)\n y = x.splat()\n\n y = self.conv1(y)\n y1 = self.pool(y)\n\n y = self.conv2(y1)\n y2 = self.pool(y)\n\n y = self.conv3(y2)\n y3 = self.pool(y)\n\n y = self.conv4(y3)\n y4 = self.pool(y)\n\n x1 = y1.interpolate(x)\n x2 = y2.interpolate(x)\n x3 = y3.interpolate(x)\n x4 = y4.interpolate(x)\n\n x = ME.cat(x1, x2, x3, x4)\n y = self.conv5(x.sparse())\n\n x1 = self.global_max_pool(y)\n x2 = self.global_avg_pool(y)\n\n return self.final(ME.cat(x1, x2)).F\n\n\nSTR2NETWORK = dict(\n pointnet=PointNet,\n 
minkpointnet=MinkowskiPointNet,\n minkfcnn=MinkowskiFCNN,\n minksplatfcnn=MinkowskiSplatFCNN,\n)\n\n\ndef create_input_batch(batch, is_minknet, device=\"cuda\", quantization_size=0.05):\n if is_minknet:\n batch[\"coordinates\"][:, 1:] = batch[\"coordinates\"][:, 1:] / quantization_size\n return ME.TensorField(\n coordinates=batch[\"coordinates\"],\n features=batch[\"features\"],\n device=device,\n )\n else:\n return batch[\"coordinates\"].permute(0, 2, 1).to(device)\n\n\nclass CoordinateTranslation:\n def __init__(self, translation):\n self.trans = translation\n\n def __call__(self, coords):\n if self.trans > 0:\n coords += np.random.uniform(low=-self.trans, high=self.trans, size=[1, 3])\n return coords\n\n\ndef make_data_loader(phase, is_minknet, config):\n assert phase in [\"train\", \"val\", \"test\"]\n is_train = phase == \"train\"\n dataset = ModelNet40H5(\n phase=phase,\n transform=CoordinateTransformation(trans=config.translation)\n if is_train\n else CoordinateTranslation(config.test_translation),\n data_root=\"modelnet40_ply_hdf5_2048\",\n )\n return DataLoader(\n dataset,\n num_workers=config.num_workers,\n shuffle=is_train,\n collate_fn=minkowski_collate_fn if is_minknet else stack_collate_fn,\n batch_size=config.batch_size,\n )\n\n\ndef test(net, device, config, phase=\"val\"):\n is_minknet = isinstance(net, ME.MinkowskiNetwork)\n data_loader = make_data_loader(\n \"test\",\n is_minknet,\n config=config,\n )\n\n net.eval()\n labels, preds = [], []\n with torch.no_grad():\n for batch in data_loader:\n input = create_input_batch(\n batch,\n is_minknet,\n device=device,\n quantization_size=config.voxel_size,\n )\n logit = net(input)\n pred = torch.argmax(logit, 1)\n labels.append(batch[\"labels\"].cpu().numpy())\n preds.append(pred.cpu().numpy())\n torch.cuda.empty_cache()\n return metrics.accuracy_score(np.concatenate(labels), np.concatenate(preds))\n\n\ndef criterion(pred, labels, smoothing=True):\n \"\"\"Calculate cross entropy loss, apply label smoothing if needed.\"\"\"\n\n labels = labels.contiguous().view(-1)\n if smoothing:\n eps = 0.2\n n_class = pred.size(1)\n\n one_hot = torch.zeros_like(pred).scatter(1, labels.view(-1, 1), 1)\n one_hot = one_hot * (1 - eps) + (1 - one_hot) * eps / (n_class - 1)\n log_prb = F.log_softmax(pred, dim=1)\n\n loss = -(one_hot * log_prb).sum(dim=1).mean()\n else:\n loss = F.cross_entropy(pred, labels, reduction=\"mean\")\n\n return loss\n\n\ndef train(net, device, config):\n is_minknet = isinstance(net, ME.MinkowskiNetwork)\n optimizer = optim.SGD(\n net.parameters(),\n lr=config.lr,\n momentum=0.9,\n weight_decay=config.weight_decay,\n )\n scheduler = optim.lr_scheduler.CosineAnnealingLR(\n optimizer,\n T_max=config.max_steps,\n )\n print(optimizer)\n print(scheduler)\n\n train_iter = iter(make_data_loader(\"train\", is_minknet, config))\n best_metric = 0\n net.train()\n for i in range(config.max_steps):\n optimizer.zero_grad()\n try:\n data_dict = train_iter.next()\n except StopIteration:\n train_iter = iter(make_data_loader(\"train\", is_minknet, config))\n data_dict = train_iter.next()\n input = create_input_batch(\n data_dict, is_minknet, device=device, quantization_size=config.voxel_size\n )\n logit = net(input)\n loss = criterion(logit, data_dict[\"labels\"].to(device))\n loss.backward()\n optimizer.step()\n scheduler.step()\n torch.cuda.empty_cache()\n\n if i % config.stat_freq == 0:\n print(f\"Iter: {i}, Loss: {loss.item():.3e}\")\n\n if i % config.val_freq == 0 and i > 0:\n torch.save(\n {\n \"state_dict\": net.state_dict(),\n 
\"optimizer\": optimizer.state_dict(),\n \"scheduler\": scheduler.state_dict(),\n \"curr_iter\": i,\n },\n config.weights,\n )\n accuracy = test(net, device, config, phase=\"val\")\n if best_metric < accuracy:\n best_metric = accuracy\n print(f\"Validation accuracy: {accuracy}. Best accuracy: {best_metric}\")\n net.train()\n\n\nif __name__ == \"__main__\":\n config = parser.parse_args()\n seed_all(config.seed)\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n print(\"===================ModelNet40 Dataset===================\")\n print(f\"Training with translation {config.translation}\")\n print(f\"Evaluating with translation {config.test_translation}\")\n print(\"=============================================\\n\\n\")\n\n net = STR2NETWORK[config.network](\n in_channel=3, out_channel=40, embedding_channel=1024\n ).to(device)\n print(\"===================Network===================\")\n print(net)\n print(\"=============================================\\n\\n\")\n\n train(net, device, config)\n accuracy = test(net, device, config, phase=\"test\")\n print(f\"Test accuracy: {accuracy}\")\n", "# Copyright (c) 2020 NVIDIA CORPORATION.\n# Copyright (c) 2018-2020 Chris Choy ([email protected]).\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of\n# this software and associated documentation files (the \"Software\"), to deal in\n# the Software without restriction, including without limitation the rights to\n# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies\n# of the Software, and to permit persons to whom the Software is furnished to do\n# so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n# Please cite \"4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural\n# Networks\", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part\n# of the code.\nimport unittest\nimport numpy as np\nimport torch\n\nfrom MinkowskiEngine import (\n SparseTensor,\n SparseTensorOperationMode,\n SparseTensorQuantizationMode,\n set_sparse_tensor_operation_mode,\n clear_global_coordinate_manager,\n is_cuda_available,\n)\n\nfrom MinkowskiEngine.utils import batched_coordinates, sparse_quantize, sparse_collate\nfrom tests.python.common import data_loader, load_file\n\n\nclass SparseTensorTestCase(unittest.TestCase):\n def test(self):\n print(f\"{self.__class__.__name__}: test SparseTensor\")\n coords, feats, labels = data_loader(nchannel=2)\n input = SparseTensor(feats, coordinates=coords)\n print(input)\n\n def test_empty(self):\n print(f\"{self.__class__.__name__}: test_empty SparseTensor\")\n feats = torch.FloatTensor(0, 16)\n coords = torch.IntTensor(0, 4)\n input = SparseTensor(feats, coordinates=coords)\n print(input)\n\n def test_tensor_stride(self):\n print(f\"{self.__class__.__name__}: test_tensor_stride SparseTensor\")\n feats = torch.FloatTensor(4, 16)\n coords = torch.IntTensor(\n [[0, 4, 2, 1], [0, 4, 0, 0], [0, 4, 4, 4], [0, 4, 4, 7]]\n )\n print(coords)\n input = SparseTensor(feats, coordinates=coords, tensor_stride=4)\n self.assertEqual(input.tensor_stride, [4, 4, 4])\n print(input)\n\n def test_force_creation(self):\n print(f\"{self.__class__.__name__}: test_force_creation\")\n coords, feats, labels = data_loader(nchannel=2)\n input1 = SparseTensor(feats, coordinates=coords)\n input2 = SparseTensor(\n feats, coordinates=coords, coordinate_manager=input1.coordinate_manager\n )\n print(input1.coordinate_map_key, input2.coordinate_map_key)\n\n def test_device(self):\n print(f\"{self.__class__.__name__}: test_device SparseTensor\")\n if not is_cuda_available():\n return\n\n coords = torch.IntTensor(\n [[0, 1], [0, 1], [0, 2], [0, 2], [1, 0], [1, 0], [1, 1]]\n )\n feats = torch.FloatTensor([[0, 1, 2, 3, 5, 6, 7]]).T\n\n SparseTensor(feats.to(0), coords.to(0))\n feats = torch.FloatTensor([[0, 1, 2, 3, 5, 6, 7]]).T.to(0)\n st = SparseTensor(feats, coords, device=feats.device)\n print(st)\n\n def test_device_unique(self):\n print(f\"{self.__class__.__name__}: test_device_unique SparseTensor\")\n if not is_cuda_available():\n return\n\n coords = torch.IntTensor(\n [[0, 1], [0, 2], [0, 3], [0, 4], [1, 0], [1, 1], [1, 2]]\n )\n feats = torch.FloatTensor([[0, 1, 2, 3, 5, 6, 7]]).T\n SparseTensor(feats.to(0), coords.to(0))\n feats = torch.FloatTensor([[0, 1, 2, 3, 5, 6, 7]]).T.to(0)\n st = SparseTensor(feats, coords, device=feats.device)\n print(st)\n\n def test_device2(self):\n print(f\"{self.__class__.__name__}: test_device2 SparseTensor\")\n if not is_cuda_available():\n return\n\n coordinates = np.random.rand(8192,3) * 200\n quant_coordinates, quant_features = sparse_quantize(coordinates, coordinates)\n bcoords, bfeats = sparse_collate([quant_coordinates], [quant_features])\n bcoords, bfeats = bcoords.cuda(), bfeats.cuda()\n print(bcoords, bfeats)\n SparseTensor(bfeats, bcoords)\n\n def test_quantization(self):\n print(f\"{self.__class__.__name__}: test_quantization\")\n coords, feats, labels = 
data_loader(nchannel=2)\n # create duplicate coords\n coords[0] = coords[1]\n coords[2] = coords[3]\n input = SparseTensor(feats, coordinates=coords)\n self.assertTrue(len(input) == len(coords) - 2)\n input = SparseTensor(\n feats,\n coordinates=coords,\n quantization_mode=SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE,\n )\n self.assertTrue(len(coords) == 16)\n self.assertTrue(len(input) == 14)\n\n # 1D\n coords = torch.IntTensor(\n [[0, 1], [0, 1], [0, 2], [0, 2], [1, 0], [1, 0], [1, 1]]\n )\n feats = torch.FloatTensor([[0, 1, 2, 3, 5, 6, 7]]).T\n # 0.5, 2.5, 5.5, 7\n sinput = SparseTensor(\n coordinates=coords,\n features=feats,\n quantization_mode=SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE,\n )\n self.assertTrue(len(sinput) == 4)\n self.assertTrue(0.5 in sinput.features)\n self.assertTrue(2.5 in sinput.features)\n self.assertTrue(5.5 in sinput.features)\n self.assertTrue(7 in sinput.features)\n self.assertTrue(len(sinput.slice(sinput)) == len(coords))\n\n def test_quantization_gpu(self):\n print(f\"{self.__class__.__name__}: test_quantization_gpu\")\n coords, feats, labels = data_loader(nchannel=2)\n # create duplicate coords\n coords[0] = coords[1]\n coords[2] = coords[3]\n input = SparseTensor(feats, coordinates=coords)\n self.assertTrue(len(input) == len(coords) - 2)\n input = SparseTensor(\n feats,\n coordinates=coords,\n quantization_mode=SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE,\n device=\"cuda\",\n )\n self.assertTrue(len(coords) == 16)\n self.assertTrue(len(input) == 14)\n print(input)\n\n # 1D\n coords = torch.IntTensor(\n [[0, 1], [0, 1], [0, 2], [0, 2], [1, 0], [1, 0], [1, 1]]\n )\n feats = torch.FloatTensor([[0, 1, 2, 3, 5, 6, 7]]).T\n # 0.5, 2.5, 5.5, 7\n sinput = SparseTensor(\n coordinates=coords,\n features=feats,\n quantization_mode=SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE,\n device=\"cuda\",\n )\n print(sinput)\n self.assertTrue(len(sinput) == 4)\n self.assertTrue(0.5 in sinput.features)\n self.assertTrue(2.5 in sinput.features)\n self.assertTrue(5.5 in sinput.features)\n self.assertTrue(7 in sinput.features)\n self.assertTrue(len(sinput.slice(sinput)) == len(coords))\n\n def test_extraction(self):\n print(f\"{self.__class__.__name__}: test_extraction\")\n coords = torch.IntTensor([[0, 0], [0, 1], [0, 2], [2, 0], [2, 2]])\n feats = torch.FloatTensor([[1.1, 2.1, 3.1, 4.1, 5.1]]).t()\n X = SparseTensor(feats, coords)\n C0 = X.coordinates_at(0)\n F0 = X.features_at(0)\n self.assertTrue(0 in C0)\n self.assertTrue(1 in C0)\n self.assertTrue(2 in C0)\n\n self.assertTrue(1.1 in F0)\n self.assertTrue(2.1 in F0)\n self.assertTrue(3.1 in F0)\n\n CC0, FC0 = X.coordinates_and_features_at(0)\n self.assertTrue((C0 == CC0).all())\n self.assertTrue((F0 == FC0).all())\n\n coords, feats = X.decomposed_coordinates_and_features\n for c, f in zip(coords, feats):\n self.assertEqual(c.numel(), f.numel())\n print(c, f)\n self.assertEqual(len(coords[0]), 3)\n self.assertEqual(len(coords[1]), 0)\n self.assertEqual(len(coords[2]), 2)\n\n if not is_cuda_available():\n return\n\n coords = torch.IntTensor([[0, 0], [0, 1], [0, 2], [2, 0], [2, 2]])\n feats = torch.FloatTensor([[1.1, 2.1, 3.1, 4.1, 5.1]]).t()\n\n X = SparseTensor(feats, coords, device=0)\n coords, feats = X.decomposed_coordinates_and_features\n for c, f in zip(coords, feats):\n self.assertEqual(c.numel(), f.numel())\n print(c, f)\n\n self.assertEqual(len(coords[0]), 3)\n self.assertEqual(len(coords[1]), 0)\n self.assertEqual(len(coords[2]), 2)\n\n def test_features_at_coordinates(self):\n 
print(f\"{self.__class__.__name__}: test_features_at_coordinates\")\n coords = torch.IntTensor([[0, 0], [0, 1], [0, 2], [2, 0], [2, 2]])\n feats = torch.FloatTensor([[1.1, 2.1, 3.1, 4.1, 5.1]]).t()\n\n X = SparseTensor(features=feats, coordinates=coords)\n feats = X.features_at_coordinates(\n torch.FloatTensor([[0, 0], [0, 1], [0, 2], [2, 2], [0, 0], [0, 0.5]])\n ).flatten()\n\n self.assertTrue(feats[0] == 1.1)\n self.assertTrue(feats[3] == 5.1)\n self.assertTrue(feats[4] == 1.1)\n\n def test_decomposition(self):\n print(f\"{self.__class__.__name__}: test_decomposition\")\n coords, colors, pcd = load_file(\"1.ply\")\n colors = torch.from_numpy(colors)\n for batch_size in [1, 5, 10, 20, 40]:\n for voxel_size in [0.02]:\n dcoords = torch.from_numpy(np.floor(coords / voxel_size)).int()\n bcoords = batched_coordinates([dcoords for i in range(batch_size)])\n feats = torch.cat([colors for b in range(batch_size)], 0)\n sinput = SparseTensor(feats, bcoords)\n (\n decomposed_coords,\n decomposed_feats,\n ) = sinput.decomposed_coordinates_and_features\n print([len(c) for c in decomposed_coords])\n print([len(f) for f in decomposed_feats])\n self.assertEqual(len(decomposed_coords), batch_size)\n self.assertEqual(len(decomposed_feats), batch_size)\n\n def test_decomposition_gpu(self):\n print(f\"{self.__class__.__name__}: test_decomposition_gpu\")\n if not torch.cuda.is_available():\n return\n\n coords, colors, pcd = load_file(\"1.ply\")\n colors = torch.from_numpy(colors)\n\n for batch_size in [5, 10, 20, 40]:\n for voxel_size in [0.02]:\n dcoords = torch.from_numpy(np.floor(coords / voxel_size)).int()\n bcoords = batched_coordinates([dcoords for i in range(batch_size)])\n feats = torch.cat([colors for b in range(batch_size)], 0)\n sinput = SparseTensor(feats.to(0), bcoords.to(0))\n (\n decomposed_coords,\n decomposed_feats,\n ) = sinput.decomposed_coordinates_and_features\n print([len(c) for c in decomposed_coords])\n print([len(f) for f in decomposed_feats])\n self.assertEqual(len(decomposed_coords), batch_size)\n self.assertEqual(len(decomposed_feats), batch_size)\n\n def test_operation_mode(self):\n print(f\"{self.__class__.__name__}: test_operation_mode\")\n # Set to use the global sparse tensor coords manager by default\n set_sparse_tensor_operation_mode(\n SparseTensorOperationMode.SHARE_COORDINATE_MANAGER\n )\n\n coords, feats, labels = data_loader(nchannel=2)\n\n # Create a sparse tensor on two different coordinates.\n A = SparseTensor(torch.rand(feats.shape), coordinates=coords)\n B = SparseTensor(\n torch.rand(4, 2),\n coordinates=torch.IntTensor([[0, 0, 0], [1, 1, 1], [0, 1, 0], [1, 0, 1]]),\n )\n\n self.assertTrue(A.coordinate_manager == B.coordinate_manager)\n\n A.requires_grad_(True)\n B.requires_grad_(True)\n\n C = A + B\n\n C.F.sum().backward()\n\n self.assertTrue(torch.all(A.F.grad == 1).item())\n self.assertTrue(torch.all(B.F.grad == 1).item())\n\n C = A - B\n C = A * B\n C = A / B\n\n # Inplace\n A.requires_grad_(False)\n D = SparseTensor(\n torch.rand(feats.shape),\n coordinate_map_key=A.coordinate_map_key,\n coordinate_manager=A.coordinate_manager,\n )\n A -= D\n A *= D\n A /= D\n clear_global_coordinate_manager()\n set_sparse_tensor_operation_mode(\n SparseTensorOperationMode.SEPARATE_COORDINATE_MANAGER\n )\n", "# Copyright (c) 2020 NVIDIA CORPORATION.\n# Copyright (c) 2018-2020 Chris Choy ([email protected]).\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of\n# this software and associated documentation files (the \"Software\"), to deal 
in\n# the Software without restriction, including without limitation the rights to\n# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies\n# of the Software, and to permit persons to whom the Software is furnished to do\n# so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n# Please cite \"4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural\n# Networks\", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part\n# of the code.\n\nimport os\nimport numpy as np\nimport collections\nfrom urllib.request import urlretrieve\n\nimport torch\n\ntry:\n import open3d as o3d\nexcept ImportError:\n raise ImportError(\"Please install open3d with `pip install open3d`.\")\n\nif not os.path.isfile(\"1.ply\"):\n urlretrieve(\"https://bit.ly/3c2iLhg\", \"1.ply\")\n\n\ndef load_file(file_name):\n pcd = o3d.io.read_point_cloud(file_name)\n coords = np.array(pcd.points)\n colors = np.array(pcd.colors)\n return coords, colors, pcd\n\n\ndef batched_coordinates(coords, dtype=torch.int32, device=None):\n r\"\"\"Create a `ME.SparseTensor` coordinates from a sequence of coordinates\n\n Given a list of either numpy or pytorch tensor coordinates, return the\n batched coordinates suitable for `ME.SparseTensor`.\n\n Args:\n :attr:`coords` (a sequence of `torch.Tensor` or `numpy.ndarray`): a\n list of coordinates.\n\n :attr:`dtype`: torch data type of the return tensor. torch.int32 by default.\n\n Returns:\n :attr:`batched_coordindates` (`torch.Tensor`): a batched coordinates.\n\n .. warning::\n\n From v0.4, the batch index will be prepended before all coordinates.\n\n \"\"\"\n assert isinstance(\n coords, collections.abc.Sequence\n ), \"The coordinates must be a sequence.\"\n assert np.array(\n [cs.ndim == 2 for cs in coords]\n ).all(), \"All coordinates must be in a 2D array.\"\n D = np.unique(np.array([cs.shape[1] for cs in coords]))\n assert len(D) == 1, f\"Dimension of the array mismatch. 
All dimensions: {D}\"\n D = D[0]\n if device is None:\n if isinstance(coords, torch.Tensor):\n device = coords[0].device\n else:\n device = \"cpu\"\n assert dtype in [\n torch.int32,\n torch.float32,\n ], \"Only torch.int32, torch.float32 supported for coordinates.\"\n\n # Create a batched coordinates\n N = np.array([len(cs) for cs in coords]).sum()\n bcoords = torch.zeros((N, D + 1), dtype=dtype, device=device) # uninitialized\n\n s = 0\n for b, cs in enumerate(coords):\n if dtype == torch.int32:\n if isinstance(cs, np.ndarray):\n cs = torch.from_numpy(np.floor(cs))\n elif not (\n isinstance(cs, torch.IntTensor) or isinstance(cs, torch.LongTensor)\n ):\n cs = cs.floor()\n\n cs = cs.int()\n else:\n if isinstance(cs, np.ndarray):\n cs = torch.from_numpy(cs)\n\n cn = len(cs)\n # BATCH_FIRST:\n bcoords[s : s + cn, 1:] = cs\n bcoords[s : s + cn, 0] = b\n s += cn\n return bcoords", "import numpy as np\nimport unittest\nimport time\n\nimport torch\nimport MinkowskiEngineTest._C\n\nfrom utils import load_file, batched_coordinates\n\n\nclass CoordinateMapTestCase(unittest.TestCase):\n def test_batch_insert(self):\n assert torch.cuda.is_available()\n coordinates = torch.IntTensor([[0, 1], [1, 2], [2, 3], [2, 3]]).to(0)\n num, _ = MinkowskiEngineTest._C.coordinate_map_batch_insert_test(coordinates)\n self.assertEqual(num, 3)\n\n def test_mapping(self):\n assert torch.cuda.is_available()\n coordinates = torch.IntTensor(\n [[0, 1], [1, 2], [2, 3], [2, 3], [3, 2], [3, 2]]\n ).to(0)\n (\n mapping,\n inverse_mapping,\n ) = MinkowskiEngineTest._C.coordinate_map_inverse_map_test(coordinates)\n print(mapping)\n print(inverse_mapping)\n self.assertEqual(len(mapping), 4)\n self.assertTrue(\n torch.all(\n coordinates[mapping.long()][inverse_mapping.long()] == coordinates\n )\n )\n\n def test_pcd_insert(self):\n coords, colors, pcd = load_file(\"1.ply\")\n BATCH_SIZE = 1\n voxel_size = 0.02\n bcoords = [np.floor(coords / voxel_size) for i in range(BATCH_SIZE)]\n bcoords = batched_coordinates(bcoords).to(0)\n num, _ = MinkowskiEngineTest._C.coordinate_map_batch_insert_test(bcoords)\n self.assertEqual(num, 161890)\n for batch_size in [1, 2, 4, 8, 16, 20, 40, 80, 160, 320]:\n for voxel_size in [0.02]:\n py_min_time = 1000\n dcoords = torch.from_numpy(np.floor(coords / voxel_size)).int()\n bcoords = batched_coordinates([dcoords for i in range(batch_size)])\n for i in range(10):\n s = time.time()\n bcoords = bcoords.to(0)\n (\n num,\n cpp_time,\n ) = MinkowskiEngineTest._C.coordinate_map_batch_insert_test(bcoords)\n py_min_time = min(time.time() - s, py_min_time)\n print(f\"{len(bcoords)}\\t{num}\\t{py_min_time}\\t{cpp_time}\")\n\n def test_batch_find(self):\n coordinates = torch.IntTensor([[0, 1], [1, 2], [2, 3], [2, 3]]).to(0)\n queries = torch.IntTensor([[-1, 1], [1, 2], [2, 3], [2, 3], [0, 0]]).to(0)\n (\n valid_query_index,\n query_value,\n ) = MinkowskiEngineTest._C.coordinate_map_batch_find_test(coordinates, queries)\n self.assertEqual(len(valid_query_index), len(query_value))\n self.assertEqual(len(valid_query_index), 3)\n\n self.assertEqual(valid_query_index[0], 1)\n self.assertEqual(valid_query_index[1], 2)\n self.assertEqual(valid_query_index[2], 3)\n\n self.assertEqual(query_value[0], 1)\n self.assertEqual(query_value[1], 2)\n self.assertEqual(query_value[2], 2)\n\n def test_stride(self):\n coordinates = torch.IntTensor([[0, 1], [0, 2], [0, 3], [0, 3]]).to(0)\n stride = [1]\n with self.assertRaises(TypeError):\n MinkowskiEngineTest._C.coordinate_map_stride_test(coordinates, stride)\n\n stride = 
torch.IntTensor([-1])\n with self.assertRaises(RuntimeError):\n MinkowskiEngineTest._C.coordinate_map_stride_test(coordinates, stride)\n\n stride = torch.IntTensor([1, 1])\n with self.assertRaises(RuntimeError):\n MinkowskiEngineTest._C.coordinate_map_stride_test(coordinates, stride)\n\n stride = torch.IntTensor([2])\n map_size, tensor_stride = MinkowskiEngineTest._C.coordinate_map_stride_test(\n coordinates, stride\n )\n self.assertEqual(map_size, 2)\n self.assertEqual(tensor_stride, [2])\n\n coordinates = torch.IntTensor(\n [[0, 1, 1], [0, 2, 1], [0, 1, 0], [1, 0, 3], [1, 0, 2]]\n )\n stride = torch.IntTensor([1])\n with self.assertRaises(RuntimeError):\n MinkowskiEngineTest._C.coordinate_map_stride_test(coordinates, stride)\n\n coordinates = torch.IntTensor(\n [[0, 1, 1], [0, 2, 1], [0, 1, 0], [1, 0, 3], [1, 0, 2]]\n ).to(0)\n stride = torch.IntTensor([1, 1])\n map_size, tensor_stride = MinkowskiEngineTest._C.coordinate_map_stride_test(\n coordinates, stride\n )\n self.assertEqual(map_size, 5)\n self.assertEqual(tensor_stride, [1, 1])\n\n stride = torch.IntTensor([2, 1])\n map_size, tensor_stride = MinkowskiEngineTest._C.coordinate_map_stride_test(\n coordinates, stride\n )\n self.assertEqual(map_size, 5)\n self.assertEqual(tensor_stride, [2, 1])\n\n stride = torch.IntTensor([4, 4])\n map_size, tensor_stride = MinkowskiEngineTest._C.coordinate_map_stride_test(\n coordinates, stride\n )\n self.assertEqual(map_size, 2)\n self.assertEqual(tensor_stride, [4, 4])\n\n coordinates = torch.IntTensor([[0, -1], [0, -2], [0, 1], [0, 0]]).to(0)\n stride = torch.IntTensor([2])\n map_size, tensor_stride = MinkowskiEngineTest._C.coordinate_map_stride_test(\n coordinates, stride\n )\n self.assertEqual(map_size, 2)\n self.assertEqual(tensor_stride, [2])\n", "# Copyright (c) 2020 NVIDIA CORPORATION.\n# Copyright (c) 2018-2020 Chris Choy ([email protected]).\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of\n# this software and associated documentation files (the \"Software\"), to deal in\n# the Software without restriction, including without limitation the rights to\n# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies\n# of the Software, and to permit persons to whom the Software is furnished to do\n# so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n# Please cite \"4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural\n# Networks\", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part\n# of the code.\nimport torch\nimport unittest\n\nfrom MinkowskiEngine import (\n SparseTensor,\n MinkowskiInstanceNorm,\n MinkowskiInstanceNormFunction,\n)\nfrom utils.gradcheck import gradcheck\n\nfrom tests.python.common import data_loader\n\n\nclass TestNormalization(unittest.TestCase):\n def test_inst_norm(self):\n in_channels = 2\n coords, feats, labels = data_loader(in_channels)\n feats = feats.double()\n input = SparseTensor(feats, coords)\n input.F.requires_grad_()\n norm = MinkowskiInstanceNorm(num_features=in_channels).double()\n\n out = norm(input)\n print(out)\n\n fn = MinkowskiInstanceNormFunction()\n self.assertTrue(\n gradcheck(\n fn, (input.F, input.coordinate_map_key, None, input.coordinate_manager)\n )\n )\n\n def test_inst_norm_gpu(self):\n in_channels = 2\n coords, feats, labels = data_loader(in_channels)\n feats = feats.double()\n\n device = torch.device(\"cuda\")\n input = SparseTensor(feats, coords, device=device)\n input.F.requires_grad_()\n norm = MinkowskiInstanceNorm(num_features=in_channels).to(device).double()\n\n out = norm(input)\n print(out)\n\n fn = MinkowskiInstanceNormFunction()\n self.assertTrue(\n gradcheck(\n fn, (input.F, input.coordinate_map_key, None, input.coordinate_manager)\n )\n )\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
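The `criterion` helper in examples/classification_modelnet40.py above implements label smoothing by hand: the one-hot target keeps 1 - eps on the true class and spreads eps over the remaining classes before the negative log-likelihood is averaged. The short self-contained rerun of that arithmetic below uses made-up logits and labels; only the eps value of 0.2 is taken from the listing.

import torch
import torch.nn.functional as F

eps = 0.2                              # same smoothing factor as the listing
pred = torch.randn(4, 10)              # arbitrary logits: 4 samples, 10 classes
labels = torch.tensor([1, 0, 3, 9])    # arbitrary ground-truth classes

n_class = pred.size(1)
one_hot = torch.zeros_like(pred).scatter(1, labels.view(-1, 1), 1)
one_hot = one_hot * (1 - eps) + (1 - one_hot) * eps / (n_class - 1)
log_prb = F.log_softmax(pred, dim=1)

# Smoothed negative log-likelihood, averaged over the batch.
loss = -(one_hot * log_prb).sum(dim=1).mean()
print(loss)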
[ [ "torch.nn.Module.__init__", "torch.optim.lr_scheduler.CosineAnnealingLR", "torch.nn.functional.log_softmax", "torch.nn.init.constant_", "torch.nn.functional.cross_entropy", "torch.utils.data.DataLoader", "torch.cuda.empty_cache", "torch.zeros_like", "numpy.concatenate", "torch.no_grad", "torch.cuda.is_available", "numpy.random.uniform", "torch.argmax" ], [ "torch.all", "torch.from_numpy", "torch.FloatTensor", "numpy.random.rand", "torch.cuda.is_available", "torch.IntTensor", "torch.rand", "numpy.floor" ], [ "numpy.array", "torch.from_numpy", "numpy.floor", "torch.zeros" ], [ "numpy.floor", "torch.cuda.is_available", "torch.IntTensor" ], [ "torch.device" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
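The `batched_coordinates` utility in tests/cpp/utils.py above collates a list of per-sample coordinate arrays into a single (N, D+1) tensor whose first column holds the batch index, which is the batch-first layout its docstring warns about. A reduced sketch of that layout with two made-up 2-D samples:

import torch

coords = [torch.IntTensor([[0, 1], [2, 3]]),   # sample 0 (illustrative points)
          torch.IntTensor([[5, 5]])]           # sample 1

N = sum(len(cs) for cs in coords)
D = coords[0].shape[1]
bcoords = torch.zeros((N, D + 1), dtype=torch.int32)

s = 0
for b, cs in enumerate(coords):
    bcoords[s:s + len(cs), 1:] = cs   # original coordinates
    bcoords[s:s + len(cs), 0] = b     # batch index goes in column 0
    s += len(cs)

print(bcoords)
# tensor([[0, 0, 1],
#         [0, 2, 3],
#         [1, 5, 5]], dtype=torch.int32)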
brunomarct/archetypal
[ "ce8daf4e18ef3ec92967e5d6837b392199caf83b" ]
[ "archetypal/schedule.py" ]
[ "################################################################################\n# Module: schedule.py\n# Description: Functions for handling conversion of EnergyPlus schedule objects\n# License: MIT, see full license in LICENSE.txt\n# Web: https://github.com/samuelduchesne/archetypal\n################################################################################\n\nimport functools\nimport io\nimport logging as lg\nfrom datetime import datetime, timedelta\n\nimport archetypal\nimport numpy as np\nimport pandas as pd\nfrom archetypal import log\n\n\nclass Schedule(object):\n \"\"\"An object designed to handle any EnergyPlys schedule object\"\"\"\n\n def __init__(self, sch_name, idf=None, start_day_of_the_week=0,\n strict=False, base_year=2018, schType=None, **kwargs):\n \"\"\"\n\n Args:\n idf (IDF): IDF object\n sch_name (str): The schedule name in the idf file\n start_day_of_the_week (int): 0-based day of week (Monday=0)\n strict (bool): if True, schedules that have the Field-Sets such\n as Holidays and CustomDay will raise an error if they are absent\n from the IDF file. If False, any missing qualifiers will be\n ignored.\n base_year (int): The base year of the schedule. Defaults to 2018\n since the first day of that year is a Monday.\n \"\"\"\n super(Schedule, self).__init__(**kwargs)\n self.strict = strict\n self.idf = idf\n self.schName = sch_name\n self.startDayOfTheWeek = self.get_sdow(start_day_of_the_week)\n self.year = base_year\n self.startDate = self.start_date()\n self.count = 0\n self.startHOY = 1\n self.endHOY = 24\n self.unit = \"unknown\"\n\n self.index_ = None\n self.values = None\n self.schType = schType\n _type = kwargs.get('Type', None)\n if _type is None:\n self.schTypeLimitsName = self.get_schedule_type_limits_name(\n sch_type=self.schType)\n else:\n self.schTypeLimitsName = _type\n\n @classmethod\n def constant_schedule(cls, hourly_value=1, Name='AlwaysOn', **kwargs):\n idftxt = \"VERSION, 8.9;\" # Not an emplty string. 
has just the\n # version number\n # we can make a file handle of a string\n fhandle = io.StringIO(idftxt)\n # initialize the IDF object with the file handle\n idf_scratch = archetypal.IDF(fhandle)\n\n idf_scratch.add_object(ep_object='Schedule:Constant'.upper(),\n **dict(Name=Name,\n Schedule_Type_Limits_Name='',\n Hourly_Value=hourly_value),\n save=False)\n\n sched = Schedule(sch_name=Name, idf=idf_scratch, **kwargs)\n return sched\n\n @property\n def all_values(self):\n \"\"\"returns the values array\"\"\"\n if self.values is None:\n self.values = self.get_schedule_values(sch_name=self.schName,\n sch_type=self.schType)\n return self.values\n else:\n return self.values\n\n @property\n def max(self):\n return max(self.all_values)\n\n @property\n def min(self):\n return min(self.all_values)\n\n @property\n def mean(self):\n return np.mean(self.all_values)\n\n @property\n def series(self):\n \"\"\"Returns the schedule values as a pd.Series object with a\n DateTimeIndex\"\"\"\n index = pd.date_range(start=self.startDate, periods=len(\n self.all_values), freq='1H')\n return pd.Series(self.all_values, index=index)\n\n def get_schedule_type_limits_name(self, sch_name=None, sch_type=None):\n \"\"\"Return the Schedule Type Limits name associated to a schedule\n name\"\"\"\n if sch_name is None:\n sch_name = self.schName\n if sch_type is None:\n schedule_values = self.idf.get_schedule_data_by_name(sch_name,\n sch_type=sch_type)\n try:\n schedule_limit_name = schedule_values.Schedule_Type_Limits_Name\n except:\n return 'unknown'\n else:\n return schedule_limit_name\n\n def get_schedule_type_limits_data(self, sch_name=None):\n \"\"\"Returns Schedule Type Limits data from schedule name\"\"\"\n\n if sch_name is None:\n sch_name = self.schName\n\n schedule_values = self.idf.get_schedule_data_by_name(sch_name)\n try:\n schedule_limit_name = schedule_values.Schedule_Type_Limits_Name\n except:\n # this schedule is probably a 'Schedule:Week:Daily' which does\n # not have a Schedule_Type_Limits_Name field\n return '', '', '', ''\n else:\n lower_limit, upper_limit, numeric_type, unit_type = \\\n self.idf.get_schedule_type_limits_data_by_name(\n schedule_limit_name)\n\n self.unit = unit_type\n if self.unit == \"unknown\":\n self.unit = numeric_type\n\n return lower_limit, upper_limit, numeric_type, unit_type\n\n def get_schedule_type(self, sch_name=None):\n \"\"\"Return the schedule type\"\"\"\n if sch_name is None:\n sch_name = self.schName\n\n schedule_values = self.idf.get_schedule_data_by_name(sch_name)\n sch_type = schedule_values.fieldvalues[0]\n\n return sch_type\n\n def start_date(self):\n \"\"\"The start date of the schedule. 
Satisfies `startDayOfTheWeek`\"\"\"\n import calendar\n c = calendar.Calendar(firstweekday=self.startDayOfTheWeek)\n start_date = c.monthdatescalendar(self.year, 1)[0][0]\n return datetime(start_date.year, start_date.month, start_date.day)\n\n def plot(self, slice=None, **kwargs):\n hourlyvalues = self.all_values\n index = pd.date_range(self.startDate, periods=len(\n hourlyvalues),\n freq='1H')\n series = pd.Series(hourlyvalues, index=index, dtype=float)\n if slice is None:\n slice = pd.IndexSlice[:]\n elif len(slice) > 1:\n slice = pd.IndexSlice[slice[0]:slice[1]]\n ax = series.loc[slice].plot(**kwargs, label=self.schName)\n return ax\n\n def get_interval_day_ep_schedule_values(self, sch_name=None):\n \"\"\"'Schedule:Day:Interval\"\"\"\n\n if sch_name is None:\n sch_name = self.schName\n\n values = self.idf.getobject('Schedule:Day:Interval'.upper(), sch_name)\n lower_limit, upper_limit, numeric_type, unit_type = \\\n self.get_schedule_type_limits_data(sch_name)\n\n number_of_day_sch = int((len(values.fieldvalues) - 3) / 2)\n\n hourly_values = np.arange(24)\n start_hour = 0\n for i in range(number_of_day_sch):\n value = float(values['Value_Until_Time_{}'.format(i + 1)])\n until_time = [int(s.strip()) for s in\n values['Time_{}'.format(i + 1)].split(\":\") if\n s.strip().isdigit()]\n end_hour = int(until_time[0] + until_time[1] / 60)\n for hour in range(start_hour, end_hour):\n hourly_values[hour] = value\n\n start_hour = end_hour\n\n if numeric_type.strip().lower() == \"discrete\":\n hourly_values = hourly_values.astype(int)\n\n return hourly_values\n\n def get_hourly_day_ep_schedule_values(self, sch_name=None):\n \"\"\"'Schedule:Day:Hourly'\"\"\"\n if sch_name is None:\n sch_name = self.schName\n\n values = self.idf.getobject('Schedule:Day:Hourly'.upper(), sch_name)\n\n fieldvalues_ = np.array(values.fieldvalues[3:])\n\n return fieldvalues_\n\n def get_compact_weekly_ep_schedule_values(self, sch_name=None,\n start_date=None, index=None):\n \"\"\"'schedule:week:compact'\"\"\"\n if start_date is None:\n start_date = self.startDate\n if index is None:\n idx = pd.date_range(start=start_date, periods=168, freq='1H')\n slicer_ = pd.Series([False] * (len(idx)), index=idx)\n else:\n slicer_ = pd.Series([False] * (len(index)), index=index)\n\n if sch_name is None:\n sch_name = self.schName\n values = self.idf.getobject('schedule:week:compact'.upper(), sch_name)\n\n weekly_schedules = pd.Series([0] * len(slicer_), index=slicer_.index)\n # update last day of schedule\n\n if self.count == 0:\n self.schType = values.key\n self.endHOY = 168\n\n num_of_daily_schedules = int(len(values.fieldvalues[2:]) / 2)\n\n for i in range(num_of_daily_schedules):\n day_type = values['DayType_List_{}'.format(i + 1)].lower()\n how = self.field_set(day_type, slicer_)\n if not weekly_schedules.loc[how].empty:\n # Loop through days and replace with day:schedule values\n days = []\n for name, day in weekly_schedules.loc[how].groupby(pd.Grouper(\n freq='D')):\n if not day.empty:\n ref = values.get_referenced_object(\n \"ScheduleDay_Name_{}\".format(i + 1))\n day.loc[:] = self.get_schedule_values(\n sch_name=ref.Name, sch_type=ref.key)\n days.append(day)\n new = pd.concat(days)\n slicer_.update(\n pd.Series([True] * len(new.index), index=new.index))\n slicer_ = slicer_.apply(lambda x: x == True)\n weekly_schedules.update(new)\n else:\n return weekly_schedules.values\n\n return weekly_schedules.values\n\n def get_daily_weekly_ep_schedule_values(self, sch_name=None):\n \"\"\"'schedule:week:daily'\"\"\"\n if sch_name is None:\n 
sch_name = self.schName\n\n values = self.idf.getobject('schedule:week:daily'.upper(), sch_name)\n\n # 7 list for 7 days of the week\n hourly_values = []\n for day in ['Monday', 'Tuesday', 'Wednesday', 'Thursday',\n 'Friday', 'Saturday', 'Sunday']:\n ref = values.get_referenced_object(\n '{}_ScheduleDay_Name'.format(day))\n h = self.get_schedule_values(sch_name=ref.Name, sch_type=ref.key)\n hourly_values.append(h)\n hourly_values = np.array(hourly_values)\n # shift days earlier by self.startDayOfTheWeek\n hourly_values = np.roll(hourly_values, -self.startDayOfTheWeek, axis=0)\n\n return hourly_values.ravel()\n\n def get_list_day_ep_schedule_values(self, sch_name=None):\n \"\"\"'schedule:day:list'\"\"\"\n if sch_name is None:\n sch_name = self.schName\n\n values = self.idf.getobject('schedule:day:list'.upper(), sch_name)\n lower_limit, upper_limit, numeric_type, unit_type = \\\n self.get_schedule_type_limits_data(sch_name)\n\n import pandas as pd\n freq = int(values['Minutes_per_Item']) # Frequency of the values\n num_values = values.fieldvalues[5:] # List of values\n method = values['Interpolate_to_Timestep'] # How to resample\n\n # fill a list of available values and pad with zeros (this is safer\n # but should not occur)\n all_values = np.arange(int(24 * 60 / freq))\n for i in all_values:\n try:\n all_values[i] = num_values[i]\n except:\n all_values[i] = 0\n # create a fake index to help us with the resampling\n index = pd.date_range(start=self.startDate,\n periods=(24 * 60) / freq,\n freq='{}T'.format(freq))\n series = pd.Series(all_values, index=index)\n\n # resample series to hourly values and apply resampler function\n series = series.resample('1H').apply(_how(method))\n\n return series.values\n\n def get_constant_ep_schedule_values(self, sch_name=None):\n \"\"\"'schedule:constant'\"\"\"\n if sch_name is None:\n sch_name = self.schName\n\n values = self.idf.getobject('schedule:constant'.upper(), sch_name)\n lower_limit, upper_limit, numeric_type, unit_type = \\\n self.get_schedule_type_limits_data(sch_name)\n\n hourly_values = np.arange(8760)\n value = float(values['Hourly_Value'])\n for hour in hourly_values:\n hourly_values[hour] = value\n\n if numeric_type.strip().lower() == 'discrete':\n hourly_values = hourly_values.astype(int)\n\n return hourly_values\n\n def get_file_ep_schedule_values(self, sch_name=None):\n \"\"\"'schedule:file'\"\"\"\n if sch_name is None:\n sch_name = self.schName\n\n values = self.idf.getobject('schedule:file'.upper(), sch_name)\n lower_limit, upper_limit, numeric_type, unit_type = \\\n self.get_schedule_type_limits_data(sch_name)\n\n filename = values['File_Name']\n column = values['Column_Number']\n rows = values['Rows_to_Skip_at_Top']\n hours = values['Number_of_Hours_of_Data']\n sep = values['Column_Separator']\n interp = values['Interpolate_to_Timestep']\n\n import pandas as pd\n import os\n idfdir = os.path.dirname(self.idf.idfname)\n file = os.path.join(idfdir, filename)\n delimeter = _separator(sep)\n skip_rows = int(rows) - 1 # We want to keep the column\n col = [int(column) - 1] # zero-based\n values = pd.read_csv(file, delimiter=delimeter, skiprows=skip_rows,\n usecols=col)\n\n return values.iloc[:, 0].values\n\n def get_compact_ep_schedule_values(self, sch_name=None):\n \"\"\"'schedule:compact'\"\"\"\n\n if sch_name is None:\n sch_name = self.schName\n\n values = self.idf.getobject('schedule:compact'.upper(), sch_name)\n lower_limit, upper_limit, numeric_type, unit_type = \\\n self.get_schedule_type_limits_data(sch_name)\n\n field_sets = 
['through', 'for', 'interpolate', 'until', 'value']\n fields = values.fieldvalues[3:]\n\n index = pd.date_range(start=self.startDate, periods=8760, freq='H')\n zeros = np.zeros(len(index))\n\n slicer_ = pd.Series([False] * len(index), index=index)\n series = pd.Series(zeros, index=index)\n\n from_day = self.startDate\n ep_from_day = datetime(self.year, 1, 1)\n from_time = '00:00'\n how_interpolate = None\n for field in fields:\n if any([spe in field.lower() for spe in field_sets]):\n f_set, hour, minute, value = self.field_interpreter(field)\n\n if f_set.lower() == 'through':\n # main condition. All sub-conditions must obey a\n # `Through` condition\n\n # First, initialize the slice (all False for now)\n through_conditions = self.invalidate_condition(series)\n\n # reset from_time\n from_time = '00:00'\n\n # Prepare ep_to_day variable\n ep_to_day = self.date_field_interpretation(value) + \\\n timedelta(days=1)\n\n # Calculate Timedelta in days\n days = (ep_to_day - ep_from_day).days\n # Add timedelta to start_date\n to_day = from_day + timedelta(days=days) + timedelta(\n hours=-1)\n\n # slice the conditions with the range and apply True\n through_conditions.loc[from_day:to_day] = True\n\n from_day = to_day + timedelta(hours=1)\n ep_from_day = ep_to_day\n elif f_set.lower() == 'for':\n # slice specific days\n # reset from_time\n from_time = '00:00'\n\n for_condition = self.invalidate_condition(series)\n values = value.split()\n if len(values) > 1:\n # if multiple `For`. eg.: For: Weekends Holidays,\n # Combine both conditions\n for value in values:\n if value.lower() == 'allotherdays':\n # Apply condition to slice\n how = self.field_set(value, slicer_)\n # Reset though condition\n through_conditions = how\n for_condition = how\n else:\n how = self.field_set(value, slicer_)\n for_condition.loc[how] = True\n elif value.lower() == 'allotherdays':\n # Apply condition to slice\n how = self.field_set(value, slicer_)\n # Reset though condition\n through_conditions = how\n for_condition = how\n else:\n # Apply condition to slice\n how = self.field_set(value)\n for_condition.loc[how] = True\n\n # Combine the for_condition with all_conditions\n all_conditions = through_conditions & for_condition\n\n # update in memory slice\n # self.sliced_day_.loc[all_conditions] = True\n elif 'interpolate' in f_set.lower():\n # we need to upsample to series to 8760 * 60 values\n new_idx = pd.date_range(start=self.startDate,\n periods=525600, closed='left',\n freq='T')\n series = series.resample('T').pad()\n series = series.reindex(new_idx)\n series.fillna(method='pad', inplace=True)\n through_conditions = through_conditions.resample('T').pad()\n through_conditions = through_conditions.reindex(new_idx)\n through_conditions.fillna(method='pad', inplace=True)\n for_condition = for_condition.resample('T').pad()\n for_condition = for_condition.reindex(new_idx)\n for_condition.fillna(method='pad', inplace=True)\n how_interpolate = value.lower()\n elif f_set.lower() == 'until':\n until_condition = self.invalidate_condition(series)\n if series.index.freq.name == 'T':\n # until_time = str(int(hour) - 1) + ':' + minute\n until_time = timedelta(hours=int(hour),\n minutes=int(minute)) - timedelta(\n minutes=1)\n\n else:\n until_time = str(int(hour) - 1) + ':' + minute\n until_condition.loc[until_condition.between_time(from_time,\n str(\n until_time)).index] = True\n all_conditions = for_condition & through_conditions & \\\n until_condition\n\n from_time = str(int(hour)) + ':' + minute\n elif f_set.lower() == 'value':\n # If the 
therm `Value: ` field is used, we will catch it\n # here.\n # update in memory slice\n slicer_.loc[all_conditions] = True\n series[all_conditions] = value\n else:\n # Do something here before looping to the next Field\n pass\n else:\n # If the term `Value: ` is not used; the variable is simply\n # passed in the Field\n value = float(field)\n series[all_conditions] = value\n\n # update in memory slice\n slicer_.loc[all_conditions] = True\n if how_interpolate:\n return series.resample('H').mean().values\n else:\n return series.values\n\n def field_interpreter(self, field):\n \"\"\"dealing with a Field-Set (Through, For, Interpolate,\n # Until, Value) and return the parsed string\"\"\"\n\n if 'through' in field.lower():\n # deal with through\n if ':' in field.lower():\n # parse colon\n f_set, statement = field.split(':')\n hour = None\n minute = None\n value = statement.strip()\n else:\n msg = 'The schedule \"{sch}\" contains a Field ' \\\n 'that is not understood: \"{field}\"'.format(\n sch=self.schName, field=field)\n raise NotImplementedError(msg)\n elif 'for' in field.lower():\n if ':' in field.lower():\n # parse colon\n f_set, statement = field.split(':')\n value = statement.strip()\n hour = None\n minute = None\n else:\n # parse without a colon\n msg = 'The schedule \"{sch}\" contains a Field ' \\\n 'that is not understood: \"{field}\"'.format(\n sch=self.schName, field=field)\n raise NotImplementedError(msg)\n elif 'interpolate' in field.lower():\n msg = 'The schedule \"{sch}\" contains sub-hourly values (' \\\n 'Field-Set=\"{field}\"). The average over the hour is ' \\\n 'taken'.format(sch=self.schName, field=field)\n log(msg, lg.WARNING)\n f_set, value = field.split(':')\n hour = None\n minute = None\n elif 'until' in field.lower():\n if ':' in field.lower():\n # parse colon\n try:\n f_set, hour, minute = field.split(':')\n hour = hour.strip() # remove trailing spaces\n minute = minute.strip() # remove trailing spaces\n value = None\n except:\n f_set = 'until'\n hour, minute = field.split(':')\n hour = hour[-2:].strip()\n minute = minute.strip()\n value = None\n else:\n msg = 'The schedule \"{sch}\" contains a Field ' \\\n 'that is not understood: \"{field}\"'.format(\n sch=self.schName, field=field)\n raise NotImplementedError(msg)\n elif 'value' in field.lower():\n if ':' in field.lower():\n # parse colon\n f_set, statement = field.split(':')\n value = statement.strip()\n hour = None\n minute = None\n else:\n msg = 'The schedule \"{sch}\" contains a Field ' \\\n 'that is not understood: \"{field}\"'.format(\n sch=self.schName, field=field)\n raise NotImplementedError(msg)\n else:\n # deal with the data value\n f_set = field\n hour = None\n minute = None\n value = field[len(field) + 1:].strip()\n\n return f_set, hour, minute, value\n\n @staticmethod\n def invalidate_condition(series):\n index = series.index\n periods = len(series)\n return pd.Series([False] * periods, index=index)\n\n def get_yearly_ep_schedule_values(self, sch_name=None):\n \"\"\"'schedule:year'\"\"\"\n # first week\n\n start_date = self.startDate\n idx = pd.date_range(start=start_date, periods=8760, freq='1H')\n hourly_values = pd.Series([0] * 8760, index=idx)\n\n # update last day of schedule\n self.endHOY = 8760\n\n if sch_name is None:\n sch_name = self.schName\n\n values = self.idf.getobject('schedule:year'.upper(), sch_name)\n\n # generate weekly schedules\n num_of_weekly_schedules = int(len(values.fieldvalues[3:]) / 5)\n\n for i in range(num_of_weekly_schedules):\n ref = values.get_referenced_object(\n 
'ScheduleWeek_Name_{}'.format(i + 1))\n\n start_month = values['Start_Month_{}'.format(i + 1)]\n end_month = values['End_Month_{}'.format(i + 1)]\n start_day = values['Start_Day_{}'.format(i + 1)]\n end_day = values['End_Day_{}'.format(i + 1)]\n\n start = datetime.strptime(\n '{}/{}/{}'.format(self.year, start_month, start_day),\n '%Y/%m/%d')\n end = datetime.strptime(\n '{}/{}/{}'.format(self.year, end_month, end_day),\n '%Y/%m/%d')\n days = (end - start).days + 1\n\n end_date = start_date + timedelta(days=days) + timedelta(hours=23)\n how = pd.IndexSlice[start_date:end_date]\n\n weeks = []\n for name, week in hourly_values.loc[how].groupby(\n pd.Grouper(freq='168H')):\n if not week.empty:\n try:\n week.loc[:] = self.get_schedule_values(\n sch_name=ref.Name, start_date=week.index[0],\n index=week.index, sch_type=ref.key)\n except ValueError:\n week.loc[:] = self.get_schedule_values(\n ref.Name, week.index[0])[0:len(week)]\n finally:\n weeks.append(week)\n new = pd.concat(weeks)\n hourly_values.update(new)\n start_date += timedelta(days=days)\n\n return hourly_values.values\n\n def get_schedule_values(self, sch_name=None, start_date=None, index=None,\n sch_type=None):\n \"\"\"Main function that returns the schedule values\n\n Args:\n sch_type:\n index:\n start_date:\n \"\"\"\n\n if sch_name is None:\n sch_name = self.schName\n\n if sch_type is None:\n schedule_values = self.idf.get_schedule_data_by_name(sch_name)\n self.schType = schedule_values.key.upper()\n sch_type = self.schType\n if self.count == 0:\n # This is the first time, get the schedule type and the type limits.\n self.schTypeLimitsName = self.get_schedule_type_limits_name()\n self.count += 1\n\n if sch_type.upper() == \"schedule:year\".upper():\n hourly_values = self.get_yearly_ep_schedule_values(\n sch_name)\n elif sch_type.upper() == \"schedule:day:interval\".upper():\n hourly_values = self.get_interval_day_ep_schedule_values(\n sch_name)\n elif sch_type.upper() == \"schedule:day:hourly\".upper():\n hourly_values = self.get_hourly_day_ep_schedule_values(\n sch_name)\n elif sch_type.upper() == \"schedule:day:list\".upper():\n hourly_values = self.get_list_day_ep_schedule_values(\n sch_name)\n elif sch_type.upper() == \"schedule:week:compact\".upper():\n hourly_values = self.get_compact_weekly_ep_schedule_values(\n sch_name, start_date, index)\n elif sch_type.upper() == \"schedule:week:daily\".upper():\n hourly_values = self.get_daily_weekly_ep_schedule_values(\n sch_name)\n elif sch_type.upper() == \"schedule:constant\".upper():\n hourly_values = self.get_constant_ep_schedule_values(\n sch_name)\n elif sch_type.upper() == \"schedule:compact\".upper():\n hourly_values = self.get_compact_ep_schedule_values(\n sch_name)\n elif sch_type.upper() == \"schedule:file\".upper():\n hourly_values = self.get_file_ep_schedule_values(\n sch_name)\n else:\n log('Archetypal does not support \"{}\" currently'.format(\n self.schType), lg.WARNING)\n\n hourly_values = []\n\n return hourly_values\n\n def is_schedule(self, sch_name):\n \"\"\"Returns True if idfobject is one of 'schedule_types'\"\"\"\n if sch_name.upper() in self.idf.schedules_dict:\n return True\n else:\n return False\n\n def to_year_week_day(self):\n \"\"\"convert a Schedule Class to the 'Schedule:Year',\n 'Schedule:Week:Daily' and 'Schedule:Day:Hourly' representation\n\n Returns:\n 'Schedule:Year', list of ['Schedule:Week:Daily'],\n list of ['Schedule:Day:Hourly']\n \"\"\"\n\n full_year = np.array(self.all_values) # array of shape (8760,)\n values = full_year.reshape(-1, 24) # 
shape (365, 24)\n\n # create unique days\n unique_days, nds = np.unique(values, axis=0, return_inverse=True)\n\n ep_days = []\n dict_day = {}\n count_day = 0\n for unique_day in unique_days:\n name = 'd_' + self.schName + '_' + '%03d' % count_day\n name, count_day = archetypal.check_unique_name('d', count_day,\n name,\n archetypal.settings.unique_schedules,\n suffix=True)\n dict_day[name] = unique_day\n\n archetypal.settings.unique_schedules.append(name)\n\n # Create idf_objects for schedule:day:hourly\n ep_day = self.idf.add_object(\n ep_object='Schedule:Day:Hourly'.upper(),\n save=False,\n **dict(Name=name,\n Schedule_Type_Limits_Name=self.schType,\n **{'Hour_{}'.format(i + 1): unique_day[i]\n for i in range(24)})\n )\n ep_days.append(ep_day)\n\n # create unique weeks from unique days\n unique_weeks, nwsi, nws, count = np.unique(\n full_year[:364 * 24, ...].reshape(-1, 168), return_index=True,\n axis=0, return_inverse=True, return_counts=True)\n\n # Appending unique weeks in dictionary with name and values of weeks as\n # keys\n # {'name_week': {'dayName':[]}}\n dict_week = {}\n count_week = 0\n for unique_week in unique_weeks:\n week_id = 'w_' + self.schName + '_' + '%03d' % count_week\n week_id, count_week = archetypal.check_unique_name('w',\n count_week,\n week_id,\n archetypal.settings.unique_schedules,\n suffix=True)\n archetypal.settings.unique_schedules.append(week_id)\n\n dict_week[week_id] = {}\n for i in list(range(0, 7)):\n day_of_week = unique_week[..., i * 24:(i + 1) * 24]\n for key in dict_day:\n if (day_of_week == dict_day[key]).all():\n dict_week[week_id]['day_{}'.format(i)] = key\n\n # Create idf_objects for schedule:week:daily\n list_day_of_week = ['Sunday', 'Monday', 'Tuesday',\n 'Wednesday', 'Thursday', 'Friday', 'Saturday']\n ordered_day_n = np.array([6, 0, 1, 2, 3, 4, 5])\n ordered_day_n = np.roll(ordered_day_n, self.startDayOfTheWeek)\n ep_weeks = []\n for week_id in dict_week:\n ep_week = self.idf.add_object(\n ep_object='Schedule:Week:Daily'.upper(),\n save=False,\n **dict(Name=week_id,\n **{'{}_ScheduleDay_Name'.format(\n weekday): dict_week[week_id][\n 'day_{}'.format(i)] for\n i, weekday in\n zip(ordered_day_n, list_day_of_week)\n },\n Holiday_ScheduleDay_Name=\n dict_week[week_id]['day_6'],\n SummerDesignDay_ScheduleDay_Name=\n dict_week[week_id]['day_1'],\n WinterDesignDay_ScheduleDay_Name=\n dict_week[week_id]['day_1'],\n CustomDay1_ScheduleDay_Name=\n dict_week[week_id]['day_2'],\n CustomDay2_ScheduleDay_Name=\n dict_week[week_id]['day_5'])\n )\n ep_weeks.append(ep_week)\n\n import itertools\n blocks = {}\n from_date = datetime(self.year, 1, 1)\n bincount = [sum(1 for _ in group)\n for key, group in itertools.groupby(nws + 1) if key]\n week_order = {i: v for i, v in enumerate(np.array(\n [key for key, group in itertools.groupby(nws + 1) if key]) - 1)}\n for i, (week_n, count) in enumerate(\n zip(week_order, bincount)):\n week_id = list(dict_week)[week_order[i]]\n to_date = from_date + timedelta(days=int(count * 7), hours=-1)\n blocks[i] = {}\n blocks[i]['week_id'] = week_id\n blocks[i]['from_day'] = from_date.day\n blocks[i]['end_day'] = to_date.day\n blocks[i]['from_month'] = from_date.month\n blocks[i]['end_month'] = to_date.month\n from_date = to_date + timedelta(hours=1)\n\n # If this is the last block, force end of year\n if i == len(bincount) - 1:\n blocks[i]['end_day'] = 31\n blocks[i]['end_month'] = 12\n\n new_dict = dict(Name=self.schName + '_',\n Schedule_Type_Limits_Name=self.schTypeLimitsName)\n for i in blocks:\n 
new_dict.update({\"ScheduleWeek_Name_{}\".format(i + 1):\n blocks[i]['week_id'],\n \"Start_Month_{}\".format(i + 1):\n blocks[i]['from_month'],\n \"Start_Day_{}\".format(i + 1):\n blocks[i]['from_day'],\n \"End_Month_{}\".format(i + 1):\n blocks[i]['end_month'],\n \"End_Day_{}\".format(i + 1):\n blocks[i]['end_day']})\n\n ep_year = self.idf.add_object(ep_object='Schedule:Year'.upper(),\n save=False, **new_dict)\n return ep_year, ep_weeks, ep_days\n\n def date_field_interpretation(self, field):\n \"\"\"Date Field Interpretation\n\n Args:\n field (str): The EnergyPlus Field Contents\n\n Returns:\n (datetime): The datetime object\n\n Info:\n See EnergyPlus documentation for more details:\n 1.6.8.1.2 Field: Start Date (Table 1.4: Date Field Interpretation)\n \"\"\"\n # < number > Weekday in Month\n formats = ['%m/%d', '%d %B', '%B %d', '%d %b', '%b %d']\n date = None\n for format_str in formats:\n # Tru to parse using each defined formats\n try:\n date = datetime.strptime(field, format_str)\n except:\n pass\n else:\n date = datetime(self.year, date.month, date.day)\n if date is None:\n # if the defined formats did not work, try the fancy parse\n try:\n date = self.parse_fancy_string(field)\n except:\n msg = \"the schedule '{sch}' contains a \" \\\n \"Field that is not understood: '{field}'\".format(\n sch=self.schName,\n field=field)\n raise ValueError(msg)\n else:\n return date\n else:\n return date\n\n def parse_fancy_string(self, field):\n \"\"\"Will try to parse cases such as `3rd Monday in February` or `Last\n Weekday In Month`\n\n Args:\n field (str): The EnergyPlus Field Contents\n\n Returns:\n (datetime): The datetime object\n \"\"\"\n import re\n\n # split the string at the term ' in '\n time, month = field.lower().split(' in ')\n month = datetime.strptime(month, '%B').month\n\n # split the first part into nth and dayofweek\n nth, dayofweek = time.split(' ')\n if 'last' in nth:\n nth = -1 # Use the last one\n else:\n nth = re.findall(r'\\d+', nth) # use the nth one\n nth = int(nth[0]) - 1 # python is zero-based\n\n weekday = {'monday': 0, 'tuesday': 1, 'wednesday': 2, 'thursday': 3,\n 'friday': 4, 'saturday': 5, 'sunday': 6}\n\n # parse the dayofweek eg. 
monday\n dayofweek = weekday.get(dayofweek, 6)\n\n # create list of possible days using Calendar\n import calendar\n c = calendar.Calendar(firstweekday=self.startDayOfTheWeek)\n monthcal = c.monthdatescalendar(self.year, month)\n\n # iterate though the month and get the nth weekday\n date = [day for week in monthcal for day in week if \\\n day.weekday() == dayofweek and \\\n day.month == month][nth]\n return datetime(date.year, date.month, date.day)\n\n def field_set(self, field, slicer_=None):\n \"\"\"helper function to return the proper slicer depending on the\n field_set value.\n\n Available values are:\n Weekdays, Weekends, Holidays, Alldays, SummerDesignDay,\n WinterDesignDay, Sunday, Monday, Tuesday, Wednesday, Thursday,\n Friday, Saturday, CustomDay1, CustomDay2, AllOtherDays\n\n Args:\n field (str): The EnergyPlus field set value.\n slicer_ (pd.Series): The persistent slicer for this schedule\n\n Returns:\n (indexer-like): Returns the appropriate indexer for the series.\n \"\"\"\n\n if field.lower() == 'weekdays':\n # return only days of weeks\n return lambda x: x.index.dayofweek < 5\n elif field.lower() == 'weekends':\n # return only weekends\n return lambda x: x.index.dayofweek >= 5\n elif field.lower() == 'alldays':\n log('For schedule \"{}\", the field-set \"AllDays\" may be overridden '\n 'by the \"AllOtherDays\" field-set'.format(\n self.schName), lg.WARNING)\n # return all days := equivalenet to .loc[:]\n return pd.IndexSlice[:]\n elif field.lower() == 'allotherdays':\n # return unused days (including special days). Uses the global\n # variable `slicer_`\n import operator\n if slicer_ is not None:\n return _conjunction(*[self.special_day(field, slicer_),\n ~slicer_], logical=operator.or_)\n else:\n raise NotImplementedError\n elif field.lower() == 'sunday':\n # return only sundays\n return lambda x: x.index.dayofweek == 6\n elif field.lower() == 'monday':\n # return only mondays\n return lambda x: x.index.dayofweek == 0\n elif field.lower() == 'tuesday':\n # return only Tuesdays\n return lambda x: x.index.dayofweek == 1\n elif field.lower() == 'wednesday':\n # return only Wednesdays\n return lambda x: x.index.dayofweek == 2\n elif field.lower() == 'thursday':\n # return only Thursdays\n return lambda x: x.index.dayofweek == 3\n elif field.lower() == 'friday':\n # return only Fridays\n return lambda x: x.index.dayofweek == 4\n elif field.lower() == 'saturday':\n # return only Saturdays\n return lambda x: x.index.dayofweek == 5\n elif field.lower() == 'summerdesignday':\n # return design_day(self, field)\n return None\n elif field.lower() == 'winterdesignday':\n # return design_day(self, field)\n return None\n elif field.lower() == 'holiday' or field.lower() == 'holidays':\n field = 'holiday'\n return self.special_day(field, slicer_)\n elif not self.strict:\n # If not strict, ignore missing field-sets such as CustomDay1\n return pd.IndexSlice[:]\n else:\n raise NotImplementedError(\n 'Archetypal does not yet support The '\n 'Field_set \"{}\"'.format(field))\n\n def __len__(self):\n \"\"\"returns the length of all values of the schedule\"\"\"\n return len(self.all_values)\n\n def __eq__(self, other):\n \"\"\"Overrides the default implementation\"\"\"\n if isinstance(other, Schedule):\n return self.all_values == other.all_values\n else:\n raise NotImplementedError\n\n def __ne__(self, other):\n return ~(self.__eq__(other))\n\n def __add__(self, other):\n if isinstance(other, Schedule):\n return self.all_values + other.all_values\n elif isinstance(other, list):\n return 
self.all_values + other\n else:\n raise NotImplementedError\n\n def __sub__(self, other):\n if isinstance(other, Schedule):\n return self.all_values - other.all_values\n elif isinstance(other, list):\n return self.all_values - other\n else:\n raise NotImplementedError\n\n def __mul__(self, other):\n if isinstance(other, Schedule):\n return self.all_values * other.all_values\n elif isinstance(other, list):\n return self.all_values * other\n else:\n raise NotImplementedError\n\n def get_sdow(self, start_day_of_week):\n \"\"\"Returns the start day of the week\"\"\"\n if start_day_of_week is None:\n return self.idf.day_of_week_for_start_day\n else:\n return start_day_of_week\n\n def special_day(self, field, slicer_):\n \"\"\"try to get the RunPeriodControl:SpecialDays for the corresponding\n Day Type\"\"\"\n sp_slicer_ = slicer_.copy()\n sp_slicer_.loc[:] = False\n special_day_types = ['holiday', 'customday1', 'customday2']\n\n dds = self.idf.idfobjects['RunPeriodControl:SpecialDays'.upper()]\n dd = [dd for dd in dds if dd.Special_Day_Type.lower() == field\n or dd.Special_Day_Type.lower() in special_day_types]\n if len(dd) > 0:\n slice = []\n for dd in dd:\n # can have more than one special day types\n data = dd.Start_Date\n ep_start_date = self.date_field_interpretation(data)\n ep_orig = datetime(self.year, 1, 1)\n days_to_speciald = (ep_start_date - ep_orig).days\n duration = int(dd.Duration)\n from_date = self.startDate + timedelta(days=days_to_speciald)\n to_date = from_date + timedelta(days=duration) + timedelta(\n hours=-1)\n\n sp_slicer_.loc[from_date:to_date] = True\n return sp_slicer_\n elif not self.strict:\n return sp_slicer_\n else:\n msg = 'Could not find a \"SizingPeriod:DesignDay\" object ' \\\n 'needed for schedule \"{}\" with Day Type \"{}\"'.format(\n self.schName, field.capitalize()\n )\n raise ValueError(msg)\n\n\ndef design_day(schedule, field):\n # try to get the SizingPeriod:DesignDay for the corresponding Day Type\n dds = schedule.idf.idfobjects['SizingPeriod:DesignDay'.upper()]\n dd = [dd for dd in dds if dd.Day_Type.lower() == field]\n if len(dd) > 0:\n # should have found only one design day matching the Day Type\n\n data = [dd[0].Month, dd[0].Day_of_Month]\n date = '/'.join([str(item).zfill(2) for item in data])\n date = schedule.date_field_interpretation(date)\n return lambda x: x.index == date\n else:\n msg = 'Could not find a \"SizingPeriod:DesignDay\" object ' \\\n 'needed for schedule \"{}\" with Day Type \"{}\"'.format(\n schedule.schName, field.capitalize()\n )\n raise ValueError(msg)\n\n\ndef _conjunction(*conditions, logical=np.logical_and):\n \"\"\"Applies a logical function on n conditions\"\"\"\n return functools.reduce(logical, conditions)\n\n\ndef _separator(sep):\n \"\"\"helper function to return the correct delimiter\"\"\"\n if sep == 'Comma':\n return ','\n elif sep == 'Tab':\n return '\\t'\n elif sep == 'Fixed':\n return None\n elif sep == 'Semicolon':\n return ';'\n else:\n return ','\n\n\ndef _how(how):\n \"\"\"Helper function to return the correct resampler\"\"\"\n if how.lower() == 'average':\n return 'mean'\n elif how.lower() == 'linear':\n return 'interpolate'\n elif how.lower() == 'no':\n return 'max'\n else:\n return 'max'\n" ]
[ [ "pandas.concat", "pandas.read_csv", "pandas.Series", "numpy.unique", "numpy.arange", "pandas.Grouper", "numpy.mean", "pandas.date_range", "numpy.array", "numpy.roll" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
sleep-yearning/magenta
[ "a03a14ef5a691ee9e3d336aa621281028dc5af32", "a03a14ef5a691ee9e3d336aa621281028dc5af32", "a03a14ef5a691ee9e3d336aa621281028dc5af32", "a03a14ef5a691ee9e3d336aa621281028dc5af32", "a03a14ef5a691ee9e3d336aa621281028dc5af32" ]
[ "magenta/models/score2perf/music_encoders_test.py", "magenta/models/drums_rnn/drums_rnn_generate.py", "magenta/models/shared/sequence_generator.py", "magenta/models/svg_vae/svg_utils.py", "magenta/models/onsets_frames_transcription/audio_label_data_utils.py" ]
[ "# Copyright 2020 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for Score2Perf music encoders.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tempfile\n\nimport magenta\nfrom magenta.models.score2perf import music_encoders\nfrom magenta.music import testing_lib\nfrom magenta.music.protobuf import music_pb2\nimport tensorflow.compat.v1 as tf\n\n\nclass MidiPerformanceEncoderTest(tf.test.TestCase):\n\n def testNumReservedIds(self):\n encoder = music_encoders.MidiPerformanceEncoder(\n steps_per_second=100, num_velocity_bins=32, min_pitch=21, max_pitch=108)\n self.assertEqual(2, encoder.num_reserved_ids)\n\n def testEncodeEmptyNoteSequence(self):\n encoder = music_encoders.MidiPerformanceEncoder(\n steps_per_second=100, num_velocity_bins=32, min_pitch=21, max_pitch=108)\n ids = encoder.encode_note_sequence(music_pb2.NoteSequence())\n self.assertEqual([], ids)\n\n def testEncodeEmptyNoteSequenceAddEos(self):\n encoder = music_encoders.MidiPerformanceEncoder(\n steps_per_second=100, num_velocity_bins=32, min_pitch=21, max_pitch=108,\n add_eos=True)\n ids = encoder.encode_note_sequence(music_pb2.NoteSequence())\n self.assertEqual([1], ids)\n\n def testEncodeNoteSequence(self):\n encoder = music_encoders.MidiPerformanceEncoder(\n steps_per_second=100, num_velocity_bins=32, min_pitch=21, max_pitch=108)\n\n ns = music_pb2.NoteSequence()\n testing_lib.add_track_to_sequence(\n ns, 0, [(60, 100, 0.0, 4.0), (64, 100, 0.0, 3.0), (67, 127, 1.0, 2.0)])\n ids = encoder.encode_note_sequence(ns)\n\n expected_ids = [\n 302, # VELOCITY(25)\n 41, # NOTE-ON(60)\n 45, # NOTE-ON(64)\n 277, # TIME-SHIFT(100)\n 309, # VELOCITY(32)\n 48, # NOTE-ON(67)\n 277, # TIME-SHIFT(100)\n 136, # NOTE-OFF(67)\n 277, # TIME-SHIFT(100)\n 133, # NOTE-OFF(64\n 277, # TIME-SHIFT(100)\n 129 # NOTE-OFF(60)\n ]\n\n self.assertEqual(expected_ids, ids)\n\n def testEncodeNoteSequenceAddEos(self):\n encoder = music_encoders.MidiPerformanceEncoder(\n steps_per_second=100, num_velocity_bins=32, min_pitch=21, max_pitch=108,\n add_eos=True)\n\n ns = music_pb2.NoteSequence()\n testing_lib.add_track_to_sequence(\n ns, 0, [(60, 100, 0.0, 4.0), (64, 100, 0.0, 3.0), (67, 127, 1.0, 2.0)])\n ids = encoder.encode_note_sequence(ns)\n\n expected_ids = [\n 302, # VELOCITY(25)\n 41, # NOTE-ON(60)\n 45, # NOTE-ON(64)\n 277, # TIME-SHIFT(100)\n 309, # VELOCITY(32)\n 48, # NOTE-ON(67)\n 277, # TIME-SHIFT(100)\n 136, # NOTE-OFF(67)\n 277, # TIME-SHIFT(100)\n 133, # NOTE-OFF(64\n 277, # TIME-SHIFT(100)\n 129, # NOTE-OFF(60)\n 1 # EOS\n ]\n\n self.assertEqual(expected_ids, ids)\n\n def testEncodeNoteSequenceNGrams(self):\n encoder = music_encoders.MidiPerformanceEncoder(\n steps_per_second=100, num_velocity_bins=32, min_pitch=21, max_pitch=108,\n ngrams=[(41, 45), (277, 309, 300), (309, 48), (277, 129, 130)])\n\n ns = music_pb2.NoteSequence()\n testing_lib.add_track_to_sequence(\n ns, 0, [(60, 100, 0.0, 4.0), (64, 100, 0.0, 3.0), (67, 127, 
1.0, 2.0)])\n ids = encoder.encode_note_sequence(ns)\n\n expected_ids = [\n 302, # VELOCITY(25)\n 310, # NOTE-ON(60), NOTE-ON(64)\n 277, # TIME-SHIFT(100)\n 312, # VELOCITY(32), NOTE-ON(67)\n 277, # TIME-SHIFT(100)\n 136, # NOTE-OFF(67)\n 277, # TIME-SHIFT(100)\n 133, # NOTE-OFF(64\n 277, # TIME-SHIFT(100)\n 129 # NOTE-OFF(60)\n ]\n\n self.assertEqual(expected_ids, ids)\n\n def testEncode(self):\n encoder = music_encoders.MidiPerformanceEncoder(\n steps_per_second=100, num_velocity_bins=32, min_pitch=21, max_pitch=108,\n ngrams=[(277, 129)])\n\n ns = music_pb2.NoteSequence()\n testing_lib.add_track_to_sequence(ns, 0, [(60, 97, 0.0, 1.0)])\n\n # Write NoteSequence to MIDI file as encoder takes in filename.\n with tempfile.NamedTemporaryFile(suffix='.mid') as f:\n magenta.music.sequence_proto_to_midi_file(ns, f.name)\n ids = encoder.encode(f.name)\n\n expected_ids = [\n 302, # VELOCITY(25)\n 41, # NOTE-ON(60)\n 310 # TIME-SHIFT(100), NOTE-OFF(60)\n ]\n\n self.assertEqual(expected_ids, ids)\n\n def testDecode(self):\n encoder = music_encoders.MidiPerformanceEncoder(\n steps_per_second=100, num_velocity_bins=32, min_pitch=21, max_pitch=108,\n ngrams=[(277, 129)])\n\n ids = [\n 302, # VELOCITY(25)\n 41, # NOTE-ON(60)\n 310 # TIME-SHIFT(100), NOTE-OFF(60)\n ]\n\n # Decode method returns MIDI filename, read and convert to NoteSequence.\n filename = encoder.decode(ids)\n ns = magenta.music.midi_file_to_sequence_proto(filename)\n\n # Remove default tempo & time signature.\n del ns.tempos[:]\n del ns.time_signatures[:]\n\n expected_ns = music_pb2.NoteSequence(ticks_per_quarter=220)\n testing_lib.add_track_to_sequence(expected_ns, 0, [(60, 97, 0.0, 1.0)])\n\n # Add source info fields.\n expected_ns.source_info.encoding_type = (\n music_pb2.NoteSequence.SourceInfo.MIDI)\n expected_ns.source_info.parser = (\n music_pb2.NoteSequence.SourceInfo.PRETTY_MIDI)\n\n self.assertEqual(expected_ns, ns)\n\n def testVocabSize(self):\n encoder = music_encoders.MidiPerformanceEncoder(\n steps_per_second=100, num_velocity_bins=32, min_pitch=21, max_pitch=108)\n self.assertEqual(310, encoder.vocab_size)\n\n def testVocabSizeNGrams(self):\n encoder = music_encoders.MidiPerformanceEncoder(\n steps_per_second=100, num_velocity_bins=32, min_pitch=21, max_pitch=108,\n ngrams=[(41, 45), (277, 309, 300), (309, 48), (277, 129, 130)])\n self.assertEqual(314, encoder.vocab_size)\n\n\nclass TextChordsEncoderTest(tf.test.TestCase):\n\n def testEncodeNoteSequence(self):\n encoder = music_encoders.TextChordsEncoder(steps_per_quarter=1)\n\n ns = music_pb2.NoteSequence()\n ns.tempos.add(qpm=60)\n testing_lib.add_chords_to_sequence(\n ns, [('C', 1), ('Dm', 3), ('Bdim', 4)])\n ns.total_time = 5.0\n ids = encoder.encode_note_sequence(ns)\n\n expected_ids = [\n 2, # no-chord\n 3, # C major\n 3, # C major\n 17, # D minor\n 50 # B diminished\n ]\n\n self.assertEqual(expected_ids, ids)\n\n def testEncode(self):\n encoder = music_encoders.TextChordsEncoder(steps_per_quarter=1)\n\n ids = encoder.encode('C G Am F')\n expected_ids = [\n 3, # C major\n 10, # G major\n 24, # A minor\n 8 # F major\n ]\n\n self.assertEqual(expected_ids, ids)\n\n def testVocabSize(self):\n encoder = music_encoders.TextChordsEncoder(steps_per_quarter=1)\n self.assertEqual(51, encoder.vocab_size)\n\n\nclass TextMelodyEncoderTest(tf.test.TestCase):\n\n def testEncodeNoteSequence(self):\n encoder = music_encoders.TextMelodyEncoder(\n steps_per_quarter=4, min_pitch=21, max_pitch=108)\n encoder_absolute = music_encoders.TextMelodyEncoderAbsolute(\n steps_per_second=4, 
min_pitch=21, max_pitch=108)\n\n ns = music_pb2.NoteSequence()\n ns.tempos.add(qpm=60)\n testing_lib.add_track_to_sequence(\n ns, 0,\n [(60, 127, 0.0, 0.25), (62, 127, 0.25, 0.75), (64, 127, 1.25, 2.0)])\n ids = encoder.encode_note_sequence(ns)\n ids_absolute = encoder_absolute.encode_note_sequence(ns)\n\n expected_ids = [\n 43, # ON(60)\n 45, # ON(62)\n 2, # HOLD(62)\n 3, # OFF(62)\n 2, # REST\n 47, # ON(64)\n 2, # HOLD(64)\n 2 # HOLD(64)\n ]\n\n self.assertEqual(expected_ids, ids)\n self.assertEqual(expected_ids, ids_absolute)\n\n def testEncode(self):\n encoder = music_encoders.TextMelodyEncoder(\n steps_per_quarter=4, min_pitch=21, max_pitch=108)\n\n ids = encoder.encode('60 -2 62 -1 64 -2')\n expected_ids = [\n 43, # ON(60)\n 2, # HOLD(60)\n 45, # ON(62)\n 3, # OFF(62)\n 47, # ON(64)\n 2 # HOLD(64)\n ]\n\n self.assertEqual(expected_ids, ids)\n\n def testVocabSize(self):\n encoder = music_encoders.TextMelodyEncoder(\n steps_per_quarter=4, min_pitch=21, max_pitch=108)\n self.assertEqual(92, encoder.vocab_size)\n\n\nclass FlattenedTextMelodyEncoderTest(tf.test.TestCase):\n\n def testEncodeNoteSequence(self):\n encoder = music_encoders.FlattenedTextMelodyEncoderAbsolute(\n steps_per_second=4, num_velocity_bins=127)\n\n ns = music_pb2.NoteSequence()\n ns.tempos.add(qpm=60)\n testing_lib.add_track_to_sequence(\n ns, 0,\n [(60, 127, 0.0, 0.25), (62, 15, 0.25, 0.75), (64, 32, 1.25, 2.0)])\n ids = encoder.encode_note_sequence(ns)\n expected_ids = [\n 130, # ON(vel=127)\n 18, # ON(vel=15)\n 2, # HOLD(62)\n 2, # REST\n 2, # REST\n 35, # ON(vel=32)\n 2, # HOLD(64)\n 2 # HOLD(64)\n ]\n\n self.assertEqual(expected_ids, ids)\n\n def testVocabSize(self):\n num_vel_bins = 12\n encoder = music_encoders.FlattenedTextMelodyEncoderAbsolute(\n steps_per_second=4, num_velocity_bins=num_vel_bins)\n expected = num_vel_bins + encoder.num_reserved_ids + 2\n self.assertEqual(expected, encoder.vocab_size)\n\n\nclass CompositeScoreEncoderTest(tf.test.TestCase):\n\n def testEncodeNoteSequence(self):\n encoder = music_encoders.CompositeScoreEncoder([\n music_encoders.TextChordsEncoder(steps_per_quarter=4),\n music_encoders.TextMelodyEncoder(\n steps_per_quarter=4, min_pitch=21, max_pitch=108)\n ])\n\n ns = music_pb2.NoteSequence()\n ns.tempos.add(qpm=60)\n testing_lib.add_chords_to_sequence(ns, [('C', 0.5), ('Dm', 1.0)])\n testing_lib.add_track_to_sequence(\n ns, 0,\n [(60, 127, 0.0, 0.25), (62, 127, 0.25, 0.75), (64, 127, 1.25, 2.0)])\n chord_ids, melody_ids = zip(*encoder.encode_note_sequence(ns))\n\n expected_chord_ids = [\n 2, # no-chord\n 2, # no-chord\n 3, # C major\n 3, # C major\n 17, # D minor\n 17, # D minor\n 17, # D minor\n 17 # D minor\n ]\n\n expected_melody_ids = [\n 43, # ON(60)\n 45, # ON(62)\n 2, # HOLD(62)\n 3, # OFF(62)\n 2, # REST\n 47, # ON(64)\n 2, # HOLD(64)\n 2 # HOLD(64)\n ]\n\n self.assertEqual(expected_chord_ids, list(chord_ids))\n self.assertEqual(expected_melody_ids, list(melody_ids))\n\n # TODO(iansimon): also test MusicXML encoding\n\n def testVocabSize(self):\n encoder = music_encoders.CompositeScoreEncoder([\n music_encoders.TextChordsEncoder(steps_per_quarter=4),\n music_encoders.TextMelodyEncoder(\n steps_per_quarter=4, min_pitch=21, max_pitch=108)\n ])\n self.assertEqual([51, 92], encoder.vocab_size)\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright 2020 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# 
http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Generate drum tracks from a trained checkpoint of a drums RNN model.\n\nUses flags to define operation.\n\"\"\"\n\nimport ast\nimport os\nimport time\n\nimport magenta\nfrom magenta.models.drums_rnn import drums_rnn_config_flags\nfrom magenta.models.drums_rnn import drums_rnn_model\nfrom magenta.models.drums_rnn import drums_rnn_sequence_generator\nfrom magenta.models.shared import sequence_generator\nfrom magenta.models.shared import sequence_generator_bundle\nfrom magenta.music.protobuf import generator_pb2\nfrom magenta.music.protobuf import music_pb2\nimport tensorflow.compat.v1 as tf\n\nFLAGS = tf.app.flags.FLAGS\ntf.app.flags.DEFINE_string(\n 'run_dir', None,\n 'Path to the directory where the latest checkpoint will be loaded from.')\ntf.app.flags.DEFINE_string(\n 'checkpoint_file', None,\n 'Path to the checkpoint file. run_dir will take priority over this flag.')\ntf.app.flags.DEFINE_string(\n 'bundle_file', None,\n 'Path to the bundle file. If specified, this will take priority over '\n 'run_dir and checkpoint_file, unless save_generator_bundle is True, in '\n 'which case both this flag and either run_dir or checkpoint_file are '\n 'required')\ntf.app.flags.DEFINE_boolean(\n 'save_generator_bundle', False,\n 'If true, instead of generating a sequence, will save this generator as a '\n 'bundle file in the location specified by the bundle_file flag')\ntf.app.flags.DEFINE_string(\n 'bundle_description', None,\n 'A short, human-readable text description of the bundle (e.g., training '\n 'data, hyper parameters, etc.).')\ntf.app.flags.DEFINE_string(\n 'output_dir', '/tmp/drums_rnn/generated',\n 'The directory where MIDI files will be saved to.')\ntf.app.flags.DEFINE_integer(\n 'num_outputs', 10,\n 'The number of drum tracks to generate. One MIDI file will be created for '\n 'each.')\ntf.app.flags.DEFINE_integer(\n 'num_steps', 128,\n 'The total number of steps the generated drum tracks should be, priming '\n 'drum track length + generated steps. Each step is a 16th of a bar.')\ntf.app.flags.DEFINE_string(\n 'primer_drums', '',\n 'A string representation of a Python list of tuples containing drum pitch '\n 'values. For example: '\n '\"[(36,42),(),(),(),(42,),(),(),()]\". If specified, this drum track will '\n 'be used as the priming drum track. If a priming drum track is not '\n 'specified, drum tracks will be generated from scratch.')\ntf.app.flags.DEFINE_string(\n 'primer_midi', '',\n 'The path to a MIDI file containing a drum track that will be used as a '\n 'priming drum track. If a primer drum track is not specified, drum tracks '\n 'will be generated from scratch.')\ntf.app.flags.DEFINE_float(\n 'qpm', None,\n 'The quarters per minute to play generated output at. If a primer MIDI is '\n 'given, the qpm from that will override this flag. If qpm is None, qpm '\n 'will default to 120.')\ntf.app.flags.DEFINE_float(\n 'temperature', 1.0,\n 'The randomness of the generated drum tracks. 
1.0 uses the unaltered '\n 'softmax probabilities, greater than 1.0 makes tracks more random, less '\n 'than 1.0 makes tracks less random.')\ntf.app.flags.DEFINE_integer(\n 'beam_size', 1,\n 'The beam size to use for beam search when generating drum tracks.')\ntf.app.flags.DEFINE_integer(\n 'branch_factor', 1,\n 'The branch factor to use for beam search when generating drum tracks.')\ntf.app.flags.DEFINE_integer(\n 'steps_per_iteration', 1,\n 'The number of steps to take per beam search iteration.')\ntf.app.flags.DEFINE_string(\n 'log', 'INFO',\n 'The threshold for what messages will be logged DEBUG, INFO, WARN, ERROR, '\n 'or FATAL.')\n\n\ndef get_checkpoint():\n \"\"\"Get the training dir or checkpoint path to be used by the model.\"\"\"\n if ((FLAGS.run_dir or FLAGS.checkpoint_file) and\n FLAGS.bundle_file and not FLAGS.save_generator_bundle):\n raise sequence_generator.SequenceGeneratorError(\n 'Cannot specify both bundle_file and run_dir or checkpoint_file')\n if FLAGS.run_dir:\n train_dir = os.path.join(os.path.expanduser(FLAGS.run_dir), 'train')\n return train_dir\n elif FLAGS.checkpoint_file:\n return os.path.expanduser(FLAGS.checkpoint_file)\n else:\n return None\n\n\ndef get_bundle():\n \"\"\"Returns a generator_pb2.GeneratorBundle object based read from bundle_file.\n\n Returns:\n Either a generator_pb2.GeneratorBundle or None if the bundle_file flag is\n not set or the save_generator_bundle flag is set.\n \"\"\"\n if FLAGS.save_generator_bundle:\n return None\n if FLAGS.bundle_file is None:\n return None\n bundle_file = os.path.expanduser(FLAGS.bundle_file)\n return sequence_generator_bundle.read_bundle_file(bundle_file)\n\n\ndef run_with_flags(generator):\n \"\"\"Generates drum tracks and saves them as MIDI files.\n\n Uses the options specified by the flags defined in this module.\n\n Args:\n generator: The DrumsRnnSequenceGenerator to use for generation.\n \"\"\"\n if not FLAGS.output_dir:\n tf.logging.fatal('--output_dir required')\n return\n FLAGS.output_dir = os.path.expanduser(FLAGS.output_dir)\n\n primer_midi = None\n if FLAGS.primer_midi:\n primer_midi = os.path.expanduser(FLAGS.primer_midi)\n\n if not tf.gfile.Exists(FLAGS.output_dir):\n tf.gfile.MakeDirs(FLAGS.output_dir)\n\n primer_sequence = None\n qpm = FLAGS.qpm if FLAGS.qpm else magenta.music.DEFAULT_QUARTERS_PER_MINUTE\n if FLAGS.primer_drums:\n primer_drums = magenta.music.DrumTrack(\n [frozenset(pitches)\n for pitches in ast.literal_eval(FLAGS.primer_drums)])\n primer_sequence = primer_drums.to_sequence(qpm=qpm)\n elif primer_midi:\n primer_sequence = magenta.music.midi_file_to_sequence_proto(primer_midi)\n if primer_sequence.tempos and primer_sequence.tempos[0].qpm:\n qpm = primer_sequence.tempos[0].qpm\n else:\n tf.logging.warning(\n 'No priming sequence specified. 
Defaulting to a single bass drum hit.')\n primer_drums = magenta.music.DrumTrack([frozenset([36])])\n primer_sequence = primer_drums.to_sequence(qpm=qpm)\n\n # Derive the total number of seconds to generate based on the QPM of the\n # priming sequence and the num_steps flag.\n seconds_per_step = 60.0 / qpm / generator.steps_per_quarter\n total_seconds = FLAGS.num_steps * seconds_per_step\n\n # Specify start/stop time for generation based on starting generation at the\n # end of the priming sequence and continuing until the sequence is num_steps\n # long.\n generator_options = generator_pb2.GeneratorOptions()\n if primer_sequence:\n input_sequence = primer_sequence\n # Set the start time to begin on the next step after the last note ends.\n if primer_sequence.notes:\n last_end_time = max(n.end_time for n in primer_sequence.notes)\n else:\n last_end_time = 0\n generate_section = generator_options.generate_sections.add(\n start_time=last_end_time + seconds_per_step,\n end_time=total_seconds)\n\n if generate_section.start_time >= generate_section.end_time:\n tf.logging.fatal(\n 'Priming sequence is longer than the total number of steps '\n 'requested: Priming sequence length: %s, Generation length '\n 'requested: %s',\n generate_section.start_time, total_seconds)\n return\n else:\n input_sequence = music_pb2.NoteSequence()\n input_sequence.tempos.add().qpm = qpm\n generate_section = generator_options.generate_sections.add(\n start_time=0,\n end_time=total_seconds)\n generator_options.args['temperature'].float_value = FLAGS.temperature\n generator_options.args['beam_size'].int_value = FLAGS.beam_size\n generator_options.args['branch_factor'].int_value = FLAGS.branch_factor\n generator_options.args[\n 'steps_per_iteration'].int_value = FLAGS.steps_per_iteration\n tf.logging.debug('input_sequence: %s', input_sequence)\n tf.logging.debug('generator_options: %s', generator_options)\n\n # Make the generate request num_outputs times and save the output as midi\n # files.\n date_and_time = time.strftime('%Y-%m-%d_%H%M%S')\n digits = len(str(FLAGS.num_outputs))\n for i in range(FLAGS.num_outputs):\n generated_sequence = generator.generate(input_sequence, generator_options)\n\n midi_filename = '%s_%s.mid' % (date_and_time, str(i + 1).zfill(digits))\n midi_path = os.path.join(FLAGS.output_dir, midi_filename)\n magenta.music.sequence_proto_to_midi_file(generated_sequence, midi_path)\n\n tf.logging.info('Wrote %d MIDI files to %s',\n FLAGS.num_outputs, FLAGS.output_dir)\n\n\ndef main(unused_argv):\n \"\"\"Saves bundle or runs generator based on flags.\"\"\"\n tf.logging.set_verbosity(FLAGS.log)\n\n bundle = get_bundle()\n\n if bundle:\n config_id = bundle.generator_details.id\n config = drums_rnn_model.default_configs[config_id]\n config.hparams.parse(FLAGS.hparams)\n else:\n config = drums_rnn_config_flags.config_from_flags()\n # Having too large of a batch size will slow generation down unnecessarily.\n config.hparams.batch_size = min(\n config.hparams.batch_size, FLAGS.beam_size * FLAGS.branch_factor)\n\n generator = drums_rnn_sequence_generator.DrumsRnnSequenceGenerator(\n model=drums_rnn_model.DrumsRnnModel(config),\n details=config.details,\n steps_per_quarter=config.steps_per_quarter,\n checkpoint=get_checkpoint(),\n bundle=bundle)\n\n if FLAGS.save_generator_bundle:\n bundle_filename = os.path.expanduser(FLAGS.bundle_file)\n if FLAGS.bundle_description is None:\n tf.logging.warning('No bundle description provided.')\n tf.logging.info('Saving generator bundle to %s', bundle_filename)\n 
generator.create_bundle_file(bundle_filename, FLAGS.bundle_description)\n else:\n run_with_flags(generator)\n\n\ndef console_entry_point():\n tf.app.run(main)\n\n\nif __name__ == '__main__':\n console_entry_point()\n", "# Copyright 2020 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Abstract class for sequence generators.\n\nProvides a uniform interface for interacting with generators for any model.\n\"\"\"\n\nimport abc\nimport os\nimport tempfile\n\nfrom magenta.music.protobuf import generator_pb2\nimport tensorflow.compat.v1 as tf\n\n\nclass SequenceGeneratorError(Exception): # pylint:disable=g-bad-exception-name\n \"\"\"Generic exception for sequence generation errors.\"\"\"\n pass\n\n\n# TODO(adarob): Replace with tf.saver.checkpoint_file_exists when released.\ndef _checkpoint_file_exists(checkpoint_file_or_prefix):\n \"\"\"Returns True if checkpoint file or files (for V2) exist.\"\"\"\n return (tf.gfile.Exists(checkpoint_file_or_prefix) or\n tf.gfile.Exists(checkpoint_file_or_prefix + '.index'))\n\n\nclass BaseSequenceGenerator(object):\n \"\"\"Abstract class for generators.\"\"\"\n\n __metaclass__ = abc.ABCMeta\n\n def __init__(self, model, details, checkpoint, bundle):\n \"\"\"Constructs a BaseSequenceGenerator.\n\n Args:\n model: An instance of BaseModel.\n details: A generator_pb2.GeneratorDetails for this generator.\n checkpoint: Where to look for the most recent model checkpoint. Either a\n directory to be used with tf.train.latest_checkpoint or the path to a\n single checkpoint file. Or None if a bundle should be used.\n bundle: A generator_pb2.GeneratorBundle object that contains both a\n checkpoint and a metagraph. 
Or None if a checkpoint should be used.\n\n Raises:\n SequenceGeneratorError: if neither checkpoint nor bundle is set.\n \"\"\"\n self._model = model\n self._details = details\n self._checkpoint = checkpoint\n self._bundle = bundle\n\n if self._checkpoint is None and self._bundle is None:\n raise SequenceGeneratorError(\n 'Either checkpoint or bundle must be set')\n if self._checkpoint is not None and self._bundle is not None:\n raise SequenceGeneratorError(\n 'Checkpoint and bundle cannot both be set')\n\n if self._bundle:\n if self._bundle.generator_details.id != self._details.id:\n raise SequenceGeneratorError(\n 'Generator id in bundle (%s) does not match this generator\\'s id '\n '(%s)' % (self._bundle.generator_details.id,\n self._details.id))\n\n self._initialized = False\n\n @property\n def details(self):\n \"\"\"Returns a GeneratorDetails description of this generator.\"\"\"\n return self._details\n\n @property\n def bundle_details(self):\n \"\"\"Returns the BundleDetails or None if checkpoint was used.\"\"\"\n if self._bundle is None:\n return None\n return self._bundle.bundle_details\n\n @abc.abstractmethod\n def _generate(self, input_sequence, generator_options):\n \"\"\"Implementation for sequence generation based on sequence and options.\n\n The implementation can assume that _initialize has been called before this\n method is called.\n\n Args:\n input_sequence: An input NoteSequence to base the generation on.\n generator_options: A GeneratorOptions proto with options to use for\n generation.\n Returns:\n The generated NoteSequence proto.\n \"\"\"\n pass\n\n def initialize(self):\n \"\"\"Builds the TF graph and loads the checkpoint.\n\n If the graph has already been initialized, this is a no-op.\n\n Raises:\n SequenceGeneratorError: If the checkpoint cannot be found.\n \"\"\"\n if self._initialized:\n return\n\n # Either self._checkpoint or self._bundle should be set.\n # This is enforced by the constructor.\n if self._checkpoint is not None:\n # Check if the checkpoint file exists.\n if not _checkpoint_file_exists(self._checkpoint):\n raise SequenceGeneratorError(\n 'Checkpoint path does not exist: %s' % (self._checkpoint))\n checkpoint_file = self._checkpoint\n # If this is a directory, try to determine the latest checkpoint in it.\n if tf.gfile.IsDirectory(checkpoint_file):\n checkpoint_file = tf.train.latest_checkpoint(checkpoint_file)\n if checkpoint_file is None:\n raise SequenceGeneratorError(\n 'No checkpoint file found in directory: %s' % self._checkpoint)\n if (not _checkpoint_file_exists(self._checkpoint) or\n tf.gfile.IsDirectory(checkpoint_file)):\n raise SequenceGeneratorError(\n 'Checkpoint path is not a file: %s (supplied path: %s)' % (\n checkpoint_file, self._checkpoint))\n self._model.initialize_with_checkpoint(checkpoint_file)\n else:\n # Write checkpoint and metagraph files to a temp dir.\n tempdir = None\n try:\n tempdir = tempfile.mkdtemp()\n checkpoint_filename = os.path.join(tempdir, 'model.ckpt')\n with tf.gfile.Open(checkpoint_filename, 'wb') as f:\n # For now, we support only 1 checkpoint file.\n # If needed, we can later change this to support sharded checkpoints.\n f.write(self._bundle.checkpoint_file[0])\n metagraph_filename = os.path.join(tempdir, 'model.ckpt.meta')\n with tf.gfile.Open(metagraph_filename, 'wb') as f:\n f.write(self._bundle.metagraph_file)\n\n self._model.initialize_with_checkpoint_and_metagraph(\n checkpoint_filename, metagraph_filename)\n finally:\n # Clean up the temp dir.\n if tempdir is not None:\n 
tf.gfile.DeleteRecursively(tempdir)\n self._initialized = True\n\n def close(self):\n \"\"\"Closes the TF session.\n\n If the session was already closed, this is a no-op.\n \"\"\"\n if self._initialized:\n self._model.close()\n self._initialized = False\n\n def __enter__(self):\n \"\"\"When used as a context manager, initializes the TF session.\"\"\"\n self.initialize()\n return self\n\n def __exit__(self, *args):\n \"\"\"When used as a context manager, closes the TF session.\"\"\"\n self.close()\n\n def generate(self, input_sequence, generator_options):\n \"\"\"Generates a sequence from the model based on sequence and options.\n\n Also initializes the TF graph if not yet initialized.\n\n Args:\n input_sequence: An input NoteSequence to base the generation on.\n generator_options: A GeneratorOptions proto with options to use for\n generation.\n\n Returns:\n The generated NoteSequence proto.\n \"\"\"\n self.initialize()\n return self._generate(input_sequence, generator_options)\n\n def create_bundle_file(self, bundle_file, bundle_description=None):\n \"\"\"Writes a generator_pb2.GeneratorBundle file in the specified location.\n\n Saves the checkpoint, metagraph, and generator id in one file.\n\n Args:\n bundle_file: Location to write the bundle file.\n bundle_description: A short, human-readable string description of this\n bundle.\n\n Raises:\n SequenceGeneratorError: if there is an error creating the bundle file.\n \"\"\"\n if not bundle_file:\n raise SequenceGeneratorError('Bundle file location not specified.')\n if not self.details.id:\n raise SequenceGeneratorError(\n 'Generator id must be included in GeneratorDetails when creating '\n 'a bundle file.')\n\n if not self.details.description:\n tf.logging.warn('Writing bundle file with no generator description.')\n if not bundle_description:\n tf.logging.warn('Writing bundle file with no bundle description.')\n\n self.initialize()\n\n tempdir = None\n try:\n tempdir = tempfile.mkdtemp()\n checkpoint_filename = os.path.join(tempdir, 'model.ckpt')\n\n self._model.write_checkpoint_with_metagraph(checkpoint_filename)\n\n if not os.path.isfile(checkpoint_filename):\n raise SequenceGeneratorError(\n 'Could not read checkpoint file: %s' % (checkpoint_filename))\n metagraph_filename = checkpoint_filename + '.meta'\n if not os.path.isfile(metagraph_filename):\n raise SequenceGeneratorError(\n 'Could not read metagraph file: %s' % (metagraph_filename))\n\n bundle = generator_pb2.GeneratorBundle()\n bundle.generator_details.CopyFrom(self.details)\n if bundle_description:\n bundle.bundle_details.description = bundle_description\n with tf.gfile.Open(checkpoint_filename, 'rb') as f:\n bundle.checkpoint_file.append(f.read())\n with tf.gfile.Open(metagraph_filename, 'rb') as f:\n bundle.metagraph_file = f.read()\n\n with tf.gfile.Open(bundle_file, 'wb') as f:\n f.write(bundle.SerializeToString())\n finally:\n if tempdir is not None:\n tf.gfile.DeleteRecursively(tempdir)\n", "# Copyright 2020 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the 
License.\n\n\"\"\"Defines the Material Design Icons Problem.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport io\nimport itertools\nimport numpy as np\n\nfrom PIL import Image\nfrom skimage import draw\n\nimport tensorflow.compat.v1 as tf\n\nSVG_PREFIX_BIG = ('<svg xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"'\n 'http://www.w3.org/1999/xlink\" width=\"256px\" height=\"256px\"'\n ' style=\"-ms-transform: rotate(360deg); -webkit-transform:'\n ' rotate(360deg); transform: rotate(360deg);\" '\n 'preserveAspectRatio=\"xMidYMid meet\" viewBox=\"0 0 24 24\">')\nPATH_PREFIX_1 = '<path d=\"'\nPATH_POSFIX_1 = '\" fill=\"currentColor\"/>'\nSVG_POSFIX = '</svg>'\n\nNUM_ARGS = {'v': 1, 'V': 1, 'h': 1, 'H': 1, 'a': 7, 'A': 7, 'l': 2, 'L': 2,\n 't': 2, 'T': 2, 'c': 6, 'C': 6, 'm': 2, 'M': 2, 's': 4, 'S': 4,\n 'q': 4, 'Q': 4, 'z': 0}\n# in order of arg complexity, with absolutes clustered\n# recall we don't handle all commands (see docstring)\nCMDS_LIST = 'zhvmltsqcaHVMLTSQCA'\nCMD_MAPPING = {cmd: i for i, cmd in enumerate(CMDS_LIST)}\n\n\n############################### GENERAL UTILS #################################\ndef grouper(iterable, batch_size, fill_value=None):\n \"\"\"Helper method for returning batches of size batch_size of a dataset.\"\"\"\n # grouper('ABCDEF', 3) -> 'ABC', 'DEF'\n args = [iter(iterable)] * batch_size\n return itertools.izip_longest(*args, fillvalue=fill_value)\n\n\ndef map_uni_to_alphanum(uni):\n \"\"\"Maps [0-9 A-Z a-z] to numbers 0-62.\"\"\"\n if 48 <= uni <= 57:\n return uni - 48\n elif 65 <= uni <= 90:\n return uni - 65 + 10\n return uni - 97 + 36\n\n\n############# UTILS FOR CONVERTING SFD/SPLINESETS TO SVG PATHS ################\ndef _get_spline(sfd):\n if 'SplineSet' not in sfd:\n return ''\n pro = sfd[sfd.index('SplineSet') + 10:]\n pro = pro[:pro.index('EndSplineSet')]\n return pro\n\n\ndef _spline_to_path_list(spline, height, replace_with_prev=False):\n \"\"\"Converts SplineSet to a list of tokenized commands in svg path.\"\"\"\n path = []\n prev_xy = []\n for line in spline.splitlines():\n if not line:\n continue\n tokens = line.split(' ')\n cmd = tokens[-2]\n if cmd not in 'cml':\n # COMMAND NOT RECOGNIZED.\n return []\n # assert cmd in 'cml', 'Command not recognized: {}'.format(cmd)\n args = tokens[:-2]\n args = [float(x) for x in args if x]\n\n if replace_with_prev and cmd in 'c':\n args[:2] = prev_xy\n prev_xy = args[-2:]\n\n new_y_args = []\n for i, a in enumerate(args):\n if i % 2 == 1:\n new_y_args.append((height - a))\n else:\n new_y_args.append((a))\n\n path.append([cmd.upper()] + new_y_args)\n return path\n\n\ndef sfd_to_path_list(single, replace_with_prev=False):\n \"\"\"Converts the given SFD glyph into a path.\"\"\"\n return _spline_to_path_list(_get_spline(single['sfd']),\n single['vwidth'], replace_with_prev)\n\n\n#################### UTILS FOR PROCESSING TOKENIZED PATHS #####################\ndef add_missing_cmds(path, remove_zs=False):\n \"\"\"Adds missing cmd tags to the commands in the svg.\"\"\"\n # For instance, the command 'a' takes 7 arguments, but some SVGs declare:\n # a 1 2 3 4 5 6 7 8 9 10 11 12 13 14\n # Which is 14 arguments. 
This function converts the above to the equivalent:\n # a 1 2 3 4 5 6 7 a 8 9 10 11 12 13 14\n #\n # Note: if remove_zs is True, this also removes any occurences of z commands.\n new_path = []\n for cmd in path:\n if not remove_zs or cmd[0] not in 'Zz':\n for new_cmd in add_missing_cmd(cmd):\n new_path.append(new_cmd)\n return new_path\n\n\ndef add_missing_cmd(command_list):\n \"\"\"Adds missing cmd tags to the given command list.\"\"\"\n # E.g.: given:\n # ['a', '0', '0', '0', '0', '0', '0', '0',\n # '0', '0', '0', '0', '0', '0', '0']\n # Converts to:\n # [['a', '0', '0', '0', '0', '0', '0', '0'],\n # ['a', '0', '0', '0', '0', '0', '0', '0']]\n # And returns a string that joins these elements with spaces.\n cmd_tag = command_list[0]\n args = command_list[1:]\n\n final_cmds = []\n for arg_batch in grouper(args, NUM_ARGS[cmd_tag]):\n final_cmds.append([cmd_tag] + list(arg_batch))\n\n if not final_cmds:\n # command has no args (e.g.: 'z')\n final_cmds = [[cmd_tag]]\n\n return final_cmds\n\n\ndef _normalize_args(arglist, norm, add=None, flip=False):\n \"\"\"Normalize the given args with the given norm value.\"\"\"\n new_arglist = []\n for i, arg in enumerate(arglist):\n new_arg = float(arg)\n\n if add is not None:\n add_to_x, add_to_y = add\n\n # This argument is an x-coordinate if even, y-coordinate if odd\n # except when flip == True\n if i % 2 == 0:\n new_arg += add_to_y if flip else add_to_x\n else:\n new_arg += add_to_x if flip else add_to_y\n\n new_arglist.append(str(24 * new_arg / norm))\n return new_arglist\n\n\ndef normalize_based_on_viewbox(path, viewbox):\n \"\"\"Normalizes all args in a path to a standard 24x24 viewbox.\"\"\"\n # Each SVG lives in a 2D plane. The viewbox determines the region of that\n # plane that gets rendered. For instance, some designers may work with a\n # viewbox that's 24x24, others with one that's 100x100, etc.\n\n # Suppose I design the the letter \"h\" in the Arial style using a 100x100\n # viewbox (let's call it icon A). Let's suppose the icon has height 75. Then,\n # I design the same character using a 20x20 viewbox (call this icon B), with\n # height 15 (=75% of 20). This means that, when rendered, both icons with look\n # exactly the same, but the scale of the commands each icon is using is\n # different. For instance, if icon A has a command like \"lineTo 100 100\", the\n # equivalent command in icon B will be \"lineTo 20 20\".\n\n # In order to avoid this problem and bring all real values to the same scale,\n # I scale all icons' commands to use a 24x24 viewbox. 
This function does this:\n # it converts a path that exists in the given viewbox into a standard 24x24\n # viewbox.\n viewbox = viewbox.split(' ')\n norm = max(int(viewbox[-1]), int(viewbox[-2]))\n\n if int(viewbox[-1]) > int(viewbox[-2]):\n add_to_y = 0\n add_to_x = abs(int(viewbox[-1]) - int(viewbox[-2])) / 2\n else:\n add_to_y = abs(int(viewbox[-1]) - int(viewbox[-2])) / 2\n add_to_x = 0\n\n new_path = []\n for command in path:\n if command[0] == 'a':\n new_path.append([command[0]] +\n _normalize_args(command[1:3], norm) +\n command[3:6] +\n _normalize_args(command[6:], norm))\n elif command[0] == 'A':\n new_path.append(\n [command[0]] +\n _normalize_args(command[1:3], norm) +\n command[3:6] +\n _normalize_args(command[6:], norm, add=(add_to_x, add_to_y)))\n elif command[0] == 'V':\n new_path.append(\n [command[0]] +\n _normalize_args(command[1:], norm, add=(add_to_x, add_to_y),\n flip=True))\n elif command[0] == command[0].upper():\n new_path.append(\n [command[0]] +\n _normalize_args(command[1:], norm, add=(add_to_x, add_to_y)))\n elif command[0] in 'zZ':\n new_path.append([command[0]])\n else:\n new_path.append([command[0]] + _normalize_args(command[1:], norm))\n\n return new_path\n\n\ndef _convert_args(args, curr_pos, cmd):\n \"\"\"Converts given args to relative values.\"\"\"\n # NOTE: glyphs only use a very small subset of commands (L, C, M, and Z -- I\n # believe). So I'm not handling A and H for now.\n if cmd in 'AH':\n raise NotImplementedError('These commands have >6 args (not supported).')\n\n new_args = []\n for i, arg in enumerate(args):\n x_or_y = i % 2\n if cmd == 'H':\n x_or_y = (i + 1) % 2\n new_args.append(str(float(arg) - curr_pos[x_or_y]))\n\n return new_args\n\n\ndef _update_curr_pos(curr_pos, cmd, start_of_path):\n \"\"\"Calculate the position of the pen after cmd is applied.\"\"\"\n if cmd[0] in 'ml':\n curr_pos = [curr_pos[0] + float(cmd[1]), curr_pos[1] + float(cmd[2])]\n if cmd[0] == 'm':\n start_of_path = curr_pos\n elif cmd[0] in 'z':\n curr_pos = start_of_path\n elif cmd[0] in 'h':\n curr_pos = [curr_pos[0] + float(cmd[1]), curr_pos[1]]\n elif cmd[0] in 'v':\n curr_pos = [curr_pos[0], curr_pos[1] + float(cmd[1])]\n elif cmd[0] in 'ctsqa':\n curr_pos = [curr_pos[0] + float(cmd[-2]), curr_pos[1] + float(cmd[-1])]\n\n return curr_pos, start_of_path\n\n\ndef make_relative(cmds):\n \"\"\"Convert commands in a path to relative positioning.\"\"\"\n curr_pos = (0.0, 0.0)\n start_of_path = (0.0, 0.0)\n new_cmds = []\n for cmd in cmds:\n if cmd[0].lower() == cmd[0]:\n new_cmd = cmd\n elif cmd[0].lower() == 'z':\n new_cmd = [cmd[0].lower()]\n else:\n new_cmd = [cmd[0].lower()] + _convert_args(cmd[1:], curr_pos, cmd=cmd[0])\n new_cmds.append(new_cmd)\n curr_pos, start_of_path = _update_curr_pos(curr_pos, new_cmd, start_of_path)\n return new_cmds\n\n\ndef _is_to_left_of(pt1, pt2):\n pt1_norm = (pt1[0] ** 2 + pt1[1] ** 2)\n pt2_norm = (pt2[0] ** 2 + pt2[1] ** 2)\n return pt1[1] < pt2[1] or (pt1_norm == pt2_norm and pt1[0] < pt2[0])\n\n\ndef _get_leftmost_point(path):\n \"\"\"Returns the leftmost, topmost point of the path.\"\"\"\n leftmost = (float('inf'), float('inf'))\n idx = -1\n\n for i, cmd in enumerate(path):\n if len(cmd) > 1:\n endpoint = cmd[-2:]\n if _is_to_left_of(endpoint, leftmost):\n leftmost = endpoint\n idx = i\n\n return leftmost, idx\n\n\ndef _separate_substructures(path):\n \"\"\"Returns a list of subpaths, each representing substructures the glyph.\"\"\"\n substructures = []\n curr = []\n for cmd in path:\n if cmd[0] in 'mM' and curr:\n 
substructures.append(curr)\n curr = []\n curr.append(cmd)\n if curr:\n substructures.append(curr)\n return substructures\n\n\ndef _is_clockwise(subpath):\n \"\"\"Returns whether the given subpath is clockwise-oriented.\"\"\"\n pts = [cmd[-2:] for cmd in subpath]\n det = 0\n for i in range(len(pts) - 1):\n det += np.linalg.det(pts[i:i + 2])\n return det > 0\n\n\ndef _make_clockwise(subpath):\n \"\"\"Inverts the cardinality of the given subpath.\"\"\"\n new_path = [subpath[0]]\n other_cmds = list(reversed(subpath[1:]))\n for i, cmd in enumerate(other_cmds):\n if i + 1 == len(other_cmds):\n where_we_were = subpath[0][-2:]\n else:\n where_we_were = other_cmds[i + 1][-2:]\n\n if len(cmd) > 3:\n new_cmd = [cmd[0], cmd[3], cmd[4], cmd[1], cmd[2],\n where_we_were[0], where_we_were[1]]\n else:\n new_cmd = [cmd[0], where_we_were[0], where_we_were[1]]\n\n new_path.append(new_cmd)\n return new_path\n\n\ndef canonicalize(path):\n \"\"\"Makes all paths start at top left, and go clockwise first.\"\"\"\n # convert args to floats\n path = [[x[0]] + map(float, x[1:]) for x in path]\n\n # canonicalize each subpath separately\n new_substructures = []\n for subpath in _separate_substructures(path):\n leftmost_point, leftmost_idx = _get_leftmost_point(subpath)\n reordered = ([['M', leftmost_point[0], leftmost_point[1]]] +\n subpath[leftmost_idx + 1:] + subpath[1:leftmost_idx + 1])\n new_substructures.append((reordered, leftmost_point))\n\n new_path = []\n first_substructure_done = False\n should_flip_cardinality = False\n for sp, _ in sorted(new_substructures, key=lambda x: (x[1][1], x[1][0])):\n if not first_substructure_done:\n # we're looking at the first substructure now, we can determine whether we\n # will flip the cardniality of the whole icon or not\n should_flip_cardinality = not _is_clockwise(sp)\n first_substructure_done = True\n\n if should_flip_cardinality:\n sp = _make_clockwise(sp)\n\n new_path.extend(sp)\n\n # convert args to strs\n path = [[x[0]] + map(str, x[1:]) for x in new_path]\n return path\n\n\n########## UTILS FOR CONVERTING TOKENIZED PATHS TO VECTORS ###########\ndef path_to_vector(path, categorical=False):\n \"\"\"Converts path's commands to a series of vectors.\"\"\"\n # Notes:\n # - The SimpleSVG dataset does not have any 't', 'q', 'Z', 'T', or 'Q'.\n # Thus, we don't handle those here.\n # - We also removed all 'z's.\n # - The x-axis-rotation argument to a commands is always 0 in this\n # dataset, so we ignore it\n\n # Many commands have args that correspond to args in other commands.\n # v __,__ _______________ ______________,_________ __,__ __,__ _,y\n # h __,__ _______________ ______________,_________ __,__ __,__ x,_\n # z __,__ _______________ ______________,_________ __,__ __,__ _,_\n # a rx,ry x-axis-rotation large-arc-flag,sweepflag __,__ __,__ x,y\n # l __,__ _______________ ______________,_________ __,__ __,__ x,y\n # c __,__ _______________ ______________,_________ x1,y1 x2,y2 x,y\n # m __,__ _______________ ______________,_________ __,__ __,__ x,y\n # s __,__ _______________ ______________,_________ __,__ x2,y2 x,y\n\n # So each command will be converted to a vector where the dimension is the\n # minimal number of arguments to all commands:\n # [rx, ry, large-arc-flag, sweepflag, x1, y1, x2, y2, x, y]\n # If a command does not output a certain arg, it is set to 0.\n # \"l 5,5\" becomes [0, 0, 0, 0, 0, 0, 0, 0, 5, 5]\n\n # Also note, as of now we also output an extra dimension at index 0, which\n # indicates which command is being outputted (integer).\n new_path = []\n 
for cmd in path:\n new_path.append(cmd_to_vector(cmd, categorical=categorical))\n return new_path\n\n\ndef cmd_to_vector(cmd_list, categorical=False):\n \"\"\"Converts the given command (given as a list) into a vector.\"\"\"\n # For description of how this conversion happens, see\n # path_to_vector docstring.\n cmd = cmd_list[0]\n args = cmd_list[1:]\n\n if not categorical:\n # integer, for MSE\n command = [float(CMD_MAPPING[cmd])]\n else:\n # one hot + 1 dim for EOS.\n command = [0.0] * (len(CMDS_LIST) + 1)\n command[CMD_MAPPING[cmd] + 1] = 1.0\n\n arguments = [0.0] * 10\n if cmd in 'hH':\n arguments[8] = float(args[0]) # x\n elif cmd in 'vV':\n arguments[9] = float(args[0]) # y\n elif cmd in 'mMlLtT':\n arguments[8] = float(args[0]) # x\n arguments[9] = float(args[1]) # y\n elif cmd in 'sSqQ':\n arguments[6] = float(args[0]) # x2\n arguments[7] = float(args[1]) # y2\n arguments[8] = float(args[2]) # x\n arguments[9] = float(args[3]) # y\n elif cmd in 'cC':\n arguments[4] = float(args[0]) # x1\n arguments[5] = float(args[1]) # y1\n arguments[6] = float(args[2]) # x2\n arguments[7] = float(args[3]) # y2\n arguments[8] = float(args[4]) # x\n arguments[9] = float(args[5]) # y\n elif cmd in 'aA':\n arguments[0] = float(args[0]) # rx\n arguments[1] = float(args[1]) # ry\n # we skip x-axis-rotation\n arguments[2] = float(args[3]) # large-arc-flag\n arguments[3] = float(args[4]) # sweep-flag\n # a does not have x1, y1, x2, y2 args\n arguments[8] = float(args[5]) # x\n arguments[9] = float(args[6]) # y\n\n return command + arguments\n\n\n################# UTILS FOR RENDERING PATH INTO IMAGE #################\ndef _cubicbezier(x0, y0, x1, y1, x2, y2, x3, y3, n=40):\n \"\"\"Return n points along cubiz bezier with given control points.\"\"\"\n # from http://rosettacode.org/wiki/Bitmap/B%C3%A9zier_curves/Cubic\n pts = []\n for i in range(n + 1):\n t = float(i) / float(n)\n a = (1. - t) ** 3\n b = 3. * t * (1. 
- t) ** 2\n c = 3.0 * t ** 2 * (1.0 - t)\n d = t ** 3\n\n x = float(a * x0 + b * x1 + c * x2 + d * x3)\n y = float(a * y0 + b * y1 + c * y2 + d * y3)\n pts.append((x, y))\n return zip(*pts)\n\n\ndef _update_pos(curr_pos, end_pos, absolute):\n if absolute:\n return end_pos\n return curr_pos[0] + end_pos[0], curr_pos[1] + end_pos[1]\n\n\ndef constant_color(*unused_args):\n return np.array([255, 255, 255])\n\n\ndef _render_cubic(canvas, curr_pos, c_args, absolute, color):\n \"\"\"Renders a cubic bezier curve in the given canvas.\"\"\"\n if not absolute:\n c_args[0] += curr_pos[0]\n c_args[1] += curr_pos[1]\n c_args[2] += curr_pos[0]\n c_args[3] += curr_pos[1]\n c_args[4] += curr_pos[0]\n c_args[5] += curr_pos[1]\n x, y = _cubicbezier(curr_pos[0], curr_pos[1],\n c_args[0], c_args[1],\n c_args[2], c_args[3],\n c_args[4], c_args[5])\n max_possible = len(canvas)\n x = [int(round(x_)) for x_ in x]\n y = [int(round(y_)) for y_ in y]\n within_range = lambda x: 0 <= x < max_possible\n filtered = [(x_, y_) for x_, y_ in zip(x, y)\n if within_range(x_) and within_range(y_)]\n if not filtered:\n return\n x, y = zip(*filtered)\n canvas[y, x, :] = color\n\n\ndef _render_line(canvas, curr_pos, l_args, absolute, color):\n \"\"\"Renders a line in the given canvas.\"\"\"\n end_point = l_args\n if not absolute:\n end_point[0] += curr_pos[0]\n end_point[1] += curr_pos[1]\n rr, cc, val = draw.line_aa(int(curr_pos[0]), int(curr_pos[1]),\n int(end_point[0]), int(end_point[1]))\n\n max_possible = len(canvas)\n within_range = lambda x: 0 <= x < max_possible\n filtered = [(x, y, v) for x, y, v in zip(rr, cc, val)\n if within_range(x) and within_range(y)]\n if not filtered:\n return\n rr, cc, val = zip(*filtered)\n val = [(v * color) for v in val]\n canvas[cc, rr, :] = val\n\n\ndef per_step_render(path, absolute=False, color=constant_color):\n \"\"\"Render the icon's edges, given its path.\"\"\"\n to_canvas_size = lambda l: [float(f) * (64. / 24.) for f in l]\n\n canvas = np.zeros((64, 64, 3))\n curr_pos = (0.0, 0.0)\n for i, cmd in enumerate(path):\n if not cmd: continue\n if cmd[0] in 'mM':\n curr_pos = _update_pos(curr_pos, to_canvas_size(cmd[-2:]), absolute)\n elif cmd[0] in 'cC':\n _render_cubic(canvas, curr_pos, to_canvas_size(cmd[1:]), absolute,\n color(i, 55))\n curr_pos = _update_pos(curr_pos, to_canvas_size(cmd[-2:]), absolute)\n elif cmd[0] in 'lL':\n _render_line(canvas, curr_pos, to_canvas_size(cmd[1:]), absolute,\n color(i, 55))\n curr_pos = _update_pos(curr_pos, to_canvas_size(cmd[1:]), absolute)\n\n return canvas\n\n\ndef zoom_out(path_list, add_baseline=0., per=22):\n \"\"\"Makes glyph slightly smaller in viewbox, makes some descenders visible.\"\"\"\n # assumes tensor is already unnormalized, and in long form\n new_path = []\n for command in path_list:\n args = []\n is_even = False\n for arg in command[1:]:\n if is_even:\n args.append(str(float(arg) - ((24. - per) / 24.) * 64. 
/ 4.))\n is_even = False\n else:\n args.append(str(float(arg) - add_baseline))\n is_even = True\n new_path.append([command[0]] + args)\n return new_path\n\n\n##################### UTILS FOR PROCESSING VECTORS ################\ndef append_eos(sample, categorical, feature_dim):\n if not categorical:\n eos = -1 * np.ones(feature_dim)\n else:\n eos = np.zeros(feature_dim)\n eos[0] = 1.0\n sample.append(eos)\n return sample\n\n\ndef make_simple_cmds_long(out):\n \"\"\"Converts svg decoder output to format required by some render functions.\"\"\"\n # out has 10 dims\n # the first 4 are respectively dims 0, 4, 5, 9 of the full 20-dim onehot vec\n # the latter 6 are the 6 last dims of the 10-dim arg vec\n shape_minus_dim = list(np.shape(out))[:-1]\n return np.concatenate([out[..., :1],\n np.zeros(shape_minus_dim + [3]),\n out[..., 1:3],\n np.zeros(shape_minus_dim + [3]),\n out[..., 3:4],\n np.zeros(shape_minus_dim + [14]),\n out[..., 4:]], -1)\n\n\n################# UTILS FOR CONVERTING VECTORS TO SVGS ########################\ndef vector_to_svg(vectors, stop_at_eos=False, categorical=False):\n \"\"\"Tranforms a given vector to an svg string.\"\"\"\n new_path = []\n for vector in vectors:\n if stop_at_eos:\n if categorical:\n try:\n is_eos = np.argmax(vector[:len(CMDS_LIST) + 1]) == 0\n except:\n raise Exception(vector)\n else:\n is_eos = vector[0] < -0.5\n\n if is_eos:\n break\n new_path.append(' '.join(vector_to_cmd(vector, categorical=categorical)))\n new_path = ' '.join(new_path)\n return SVG_PREFIX_BIG + PATH_PREFIX_1 + new_path + PATH_POSFIX_1 + SVG_POSFIX\n\n\ndef vector_to_cmd(vector, categorical=False, return_floats=False):\n \"\"\"Does the inverse transformation as cmd_to_vector().\"\"\"\n cast_fn = float if return_floats else str\n if categorical:\n command = vector[:len(CMDS_LIST) + 1],\n arguments = vector[len(CMDS_LIST) + 1:]\n cmd_idx = np.argmax(command) - 1\n else:\n command, arguments = vector[:1], vector[1:]\n cmd_idx = int(round(command[0]))\n\n if cmd_idx < -0.5:\n # EOS\n return []\n if cmd_idx >= len(CMDS_LIST):\n cmd_idx = len(CMDS_LIST) - 1\n\n cmd = CMDS_LIST[cmd_idx]\n cmd_list = [cmd]\n\n if cmd in 'hH':\n cmd_list.append(cast_fn(arguments[8])) # x\n elif cmd in 'vV':\n cmd_list.append(cast_fn(arguments[9])) # y\n elif cmd in 'mMlLtT':\n cmd_list.append(cast_fn(arguments[8])) # x\n cmd_list.append(cast_fn(arguments[9])) # y\n elif cmd in 'sSqQ':\n cmd_list.append(cast_fn(arguments[6])) # x2\n cmd_list.append(cast_fn(arguments[7])) # y2\n cmd_list.append(cast_fn(arguments[8])) # x\n cmd_list.append(cast_fn(arguments[9])) # y\n elif cmd in 'cC':\n cmd_list.append(cast_fn(arguments[4])) # x1\n cmd_list.append(cast_fn(arguments[5])) # y1\n cmd_list.append(cast_fn(arguments[6])) # x2\n cmd_list.append(cast_fn(arguments[7])) # y2\n cmd_list.append(cast_fn(arguments[8])) # x\n cmd_list.append(cast_fn(arguments[9])) # y\n elif cmd in 'aA':\n cmd_list.append(cast_fn(arguments[0])) # rx\n cmd_list.append(cast_fn(arguments[1])) # ry\n # x-axis-rotation is always 0\n cmd_list.append(cast_fn('0'))\n # the following two flags are binary.\n cmd_list.append(cast_fn(1 if arguments[2] > 0.5 else 0)) # large-arc-flag\n cmd_list.append(cast_fn(1 if arguments[3] > 0.5 else 0)) # sweep-flag\n cmd_list.append(cast_fn(arguments[8])) # x\n cmd_list.append(cast_fn(arguments[9])) # y\n\n return cmd_list\n\n\n############## UTILS FOR CONVERTING SVGS/VECTORS TO IMAGES ###################\ndef create_image_conversion_fn(max_outputs, categorical=False):\n \"\"\"Binds the number of outputs to the 
image conversion fn (to svg or png).\"\"\"\n\n def convert_to_svg(decoder_output):\n converted = []\n for example in decoder_output:\n if len(converted) == max_outputs:\n break\n converted.append(vector_to_svg(example, True, categorical=categorical))\n return np.array(converted)\n\n return convert_to_svg\n\n\n################### UTILS FOR CREATING TF SUMMARIES ##########################\ndef _make_encoded_image(img_tensor):\n pil_img = Image.fromarray(np.squeeze(img_tensor * 255).astype(np.uint8),\n mode='L')\n buff = io.BytesIO()\n pil_img.save(buff, format='png')\n encoded_image = buff.getvalue()\n return encoded_image\n\n\ndef make_text_summary_value(svg, tag):\n \"\"\"Converts the given str to a text tf.summary.Summary.Value.\"\"\"\n svg_proto = tf.make_tensor_proto(svg, tf.string)\n value = tf.summary.Summary.Value(tag=tag, tensor=svg_proto)\n value.metadata.plugin_data.plugin_name = 'text'\n return value\n\n\ndef make_image_summary(image_tensor, tag):\n \"\"\"Converts the given image tensor to a tf.summary.Summary.Image.\"\"\"\n encoded_image = _make_encoded_image(image_tensor)\n image_sum = tf.summary.Summary.Image(encoded_image_string=encoded_image)\n return tf.summary.Summary.Value(tag=tag, image=image_sum)\n", "# Copyright 2020 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"Utilities for managing wav files and labels for transcription.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport bisect\nimport math\n\nimport librosa\n\nfrom magenta.music import audio_io\nfrom magenta.music import constants\nfrom magenta.music import sequences_lib\nfrom magenta.music.protobuf import music_pb2\n\nimport numpy as np\nimport tensorflow.compat.v1 as tf\n\n\ndef velocity_range_from_sequence(ns):\n \"\"\"Derive a VelocityRange proto from a NoteSequence.\"\"\"\n velocities = [note.velocity for note in ns.notes]\n velocity_max = np.max(velocities) if velocities else 0\n velocity_min = np.min(velocities) if velocities else 0\n velocity_range = music_pb2.VelocityRange(min=velocity_min, max=velocity_max)\n return velocity_range\n\n\ndef find_inactive_ranges(note_sequence):\n \"\"\"Returns ranges where no notes are active in the note_sequence.\"\"\"\n start_sequence = sorted(\n note_sequence.notes, key=lambda note: note.start_time, reverse=True)\n end_sequence = sorted(\n note_sequence.notes, key=lambda note: note.end_time, reverse=True)\n\n notes_active = 0\n\n time = start_sequence[-1].start_time\n inactive_ranges = []\n if time > 0:\n inactive_ranges.append(0.)\n inactive_ranges.append(time)\n start_sequence.pop()\n notes_active += 1\n # Iterate through all note on events\n while start_sequence or end_sequence:\n if start_sequence and (start_sequence[-1].start_time <\n end_sequence[-1].end_time):\n if notes_active == 0:\n time = start_sequence[-1].start_time\n inactive_ranges.append(time)\n notes_active += 1\n start_sequence.pop()\n else:\n notes_active -= 1\n if notes_active == 0:\n time = 
end_sequence[-1].end_time\n inactive_ranges.append(time)\n end_sequence.pop()\n\n # if the last note is the same time as the end, don't add it\n # remove the start instead of creating a sequence with 0 length\n if inactive_ranges[-1] < note_sequence.total_time:\n inactive_ranges.append(note_sequence.total_time)\n else:\n inactive_ranges.pop()\n\n assert len(inactive_ranges) % 2 == 0\n\n inactive_ranges = [(inactive_ranges[2 * i], inactive_ranges[2 * i + 1])\n for i in range(len(inactive_ranges) // 2)]\n return inactive_ranges\n\n\ndef _last_zero_crossing(samples, start, end):\n \"\"\"Returns the last zero crossing in the window [start, end).\"\"\"\n samples_greater_than_zero = samples[start:end] > 0\n samples_less_than_zero = samples[start:end] < 0\n samples_greater_than_equal_zero = samples[start:end] >= 0\n samples_less_than_equal_zero = samples[start:end] <= 0\n\n # use np instead of python for loop for speed\n xings = np.logical_or(\n np.logical_and(samples_greater_than_zero[:-1],\n samples_less_than_equal_zero[1:]),\n np.logical_and(samples_less_than_zero[:-1],\n samples_greater_than_equal_zero[1:])).nonzero()[0]\n\n return xings[-1] + start if xings.size > 0 else None\n\n\ndef find_split_points(note_sequence, samples, sample_rate, min_length,\n max_length):\n \"\"\"Returns times at which there are no notes.\n\n The general strategy employed is to first check if there are places in the\n sustained pianoroll where no notes are active within the max_length window;\n if so the middle of the last gap is chosen as the split point.\n\n If not, then it checks if there are places in the pianoroll without sustain\n where no notes are active and then finds last zero crossing of the wav file\n and chooses that as the split point.\n\n If neither of those is true, then it chooses the last zero crossing within\n the max_length window as the split point.\n\n If there are no zero crossings in the entire window, then it basically gives\n up and advances time forward by max_length.\n\n Args:\n note_sequence: The NoteSequence to split.\n samples: The audio file as samples.\n sample_rate: The sample rate (samples/second) of the audio file.\n min_length: Minimum number of seconds in a split.\n max_length: Maximum number of seconds in a split.\n\n Returns:\n A list of split points in seconds from the beginning of the file.\n \"\"\"\n\n if not note_sequence.notes:\n return []\n\n end_time = note_sequence.total_time\n\n note_sequence_sustain = sequences_lib.apply_sustain_control_changes(\n note_sequence)\n\n ranges_nosustain = find_inactive_ranges(note_sequence)\n ranges_sustain = find_inactive_ranges(note_sequence_sustain)\n\n nosustain_starts = [x[0] for x in ranges_nosustain]\n sustain_starts = [x[0] for x in ranges_sustain]\n\n nosustain_ends = [x[1] for x in ranges_nosustain]\n sustain_ends = [x[1] for x in ranges_sustain]\n\n split_points = [0.]\n\n while end_time - split_points[-1] > max_length:\n max_advance = split_points[-1] + max_length\n\n # check for interval in sustained sequence\n pos = bisect.bisect_right(sustain_ends, max_advance)\n if pos < len(sustain_starts) and max_advance > sustain_starts[pos]:\n split_points.append(max_advance)\n\n # if no interval, or we didn't fit, try the unmodified sequence\n elif pos == 0 or sustain_starts[pos - 1] <= split_points[-1] + min_length:\n # no splits available, use non sustain notes and find close zero crossing\n pos = bisect.bisect_right(nosustain_ends, max_advance)\n\n if pos < len(nosustain_starts) and max_advance > nosustain_starts[pos]:\n # we 
fit, great, try to split at a zero crossing\n zxc_start = nosustain_starts[pos]\n zxc_end = max_advance\n last_zero_xing = _last_zero_crossing(\n samples, int(math.floor(zxc_start * sample_rate)),\n int(math.ceil(zxc_end * sample_rate)))\n if last_zero_xing:\n last_zero_xing = float(last_zero_xing) / sample_rate\n split_points.append(last_zero_xing)\n else:\n # give up and just return where there are at least no notes\n split_points.append(max_advance)\n\n else:\n # there are no good places to cut, so just pick the last zero crossing\n # check the entire valid range for zero crossings\n start_sample = int(\n math.ceil((split_points[-1] + min_length) * sample_rate)) + 1\n end_sample = start_sample + (max_length - min_length) * sample_rate\n last_zero_xing = _last_zero_crossing(samples, start_sample, end_sample)\n\n if last_zero_xing:\n last_zero_xing = float(last_zero_xing) / sample_rate\n split_points.append(last_zero_xing)\n else:\n # give up and advance by max amount\n split_points.append(max_advance)\n else:\n # only advance as far as max_length\n new_time = min(np.mean(ranges_sustain[pos - 1]), max_advance)\n split_points.append(new_time)\n\n if split_points[-1] != end_time:\n split_points.append(end_time)\n\n # ensure that we've generated a valid sequence of splits\n for prev, curr in zip(split_points[:-1], split_points[1:]):\n assert curr > prev\n assert curr - prev <= max_length + 1e-8\n if curr < end_time:\n assert curr - prev >= min_length - 1e-8\n assert end_time - split_points[-1] < max_length\n\n return split_points\n\n\ndef create_example(example_id, ns, wav_data, velocity_range=None):\n \"\"\"Creates a tf.train.Example proto for training or testing.\"\"\"\n if velocity_range is None:\n velocity_range = velocity_range_from_sequence(ns)\n\n # Ensure that all sequences for training and evaluation have gone through\n # sustain processing.\n sus_ns = sequences_lib.apply_sustain_control_changes(ns)\n\n example = tf.train.Example(\n features=tf.train.Features(\n feature={\n 'id':\n tf.train.Feature(\n bytes_list=tf.train.BytesList(\n value=[example_id.encode('utf-8')])),\n 'sequence':\n tf.train.Feature(\n bytes_list=tf.train.BytesList(\n value=[sus_ns.SerializeToString()])),\n 'audio':\n tf.train.Feature(\n bytes_list=tf.train.BytesList(value=[wav_data])),\n 'velocity_range':\n tf.train.Feature(\n bytes_list=tf.train.BytesList(\n value=[velocity_range.SerializeToString()])),\n }))\n return example\n\n\ndef process_record(wav_data,\n ns,\n example_id,\n min_length=5,\n max_length=20,\n sample_rate=16000,\n allow_empty_notesequence=False,\n load_audio_with_librosa=False):\n \"\"\"Split a record into chunks and create an example proto.\n\n To use the full length audio and notesequence, set min_length=0 and\n max_length=-1.\n\n Args:\n wav_data: audio data in WAV format.\n ns: corresponding NoteSequence.\n example_id: id for the example proto\n min_length: minimum length in seconds for audio chunks.\n max_length: maximum length in seconds for audio chunks.\n sample_rate: desired audio sample rate.\n allow_empty_notesequence: whether an empty NoteSequence is allowed.\n load_audio_with_librosa: Use librosa for sampling. 
Works with 24-bit wavs.\n\n Yields:\n Example protos.\n \"\"\"\n try:\n if load_audio_with_librosa:\n samples = audio_io.wav_data_to_samples_librosa(wav_data, sample_rate)\n else:\n samples = audio_io.wav_data_to_samples(wav_data, sample_rate)\n except audio_io.AudioIOReadError as e:\n print('Exception %s', e)\n return\n samples = librosa.util.normalize(samples, norm=np.inf)\n\n # Add padding to samples if notesequence is longer.\n pad_to_samples = int(math.ceil(ns.total_time * sample_rate))\n padding_needed = pad_to_samples - samples.shape[0]\n if padding_needed > 5 * sample_rate:\n raise ValueError(\n 'Would have padded {} more than 5 seconds to match note sequence total '\n 'time. ({} original samples, {} sample rate, {} sample seconds, '\n '{} sequence seconds) This likely indicates a problem with the source '\n 'data.'.format(\n example_id, samples.shape[0], sample_rate,\n samples.shape[0] / sample_rate, ns.total_time))\n samples = np.pad(samples, (0, max(0, padding_needed)), 'constant')\n\n if max_length == min_length:\n splits = np.arange(0, ns.total_time, max_length)\n elif max_length > 0:\n splits = find_split_points(ns, samples, sample_rate, min_length, max_length)\n else:\n splits = [0, ns.total_time]\n velocity_range = velocity_range_from_sequence(ns)\n\n for start, end in zip(splits[:-1], splits[1:]):\n if end - start < min_length:\n continue\n\n if start == 0 and end == ns.total_time:\n new_ns = ns\n else:\n new_ns = sequences_lib.extract_subsequence(ns, start, end)\n\n if not new_ns.notes and not allow_empty_notesequence:\n tf.logging.warning('skipping empty sequence')\n continue\n\n if start == 0 and end == ns.total_time:\n new_samples = samples\n else:\n # the resampling that happen in crop_wav_data is really slow\n # and we've already done it once, avoid doing it twice\n new_samples = audio_io.crop_samples(samples, sample_rate, start,\n end - start)\n new_wav_data = audio_io.samples_to_wav_data(new_samples, sample_rate)\n yield create_example(\n example_id, new_ns, new_wav_data, velocity_range=velocity_range)\n\n\ndef mix_sequences(individual_samples, sample_rate, individual_sequences):\n \"\"\"Mix multiple audio/notesequence pairs together.\n\n All sequences will be repeated until they are as long as the longest sequence.\n\n Note that the mixed sequence will contain only the (sustain-processed) notes\n from the individual sequences. 
All other control changes and metadata will not\n be preserved.\n\n Args:\n individual_samples: A list of audio samples to mix.\n sample_rate: Rate at which to interpret the samples\n individual_sequences: A list of NoteSequences to mix.\n\n Returns:\n mixed_samples: The mixed audio.\n mixed_sequence: The mixed NoteSequence.\n \"\"\"\n # Normalize samples and sequence velocities before mixing.\n # This ensures that the velocities/loudness of the individual samples\n # are treated equally.\n for i, samples in enumerate(individual_samples):\n individual_samples[i] = librosa.util.normalize(samples, norm=np.inf)\n for sequence in individual_sequences:\n velocities = [note.velocity for note in sequence.notes]\n velocity_max = np.max(velocities)\n for note in sequence.notes:\n note.velocity = int(\n (note.velocity / velocity_max) * constants.MAX_MIDI_VELOCITY)\n\n # Ensure that samples are always at least as long as their paired sequences.\n for i, (samples, sequence) in enumerate(\n zip(individual_samples, individual_sequences)):\n if len(samples) / sample_rate < sequence.total_time:\n padding = int(math.ceil(\n (sequence.total_time - len(samples) / sample_rate) * sample_rate))\n individual_samples[i] = np.pad(samples, [0, padding], 'constant')\n\n # Repeat each ns/wav pair to be as long as the longest wav.\n max_duration = np.max([len(s) for s in individual_samples]) / sample_rate\n\n extended_samples = []\n extended_sequences = []\n for samples, sequence in zip(individual_samples, individual_sequences):\n extended_samples.append(\n audio_io.repeat_samples_to_duration(samples, sample_rate, max_duration))\n extended_sequences.append(\n sequences_lib.repeat_sequence_to_duration(\n sequence, max_duration,\n sequence_duration=len(samples) / sample_rate))\n\n # Mix samples and sequences together\n mixed_samples = np.zeros_like(extended_samples[0])\n for samples in extended_samples:\n mixed_samples += samples / len(extended_samples)\n\n mixed_sequence = music_pb2.NoteSequence()\n mixed_sequence.ticks_per_quarter = constants.STANDARD_PPQ\n del mixed_sequence.notes[:]\n for sequence in extended_sequences:\n # Process sustain changes before copying notes.\n sus_sequence = sequences_lib.apply_sustain_control_changes(sequence)\n if sus_sequence.total_time > mixed_sequence.total_time:\n mixed_sequence.total_time = sus_sequence.total_time\n # TODO(fjord): Manage instrument/program numbers.\n mixed_sequence.notes.extend(sus_sequence.notes)\n\n return mixed_samples, mixed_sequence\n" ]
[ [ "tensorflow.compat.v1.test.main" ], [ "tensorflow.compat.v1.logging.fatal", "tensorflow.compat.v1.logging.debug", "tensorflow.compat.v1.gfile.Exists", "tensorflow.compat.v1.logging.set_verbosity", "tensorflow.compat.v1.app.flags.DEFINE_string", "tensorflow.compat.v1.app.flags.DEFINE_integer", "tensorflow.compat.v1.logging.warning", "tensorflow.compat.v1.logging.info", "tensorflow.compat.v1.gfile.MakeDirs", "tensorflow.compat.v1.app.flags.DEFINE_boolean", "tensorflow.compat.v1.app.flags.DEFINE_float", "tensorflow.compat.v1.app.run" ], [ "tensorflow.compat.v1.gfile.Exists", "tensorflow.compat.v1.logging.warn", "tensorflow.compat.v1.gfile.Open", "tensorflow.compat.v1.gfile.IsDirectory", "tensorflow.compat.v1.gfile.DeleteRecursively", "tensorflow.compat.v1.train.latest_checkpoint" ], [ "tensorflow.compat.v1.summary.Summary.Value", "numpy.squeeze", "tensorflow.compat.v1.make_tensor_proto", "numpy.ones", "numpy.linalg.det", "numpy.argmax", "numpy.shape", "tensorflow.compat.v1.summary.Summary.Image", "numpy.array", "numpy.zeros" ], [ "tensorflow.compat.v1.train.BytesList", "numpy.pad", "numpy.min", "numpy.arange", "tensorflow.compat.v1.logging.warning", "numpy.max", "numpy.zeros_like", "numpy.mean", "numpy.logical_and" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
energyinpython/pre-pyrepo
[ "92e44594e12d1110247f011e51734e5ce1fe0b8e", "92e44594e12d1110247f011e51734e5ce1fe0b8e" ]
[ "tests/test_correlations.py", "src/pyrepo/distance_metrics.py" ]
[ "from pyrepo import correlations as corrs\nfrom scipy.stats import pearsonr\nimport unittest\nimport numpy as np\n\n\n# Test for Spearman rank correlation coefficient\nclass Test_Spearman(unittest.TestCase):\n\n def test_spearman(self):\n \"\"\"Test based on paper Sałabun, W., & Urbaniak, K. (2020, June). A new coefficient of rankings similarity \n in decision-making problems. In International Conference on Computational Science \n (pp. 632-645). Springer, Cham.\"\"\"\n\n R = np.array([1, 2, 3, 4, 5])\n Q = np.array([1, 3, 2, 4, 5])\n test_result = corrs.spearman(R, Q)\n real_result = 0.9\n self.assertEqual(test_result, real_result)\n\n\n# Test for Weighted Spearman rank correlation coefficient\nclass Test_Weighted_Spearman(unittest.TestCase):\n\n def test_weighted_spearman(self):\n \"\"\"Test based on paper Sałabun, W., & Urbaniak, K. (2020, June). A new coefficient of rankings similarity \n in decision-making problems. In International Conference on Computational Science \n (pp. 632-645). Springer, Cham.\"\"\"\n\n R = np.array([1, 2, 3, 4, 5])\n Q = np.array([1, 3, 2, 4, 5])\n test_result = corrs.weighted_spearman(R, Q)\n real_result = 0.8833\n self.assertEqual(np.round(test_result, 4), real_result)\n\n\n# Test for Similarity rank coefficient WS\nclass Test_WS(unittest.TestCase):\n\n def test_ws(self):\n \"\"\"Test based on paper Sałabun, W., & Urbaniak, K. (2020, June). A new coefficient of rankings similarity \n in decision-making problems. In International Conference on Computational Science \n (pp. 632-645). Springer, Cham.\"\"\"\n\n R = np.array([1, 2, 3, 4, 5])\n Q = np.array([1, 3, 2, 4, 5])\n test_result = corrs.WS_coeff(R, Q)\n real_result = 0.8542\n self.assertEqual(np.round(test_result, 4), real_result)\n\n\n# Test for Pearson correlation coefficient\nclass Test_Pearson(unittest.TestCase):\n\n def test_pearson(self):\n \"\"\"Test based on paper Sałabun, W., & Urbaniak, K. (2020, June). A new coefficient of rankings similarity \n in decision-making problems. In International Conference on Computational Science \n (pp. 632-645). 
Springer, Cham.\"\"\"\n\n R = np.array([1, 2, 3, 4, 5])\n Q = np.array([1, 3, 2, 4, 5])\n test_result = corrs.pearson_coeff(R, Q)\n real_result, _ = pearsonr(R, Q)\n self.assertEqual(test_result, real_result)\n\n\ndef main():\n test_spearman_coeff = Test_Spearman()\n test_spearman_coeff.test_spearman()\n\n test_weighted_spearman_coeff = Test_Weighted_Spearman()\n test_weighted_spearman_coeff.test_weighted_spearman()\n\n test_pearson_coeff = Test_Pearson()\n test_pearson_coeff.test_pearson()\n\n test_ws = Test_WS()\n test_ws.test_ws()\n\n\nif __name__ == '__main__':\n main()", "import numpy as np\nimport copy\nimport itertools\n\n\n# euclidean distance\ndef euclidean(A, B):\n \"\"\"\n Calculate Euclidean distance between two vectors `A` and `B`.\n\n Parameters\n ----------\n A : ndarray\n First vector containing values\n B : ndarray\n Second vector containing values\n\n Returns\n -------\n float\n distance value between two vetors\n \"\"\"\n tmp = np.sum(np.square(A - B))\n return np.sqrt(tmp)\n\n# manhattan distance\ndef manhattan(A, B):\n \"\"\"\n Calculate Manhattan (Taxicab) distance between two vectors `A` and `B`.\n\n Parameters\n ----------\n A : ndarray\n First vector containing values\n B : ndarray\n Second vector containing values\n\n Returns\n -------\n float\n distance value between two vetors\n \"\"\"\n tmp = np.sum(np.abs(A - B))\n return tmp\n\n\n# for hausdorff distance\ndef hausdorff_distance(A, B):\n min_h = np.inf\n for i, j in itertools.product(range(len(A)), range(len(B))):\n d = euclidean(A[i], B[j])\n if d < min_h:\n min_h = d\n min_ind = j\n\n max_h = -np.inf\n for i in range(len(A)):\n d = euclidean(A[i], B[min_ind])\n if d > max_h:\n max_h = d\n\n return max_h\n\n\n# hausdorff distance\n\"\"\"\n Calculate Hausdorff distance between two vectors `A` and `B`.\n\n Parameters\n ----------\n A : ndarray\n First vector containing values\n B : ndarray\n Second vector containing values\n\n Returns\n -------\n float\n distance value between two vetors\n \"\"\"\ndef hausdorff(A, B):\n ah = hausdorff_distance(A, B)\n bh = hausdorff_distance(B, A)\n return max(ah, bh)\n\n\n# correlation distance\ndef correlation(A, B):\n \"\"\"\n Calculate Correlation distance between two vectors `A` and `B`.\n\n Parameters\n ----------\n A : ndarray\n First vector containing values\n B : ndarray\n Second vector containing values\n\n Returns\n -------\n float\n distance value between two vetors\n \"\"\"\n numerator = np.sum((A - np.mean(A)) * (B - np.mean(B)))\n denominator = np.sqrt(np.sum((A - np.mean(A)) ** 2)) * np.sqrt(np.sum((B - np.mean(B)) ** 2))\n if denominator == 0:\n denominator = 1\n return 1 - (numerator / denominator)\n\n\n# chebyshev distance\ndef chebyshev(A, B):\n \"\"\"\n Calculate Chebyshev distance between two vectors `A` and `B`.\n\n Parameters\n ----------\n A : ndarray\n First vector containing values\n B : ndarray\n Second vector containing values\n\n Returns\n -------\n float\n distance value between two vetors\n \"\"\"\n max_h = -np.inf\n for i, j in itertools.product(range(len(A)), range(len(B))):\n d = np.abs(A[i] - B[j])\n if d > max_h:\n max_h = d\n\n return max_h\n\n\n# standardized euclidean distance\ndef std_euclidean(A, B):\n \"\"\"\n Calculate Standardized Euclidean distance between two vectors `A` and `B`.\n\n Parameters\n ----------\n A : ndarray\n First vector containing values\n B : ndarray\n Second vector containing values\n\n Returns\n -------\n float\n distance value between two vetors\n \"\"\"\n tab_std = np.vstack((A, B))\n stdv = 
np.sum(np.square(tab_std - np.mean(tab_std, axis = 0)), axis = 0)\n stdv = np.sqrt(stdv / tab_std.shape[0])\n stdv[stdv == 0] = 1\n tmp = np.sum(np.square((A - B) / stdv))\n return np.sqrt(tmp)\n\n\n# cosine distance\ndef cosine(A, B):\n \"\"\"\n Calculate Cosine distance between two vectors `A` and `B`.\n\n Parameters\n ----------\n A : ndarray\n First vector containing values\n B : ndarray\n Second vector containing values\n\n Returns\n -------\n float\n distance value between two vetors\n \"\"\"\n numerator = np.sum(A * B)\n denominator = (np.sqrt(np.sum(np.square(A)))) * (np.sqrt(np.sum(np.square(B))))\n if denominator == 0:\n denominator = 1\n return 1 - (numerator / denominator)\n\n\n# cosine similarity measure\ndef csm(A, B):\n \"\"\"\n Calculate Cosine similarity measure of distance between two vectors `A` and `B`.\n\n Parameters\n ----------\n A : ndarray\n First vector containing values\n B : ndarray\n Second vector containing values\n\n Returns\n -------\n float\n distance value between two vetors\n \"\"\"\n numerator = np.sum(A * B)\n denominator = (np.sqrt(np.sum(A))) * (np.sqrt(np.sum(B)))\n if denominator == 0:\n denominator = 1\n return numerator / denominator\n\n\n# squared euclidean distance\ndef squared_euclidean(A, B):\n \"\"\"\n Calculate Squared Euclidean distance between two vectors `A` and `B`.\n\n Parameters\n ----------\n A : ndarray\n First vector containing values\n B : ndarray\n Second vector containing values\n\n Returns\n -------\n float\n distance value between two vetors\n \"\"\"\n tmp = np.sum(np.square(A - B))\n return tmp\n\n\n# sorensen or bray-curtis distance\ndef bray_curtis(A, B):\n \"\"\"\n Calculate Bray-Curtis distance between two vectors `A` and `B`.\n\n Parameters\n ----------\n A : ndarray\n First vector containing values\n B : ndarray\n Second vector containing values\n\n Returns\n -------\n float\n distance value between two vetors\n \"\"\"\n numerator = np.sum(np.abs(A - B))\n denominator = np.sum(A + B)\n if denominator == 0:\n denominator = 1\n return numerator / denominator\n\n\n# canberra distance\ndef canberra(A, B):\n \"\"\"\n Calculate Canberra distance between two vectors `A` and `B`.\n\n Parameters\n ----------\n A : ndarray\n First vector containing values\n B : ndarray\n Second vector containing values\n\n Returns\n -------\n float\n distance value between two vetors\n \"\"\"\n numerator = np.abs(A - B)\n denominator = A + B\n denominator[denominator == 0] = 1\n tmp = np.sum(numerator / denominator)\n return tmp\n\n\n# lorentzian distance\ndef lorentzian(A, B):\n \"\"\"\n Calculate Lorentzian distance between two vectors `A` and `B`.\n\n Parameters\n ----------\n A : ndarray\n First vector containing values\n B : ndarray\n Second vector containing values\n\n Returns\n -------\n float\n distance value between two vetors\n \"\"\"\n tmp = np.sum(np.log(1 + np.abs(A - B)))\n return tmp\n\n\n# jaccard distance\ndef jaccard(A, B):\n \"\"\"\n Calculate Jaccard distance between two vectors `A` and `B`.\n\n Parameters\n ----------\n A : ndarray\n First vector containing values\n B : ndarray\n Second vector containing values\n\n Returns\n -------\n float\n distance value between two vetors\n \"\"\"\n numerator = np.sum(np.square(A - B))\n denominator = np.sum(A ** 2) + np.sum(B ** 2) - np.sum(A * B)\n if denominator == 0:\n denominator = 1\n return numerator / denominator\n\n\n# dice distance\ndef dice(A, B):\n \"\"\"\n Calculate Dice distance between two vectors `A` and `B`.\n\n Parameters\n ----------\n A : ndarray\n First vector 
containing values\n B : ndarray\n Second vector containing values\n\n Returns\n -------\n float\n distance value between two vetors\n \"\"\"\n numerator = np.sum(np.square(A - B))\n denominator = np.sum(A ** 2) + np.sum(B ** 2)\n if denominator == 0:\n denominator = 1\n return numerator / denominator\n\n\n# bhattacharyya distance\ndef bhattacharyya(A, B):\n \"\"\"\n Calculate Bhattacharyya distance between two vectors `A` and `B`.\n\n Parameters\n ----------\n A : ndarray\n First vector containing values\n B : ndarray\n Second vector containing values\n\n Returns\n -------\n float\n distance value between two vetors\n \"\"\"\n value = (np.sum(np.sqrt(A * B)))**2\n if value == 0:\n tmp = 0\n else:\n tmp = -np.log(value)\n return tmp\n\n\n# hellinger distance\ndef hellinger(A, B):\n \"\"\"\n Calculate Hellinger distance between two vectors `A` and `B`.\n\n Parameters\n ----------\n A : ndarray\n First vector containing values\n B : ndarray\n Second vector containing values\n\n Returns\n -------\n float\n distance value between two vetors\n \"\"\"\n value = 1 - np.sum(np.sqrt(A * B))\n if value < 0:\n value = 0\n return 2 * np.sqrt(value)\n\n\n# matusita distance\ndef matusita(A, B):\n \"\"\"\n Calculate Matusita distance between two vectors `A` and `B`.\n\n Parameters\n ----------\n A : ndarray\n First vector containing values\n B : ndarray\n Second vector containing values\n\n Returns\n -------\n float\n distance value between two vetors\n \"\"\"\n value = 2 - 2 * (np.sum(np.sqrt(A * B)))\n if value < 0:\n value = 0\n return np.sqrt(value)\n\n\n# squared-chord distance\ndef squared_chord(A, B):\n \"\"\"\n Calculate Squared-Chord distance between two vectors `A` and `B`.\n\n Parameters\n ----------\n A : ndarray\n First vector containing values\n B : ndarray\n Second vector containing values\n\n Returns\n -------\n float\n distance value between two vetors\n \"\"\"\n tmp = np.sum(np.square(np.sqrt(A) - np.sqrt(B)))\n return tmp\n\n\n# pearson chi-square distance\ndef pearson_chi_square(A, B):\n \"\"\"\n Calculate Pearson Chi Square distance between two vectors `A` and `B`.\n\n Parameters\n ----------\n A : ndarray\n First vector containing values\n B : ndarray\n Second vector containing values\n\n Returns\n -------\n float\n distance value between two vetors\n \"\"\"\n numerator = np.square(A - B)\n denominator = copy.deepcopy(B)\n denominator[denominator == 0] = 1\n tmp = np.sum(numerator / denominator)\n return tmp\n\n# squared chi-square distance\ndef squared_chi_square(A, B):\n \"\"\"\n Calculate Squared Chi Sqaure distance between two vectors `A` and `B`.\n\n Parameters\n ----------\n A : ndarray\n First vector containing values\n B : ndarray\n Second vector containing values\n\n Returns\n -------\n float\n distance value between two vetors\n \"\"\"\n numerator = np.square(A - B)\n denominator = A + B\n denominator[denominator == 0] = 1\n tmp = np.sum(numerator / denominator)\n return tmp" ]
[ [ "numpy.round", "scipy.stats.pearsonr", "numpy.array" ], [ "numpy.square", "numpy.log", "numpy.abs", "numpy.sqrt", "numpy.mean", "numpy.sum", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
YifanQie/Deep_Learning_for_Manufacturing
[ "9ba19e41f69c561b04b8573ab9c52c0969f45bfd", "9ba19e41f69c561b04b8573ab9c52c0969f45bfd" ]
[ "core/assembly_system.py", "core/model_deployment.py" ]
[ "import numpy as np\nimport pandas as pd\n\"\"\" Contains core classes and methods for initializing a Assembly System, the inputs are provided in assemblyconfig file in utilities\"\"\"\n\nclass AssemblySystem:\n\t\"\"\"Assembly System Class\n\n\t\t:param assembly_type: Type of assembly Single-Station/Multi-Station\n\t\t:type assembly_system: str (required)\n\n\t\t:param assembly_kccs: Number of KCCs for the assembly\n\t\t:type assembly_kccs: int (required)\n\n\t\t:param assembly_kpis: Number of Kpis for the assembly\n\t\t:type assembly_kpis: int (required) \n\t\"\"\"\n\tdef __init__(self,assembly_type,assembly_kccs,assembly_kpis):\n\t\tself.assembly_type=assembly_type\n\t\tself.assembly_kccs=assembly_kccs\n\t\tself.assembly_kpis=assembly_kpis\n\nclass PartType(AssemblySystem):\n\t\"\"\"Part System Class, inherits the Assembly System Class, additional parameters for this class include\n\t\t\n\t\t:param voxel_dim: Dimension of the voxel\n\t\t:type assembly_system: int (required)\n\n\t\t:param voxel_dim: Dimension of the voxel Channel, single channel output - 1 or multi channel - 2,3 (use 1 for deviations in one direction, 2 or 3 if data for multiple deviation directions are present)\n\t\t:type assembly_system: int (required)\n\n\t\t:param voxel_dim: Dimension of the voxel\n\t\t:type assembly_system: int (required)\n\n\t\tThe class contains two functions - get_nominal_cop and get_nominal_cop_database\n\t\"\"\"\n\tdef __init__(self,assembly_type,assembly_kccs,assembly_kpis,part_name,part_type,voxel_dim,voxel_channels,point_dim):\n\t\tsuper().__init__(assembly_type,assembly_kccs,assembly_kpis)\n\t\tself.part_name=part_name\n\t\tself.part_type=part_type\n\t\tself.voxel_dim=voxel_dim\n\t\tself.voxel_channels=voxel_channels\n\t\tself.point_dim=point_dim\n\t\t\n\n\tdef get_nominal_cop(self,file_name):\n\t\t\"\"\"Import nominal cloud-of-point of the assembly from a text/csv file\n\n\t\t\t:param file_name: Name of the input file\n\t\t\t:type file_name: str (required)\n\n\t\t\t:returns: numpy array of nominal COP\n\t\t\t:rtype: numpy.array [point_dim,3]\n\t\t\"\"\"\n\t\tdf=pd.read_csv(file_name, sep=',',header=None)\n\t\tnominal_cop=df.values\n\t\treturn nominal_cop\n\n\tdef get_nominal_cop_database(self,conn_str,table_name):\n\t\t\"\"\"Import nominal cloud-of-point of the assembly from a SQL database assumes the table only contains three columns of the nominal COPs in order of the Node IDs\t\t\n\t\t\t\n\t\t\t:param conn_str: Connection String for Database\n\t\t\t:type conn_str: str (required)\n\n\t\t\t:param table_name: Name of table in the database\n\t\t\t:type table_name: str (required)\n\n\t\t\t:returns: numpy array of dim points * 3\n\t\t\t:rtype: numpy.array [point_dim,3]\n\t\t\"\"\"\n\t\tengine = create_engine(conn_str)\n\t\tsquery ='select * from '+table_name\n\t\tdf_nom = pd.read_sql_query(squery,con=engine)\n\t\tdf_nom = df_nom.values\n\t\treturn df_nom\n\nclass VRMSimulationModel(PartType):\n\t\n\t\"\"\"VRM Simulation Model class inherits the part type class, additional parameters of this class include\n\n\t\t:param noise_level: The level of artificial noise to be added to simulated data, typically set to 0.1 mm from the measurement system class depending on the scanner\n\t\t:type noise_level: float (required)\n\n\t\t:param noise_type: The type of noise to be added, can be Gaussian or uniform , for Gaussian noise_level is set as standard deviation and mean as zero for uniform the min and max are set -noise_level and +noise_level respectively\n\t\t:type noise_type: str 
(optional)\n\n\t\t:param convergency_flag: Flag to denote if the simulation model had converged while simulating, is set to 1 by default\n\t\t:type convergency_flag: int (optional)\n\n\t\tThe class contains one function kpi_calculator that needs to be defined by the user depending on the assembly output\n\n\t\"\"\"\n\tdef __init__(self,assembly_type,assembly_kccs,assembly_kpis,part_name,part_type,voxel_dim,voxel_channels,point_dim,noise_level,noise_type='uniform',convergency_flag=1):\n\t\tsuper().__init__(assembly_type,assembly_kccs,assembly_kpis,part_name,part_type,voxel_dim,voxel_channels,point_dim)\n\t\tself.noise_level=noise_level\n\t\tself.noise_type=noise_type\n\t\tself.convergency_flag=convergency_flag\n\n\tdef kpi_calculator(self,cop_data,kpi_params=[]):\n\t\t\"\"\" User defined function to calculate KPI from Cloud of Point Data [KPI]=f(Cop)\n\n\t\t\t:param cop_data: CoP data for a given sample\n\t\t\t:type cop_data: np_array [point_dim,3] (required)\n\n\t\t\t:param kpi_params: Various parameters required to calculate the KPI, can be blank if no parameters are required to calculate KPI from CoP\n\t\t\t:type kpi_params: list (optional)\n\n\t\t\t:returns: list of multivariate KPIs for the given CoP\n\t\t\t:rtype: list\n\n\t\t\"\"\"\n\t\t\n\t\tkpi=[None]*self.assembly_kpis\n\n\t\t#define function here \n\t\treturn kpi", "\"\"\" The model deploy file is used to leverage a trained model to perform inference on unknown set of node deviations.\n\"\"\"\n\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\nimport sys\ncurrent_path=os.path.dirname(__file__)\nparentdir = os.path.dirname(current_path)\n\n#Adding Path to various Modules\nsys.path.append(\"../core\")\nsys.path.append(\"../visualization\")\nsys.path.append(\"../utilities\")\nsys.path.append(\"../datasets\")\nsys.path.append(\"../trained_models\")\nsys.path.append(\"../config\")\n\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nimport csv\nimport logging\ntf.get_logger().setLevel(logging.ERROR)\n\nfrom tensorflow.keras.models import load_model\n\n\n#Importing Config files\nimport assembly_config as config\nimport model_config as cftrain\nimport measurement_config as mscofig\n\n#Importing required modules from the package\nfrom measurement_system import HexagonWlsScanner\nfrom assembly_system import VRMSimulationModel\nfrom assembly_system import PartType\nfrom wls400a_system import GetInferenceData\nfrom metrics_eval import MetricsEval\nfrom data_import import GetTrainData\n#from cam_viz import CamViz\n#from cop_viz import CopViz\n\nclass DeployModel:\n\t\"\"\"The Deploy Model class is used to import a trained model and use it to infer on unknown data\n\n\t\"\"\"\n\tdef get_model(self,model_path):\n\t\t\"\"\"get_model method is is used to retrieve the trained model from a given path\n\t\t\t\t\n\t\t\t\t:param model_path: Path to the trained model, ideally it should be same as the train model path output\n\t\t\t\t:type model_path: str (required)\n\t\t\"\"\"\n\t\tfrom tensorflow.keras.models import load_model\n\t\ttry:\n\t\t\tinference_model=load_model(model_path)\n\t\t\tprint('Deep Learning Model found and loaded')\n\t\texcept AssertionError as error:\n\t\t\tprint(error)\n\t\t\tprint('Model not found at this path ',model_path, ' Update path in config file if required')\n\n\t\treturn inference_model\n\n\tdef model_inference(self,inference_data,inference_model,deploy_path,print_result=0,plot_result=0,get_cam_data=0,append_result=0):\n\t\t\"\"\"model_inference method is used to infer from unknown sample(s) using 
the trained model \n\t\t\t\t\n\t\t\t\t:param inference_data: Unknown dataset having same structure as the train dataset\n\t\t\t\t:type inference_data: numpy.array [samples*voxel_dim*voxel_dim*voxel_dim*deviation_channels] (required) (required)\n\n\t\t\t\t:param inference_model: Trained model\n\t\t\t\t:type inference_model: keras.model (required)\n\t\t\t\t\n\t\t\t\t:param print_result: Flag to indicate if the result needs to be printed, 0 by default, change to 1 in case the results need to be printed on the console\n\t\t\t\t:type print_result: int\n\n\t\t\"\"\"\t\t\n\t\tresult=inference_model.predict(inference_data)\n\t\tdescription=\"The Process Parameters variations are inferred from the obtained measurement data and the trained CNN based model\"\n\t\tprint('The model estimates are: ')\n\t\trounded_result=np.round(result,2)\n\t\t\n\t\tif(print_result==1):\n\t\t\tprint(rounded_result)\n\t\t\n\t\tif(append_result==1):\n\t\t\twith open (\"user_preds.csv\",'a',newline='') as filedata:\n\t\t\t\t#fieldnames = ['kcc1','kcc2','kcc3','kcc4','kcc5','kcc6'] \n\t\t\t\twriter = csv.writer(filedata, delimiter=',')\n\t\t\t\twriter.writerow(rounded_result[0,:].tolist())\n\t\t\t\t#writer.writerow(dict(zip(fieldnames, rounded_result[0,:].tolist()))) \n\t\t\t\t#filedata.write(rounded_result[0,:].tolist())\n\t\t\n\t\tif(plot_result==1):\n\t\t\tprint(\"Plotting Results in HTML...\")\n\t\t\timport plotly.graph_objects as go\n\t\t\timport plotly as py\n\t\t\tresult_str = [\"%.2f\" % number for number in rounded_result[0,:]]\n\n\t\t\tkcc_str=[]\n\t\t\tfor i in range(rounded_result.shape[1]):\n\t\t\t\tkcc_str.append(\"X(\"+str(i)+\"): \")\n\t\t\t#kcc_str=[\"X(1): \",\"X(2): \", \"X(3): \", \"X(4): \", \"X(5): \", \"X(6): \"]\t\n\t\t\t\n\t\t\tdisplay_str=np.core.defchararray.add(kcc_str, result_str)\t\n\t\t\tprint(display_str)\n\t\t\tfig = go.Figure(data=go.Scatter(y=rounded_result[0,:], marker=dict(\n\t\t\tsize=30,color=100), mode='markers+text',text=display_str,x=kcc_str))\n\t\t\tfig.update_traces( textfont_size=20,textposition='top center')\n\t\t\tfig.update_layout(title_text='Deep Learning for Manufacturing - Model Estimates')\n\t\t\tpy.offline.plot(fig, filename=deploy_path+\"results.html\")\n\n\t\tif(get_cam_data==1):\n\t\t\t#print(inference_model.summary())\n\t\t\tfrom cam_viz import CamViz\n\t\t\tfrom cop_viz import CopViz\n\t\t\tinput_conv_data=inference_data\n\t\t\tbase_cop=input_conv_data[0,:,:,:,0]+input_conv_data[0,:,:,:,1]+input_conv_data[0,:,:,:,2]\n\t\t\tbase_cop[base_cop!=0]=0.6\n\n\t\t\tprocess_parameter_id=np.argmax(abs(result[0,:]))\n\t\t\tprint(\"Plotting Gradient based Class Activation Map for Process Parameter: \",process_parameter_id)\n\t\t\tcamviz=CamViz(inference_model,'conv_block_9')\n\t\t\t#For explicit plotting change ID here\n\t\t\t#process_parameter_id=0\n\t\t\tcop_input=input_conv_data[0:1,:,:,:,:]\n\t\t\tfmap_eval, grad_wrt_fmap_eval=camviz.grad_cam_3d(cop_input,process_parameter_id)\n\t\t\talpha_k_c= grad_wrt_fmap_eval.mean(axis=(0,1,2,3)).reshape((1,1,1,-1))\n\t\t\tLc_Grad_CAM = np.maximum(np.sum(fmap_eval*alpha_k_c,axis=-1),0).squeeze()\n\t\t\tscale_factor = np.array(cop_input.shape[1:4])/np.array(Lc_Grad_CAM.shape)\n\n\t\t\tfrom scipy.ndimage.interpolation import zoom\n\t\t\timport tensorflow.keras.backend as K\n\t\t\t\n\t\t\t_grad_CAM = zoom(Lc_Grad_CAM,scale_factor)\n\t\t\tarr_min, arr_max = np.min(_grad_CAM), np.max(_grad_CAM)\n\t\t\tgrad_CAM = (_grad_CAM - arr_min) / (arr_max - arr_min + K.epsilon())\n\n\t\t\t#Code for Grad CAM Plotting\n\t\t\timport plotly.graph_objects as 
go\n\t\t\timport plotly as py\n\t\t\timport plotly.express as px\n\t\t\tX, Y, Z = np.mgrid[0:len(base_cop), 0:len(base_cop), 0:len(base_cop)]\n\t\t\t#input_conv_data[0,:,:,:,0]=0.2\n\t\t\tvalues_cop = base_cop\n\t\t\tvalues_grad_cam=grad_CAM\n\n\t\t\ttrace1=go.Volume(\n\t\t\t\tx=X.flatten(),\n\t\t\t\ty=Y.flatten(),\n\t\t\t\tz=Z.flatten(),\n\t\t\t\tvalue=values_cop.flatten(),\n\t\t\t\tisomin=0,\n\t\t\t\tisomax=1,\n\t\t\t\topacity=0.1, # needs to be small to see through all surfaces\n\t\t\t\tsurface_count=17, # needs to be a large number for good volume rendering\n\t\t\t\tcolorscale='Greens'\n\t\t\t\t)\n\n\t\t\ttrace2=go.Volume(\n\t\t\t\tx=X.flatten(),\n\t\t\t\ty=Y.flatten(),\n\t\t\t\tz=Z.flatten(),\n\t\t\t\tvalue=values_grad_cam.flatten(),\n\t\t\t\tisomin=0,\n\t\t\t\tisomax=1,\n\t\t\t\topacity=0.1, # needs to be small to see through all surfaces\n\t\t\t\tsurface_count=17,\n\t\t\t\tcolorscale='orrd' # needs to be a large number for good volume rendering\n\t\t\t\t)\n\t\t\tdata = [trace1,trace2]\n\t\t\t\n\t\t\tlayout = go.Layout(\n\t\t\t\tmargin=dict(\n\t\t\t\t\tl=0,\n\t\t\t\t\tr=0,\n\t\t\t\t\tb=0,\n\t\t\t\t\tt=0\n\t\t\t\t)\n\t\t\t)\n\t\t\t\n\t\t\tfig = go.Figure(data=data,layout=layout)\n\t\t\tplot_file_name=deploy_path+'voxel_grad_cam.html'\n\t\t\tpy.offline.plot(fig, filename=plot_file_name)\n\n\n\t\treturn result\n\nif __name__ == '__main__':\n\t\n\tprint(\"Welcome to Deep Learning for Manufacturing (dlmfg)...\")\n\tprint('Parsing from Assembly Config File....')\n\n\tdata_type=config.assembly_system['data_type']\n\tapplication=config.assembly_system['application']\n\tpart_type=config.assembly_system['part_type']\n\tpart_name=config.assembly_system['part_name']\n\tdata_format=config.assembly_system['data_format']\n\tassembly_type=config.assembly_system['assembly_type']\n\tassembly_kccs=config.assembly_system['assembly_kccs']\t\n\tassembly_kpis=config.assembly_system['assembly_kpis']\n\tvoxel_dim=config.assembly_system['voxel_dim']\n\tpoint_dim=config.assembly_system['point_dim']\n\tvoxel_channels=config.assembly_system['voxel_channels']\n\tnoise_type=config.assembly_system['noise_type']\n\tmapping_index=config.assembly_system['mapping_index']\n\tfile_names_x=config.assembly_system['test_data_files_x']\n\tfile_names_y=config.assembly_system['test_data_files_y']\n\tfile_names_z=config.assembly_system['test_data_files_z']\n\tsystem_noise=config.assembly_system['system_noise']\n\taritifical_noise=config.assembly_system['aritifical_noise']\n\tdata_folder=config.assembly_system['data_folder']\n\tkcc_folder=config.assembly_system['kcc_folder']\n\tkcc_files=config.assembly_system['test_kcc_files']\n\t\n\n\tprint('Initializing the Assembly System and Measurement System....')\n\tmeasurement_system=HexagonWlsScanner(data_type,application,system_noise,part_type,data_format)\n\tvrm_system=VRMSimulationModel(assembly_type,assembly_kccs,assembly_kpis,part_name,part_type,voxel_dim,voxel_channels,point_dim,aritifical_noise)\n\tdeploy_model=DeployModel()\n\t\n\t#Generate Paths\n\ttrain_path='../trained_models/'+part_type\n\tmodel_path=train_path+'/model'+'/trained_model_0.h5'\n\tlogs_path=train_path+'/logs'\n\tdeploy_path=train_path+'/deploy/'\n\n\t#Voxel Mapping File\n\n\tget_data=GetTrainData();\n\t\n\tprint('Importing and Preprocessing Cloud-of-Point 
Data')\n\tdataset=[]\n\tdataset.append(get_data.data_import(file_names_x,data_folder))\n\tdataset.append(get_data.data_import(file_names_y,data_folder))\n\tdataset.append(get_data.data_import(file_names_z,data_folder))\n\tpoint_index=get_data.load_mapping_index(mapping_index)\n\n\n\t#Make an Object of the Measurement System Class\n\tmeasurement_system=HexagonWlsScanner(data_type,application, system_noise,part_type,data_format)\n\t#Make an Object of the Assembly System Class\n\tassembly_system=PartType(assembly_type,assembly_kccs,assembly_kpis,part_name,part_type,voxel_dim,voxel_channels,point_dim)\n\n\t\n\t#Inference from simulated data\n\tinference_model=deploy_model.get_model(model_path)\n\tprint(inference_model.summary())\n\t\n\tinput_conv_data, kcc_subset_dump,kpi_subset_dump=get_data.data_convert_voxel_mc(vrm_system,dataset,point_index)\n\n\ty_pred=deploy_model.model_inference(input_conv_data,inference_model,deploy_path,print_result=1,plot_result=1);\n\n\tevalerror=1\n\n\tif(evalerror==1):\n\t\tkcc_dataset=get_data.data_import(kcc_files,kcc_folder)\n\t\tmetrics_eval=MetricsEval();\n\t\teval_metrics,accuracy_metrics_df=metrics_eval.metrics_eval_base(y_pred,kcc_dataset,logs_path)\n\t\t\n\t\tprint('Evaluation Metrics: ',eval_metrics)\n\t\taccuracy_metrics_df.to_csv(logs_path+'/metrics_test.csv')\n\t\t\n\t\tnp.savetxt((deploy_path+\"predicted.csv\"), y_pred, delimiter=\",\")\n\t\tprint('Predicted Values saved to disk...')\n\n\t#Inference from Measurement Data\n\n\t#measurement_files=mscofig.ms_parameters['measurement_files']\n\t\n\t#Make an object of Get Data Class\n\t#get_data=GetInferenceData();\n\t\n\t#Call functions of the get Data Class\n\t#for measurement_file in measurement_files:\t\n\t\t#measurement_path=deploy_path+measurement_file\n\t\t#measurement_data=get_data.load_measurement_file(measurement_path)\n\t\t#voxel_point_index=get_data.load_mapping_index(voxel_path)\n\t\t#y_dev_data_filtered=get_data.data_pre_processing(measurement_data,voxel_channels)\n\t\t#input_conv_data=get_data.voxel_mapping(y_dev_data_filtered,voxel_point_index,point_dim,voxel_dim,voxel_channels)\n\t\t#y_pred=deploy_model.model_inference(input_conv_data,inference_model);\n\t\t#print('KCCs for: ',measurement_file)\n\t\t#print(y_pred)\n\n\t#Code for Voxel Vizvalization\n\n\t#Code for CAM Visualization\n\tviz=0\n\tif(viz==1):\n\t\tprint(inference_model.summary())\n\t\tcamviz=CamViz(inference_model,'conv3d_3')\n\n\t\tgrads=camviz.grad_cam_3d(input_conv_data[1:2,:,:,:,:],1)" ]
[ [ "pandas.read_sql_query", "pandas.read_csv" ], [ "tensorflow.keras.models.load_model", "numpy.core.defchararray.add", "numpy.min", "tensorflow.get_logger", "numpy.round", "numpy.max", "tensorflow.keras.backend.epsilon", "numpy.savetxt", "numpy.array", "scipy.ndimage.interpolation.zoom", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "0.15", "1.4", "0.16", "1.0", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "0.10", "0.17", "1.3" ], "tensorflow": [ "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] } ]
dipdeb/DAT210x
[ "9103844fa7f76052bdcc5a4ec60e8afbc91a9f6b", "9103844fa7f76052bdcc5a4ec60e8afbc91a9f6b", "9103844fa7f76052bdcc5a4ec60e8afbc91a9f6b" ]
[ "Module3/assignment2.py", "Module2/assignment2.py", "Module4/assignment2.py" ]
[ "import pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib\n\n# Look pretty...\n# matplotlib.style.use('ggplot')\nplt.style.use('ggplot')\n\n\n#\n# TODO: Load up the Seeds Dataset into a Dataframe\n# It's located at 'Datasets/wheat.data'\n# \nwheat_df = pd.read_csv('/home/dipanjan/DAT210x/Module3/Datasets/wheat.data', index_col=0);\n\n\n#\n# TODO: Create a 2d scatter plot that graphs the\n# area and perimeter features\n# \n# .. your code here ..\nwheat_df.plot.scatter(x='area', y='perimeter')\n\n#\n# TODO: Create a 2d scatter plot that graphs the\n# groove and asymmetry features\n# \n# .. your code here ..\nwheat_df.plot.scatter(x='groove', y='asymmetry')\n\n#\n# TODO: Create a 2d scatter plot that graphs the\n# compactness and width features\n# \n# .. your code here ..\nwheat_df.plot.scatter(x='compactness', y='width')\n\n\n# BONUS TODO:\n# After completing the above, go ahead and run your program\n# Check out the results, and see what happens when you add\n# in the optional display parameter marker with values of\n# either '^', '.', or 'o'.\nwheat_df.plot.scatter(x='compactness', y='width', marker='o')\n\nplt.show()\n\n\n", "import pandas as pd\n\n# TODO: Load up the 'tutorial.csv' dataset\n#\ndf = pd.read_csv('/home/dipanjan/DAT210x/Module2/Datasets/tutorial.csv')\n\n\n\n# TODO: Print the results of the .describe() method\n#\nprint(df.describe())\n\n\n\n# TODO: Figure out which indexing method you need to\n# use in order to index your dataframe with: [2:4,'col3']\n# And print the results\n#\nprint(df.loc[2:4, 'col3'])\n\n", "import pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport assignment2_helper as helper\n\n# Look pretty...\n# matplotlib.style.use('ggplot')\nplt.style.use('ggplot')\n\n\n# Do * NOT * alter this line, until instructed!\n#scaleFeatures = False\nscaleFeatures = True\n\n\n# TODO: Load up the dataset and remove any and all\n# Rows that have a nan. You should be a pro at this\n# by now ;-)\n#\n# QUESTION: Should the id column be included as a\n# feature?\n#\n# .. your code here ..\ndf = pd.read_csv('/home/dipanjan/DAT210x/Module4/Datasets/kidney_disease.csv');\ndf = df.dropna() \n\n\n# Create some color coded labels; the actual label feature\n# will be removed prior to executing PCA, since it's unsupervised.\n# You're only labeling by color so you can see the effects of PCA\nlabels = ['red' if i=='ckd' else 'green' for i in df.classification]\n\n\n# TODO: Use an indexer to select only the following columns:\n# ['bgr','wc','rc']\n#\n# .. your code here ..\ndf = df[['bgr', 'rc', 'wc']] \n\n\n# TODO: Print out and check your dataframe's dtypes. You'll might\n# want to set a breakpoint after you print it out so you can stop the\n# program's execution.\n#\n# You can either take a look at the dataset webpage in the attribute info\n# section: https://archive.ics.uci.edu/ml/datasets/Chronic_Kidney_Disease\n# or you can actually peek through the dataframe by printing a few rows.\n# What kind of data type should these three columns be? If Pandas didn't\n# properly detect and convert them to that data type for you, then use\n# an appropriate command to coerce these features into the right type.\n#\n# .. your code here ..\n#df.dtypes\ndf['rc'] = pd.to_numeric(df['rc'], errors='coerce')\ndf['wc'] = pd.to_numeric(df['wc'], errors='coerce')\n#df.dtypes\n\n\n# TODO: PCA Operates based on variance. The variable with the greatest\n# variance will dominate. 
Go ahead and peek into your data using a\n# command that will check the variance of every feature in your dataset.\n# Print out the results. Also print out the results of running .describe\n# on your dataset.\n#\n# Hint: If you don't see all three variables: 'bgr','wc' and 'rc', then\n# you probably didn't complete the previous step properly.\n#\n# .. your code here ..\nprint(df.var())\nprint(df.describe())\n \n\n# TODO: This method assumes your dataframe is called df. If it isn't,\n# make the appropriate changes. Don't alter the code in scaleFeatures()\n# just yet though!\n#\n# .. your code adjustment here ..\nif scaleFeatures: df = helper.scaleFeatures(df)\n\n\n\n# TODO: Run PCA on your dataset and reduce it to 2 components\n# Ensure your PCA instance is saved in a variable called 'pca',\n# and that the results of your transformation are saved in 'T'.\n#\n# .. your code here ..\nfrom sklearn.decomposition import PCA\npca = PCA(n_components=2)\npca.fit(df)\nT = pca.transform(df)\n\n# Plot the transformed data as a scatter plot. Recall that transforming\n# the data will result in a NumPy NDArray. You can either use MatPlotLib\n# to graph it directly, or you can convert it to DataFrame and have pandas\n# do it for you.\n#\n# Since we've already demonstrated how to plot directly with MatPlotLib in\n# Module4/assignment1.py, this time we'll convert to a Pandas Dataframe.\n#\n# Since we transformed via PCA, we no longer have column names. We know we\n# are in P.C. space, so we'll just define the coordinates accordingly:\nax = helper.drawVectors(T, pca.components_, df.columns.values, plt, scaleFeatures)\nT = pd.DataFrame(T)\nT.columns = ['component1', 'component2']\nT.plot.scatter(x='component1', y='component2', marker='o', c=labels, alpha=0.75, ax=ax)\nplt.show()\n\n\n" ]
[ [ "pandas.read_csv", "matplotlib.pyplot.style.use", "matplotlib.pyplot.show" ], [ "pandas.read_csv" ], [ "pandas.read_csv", "matplotlib.pyplot.style.use", "pandas.DataFrame", "matplotlib.pyplot.show", "pandas.to_numeric", "sklearn.decomposition.PCA" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
hongzhouye/sigma-SCF
[ "62e2dce538d1e68c4dc3c72fdf27beb1911e544f" ]
[ "scf/scf_utils.py" ]
[ "import numpy as np\nimport os, sys\nsys.path.append(os.path.dirname(__file__))\nfrom diis_solver import diis_solver, diis_solver_uhf\nsys.path.pop()\nimport jk\nimport xform\n\n\ndef homo_lumo_mix(C, nocc, beta):\n \"\"\"\n Mix a portion of LUMO to HOMO.\n Used when generating spin-unrestricted guess.\n \"\"\"\n if beta < 0. or beta > 1.:\n raise Exception(\"Mixing beta must be in [0, 1]\")\n Cb = C.copy()\n homo = C[:, nocc - 1]\n lumo = C[:, nocc]\n Cb[:, nocc - 1] = (1. - beta) ** 0.5 * homo + beta ** 0.5 * lumo\n return Cb\n\n\ndef get_dm(C, nel):\n D = C[:, :nel]\n D = D @ D.T\n return D\n\n\ndef get_JK(is_fitted, g, D):\n if(is_fitted):\n # FINISH LATER\n X = np.einsum(\"Pls,ls->P\", g, D)\n J = np.einsum(\"mnP,P->mn\", np.swapaxes(g, 0, 2), X)\n Z = np.einsum(\"Pns,ls->Pnl\", g, D)\n K = np.einsum('mlP,Pnl->mn', np.swapaxes(g, 0, 2), Z)\n return (J, K)\n else:\n #J = np.einsum(\"pqrs,rs->pq\", g, D)\n #K = np.einsum(\"prqs,rs->pq\", g, D)\n J, K = jk.getJK_np_Dshift(g, D - np.diag(np.diag(D) * 0.5))\n return (J, K)\n\n\ndef get_JK_uhf(is_fitted, g, Ds):\n \"\"\"\n Ds = [Da, Db]\n \"\"\"\n Da, Db = Ds[0], Ds[1]\n Dtot = Da + Db\n if (is_fitted == True):\n X = np.einsum(\"Pls,ls->P\", g, Dtot)\n Jtot = np.einsum(\"mnP,P->mn\", np.swapaxes(g, 0, 2), X)\n Za = np.einsum(\"Pns,ls->Pnl\", g, Da)\n Ka = np.einsum('mlP,Pnl->mn', np.swapaxes(g, 0, 2), Za)\n Zb = np.einsum(\"Pns,ls->Pnl\", g, Db)\n Kb = np.einsum('mlP,Pnl->mn', np.swapaxes(g, 0, 2), Zb)\n return Jtot, Ka, Kb\n else:\n Jtot = np.einsum(\"pqrs, rs -> pq\", g, Dtot)\n Ka = np.einsum(\"prqs, rs -> pq\", g, Da)\n Kb = np.einsum(\"prqs, rs -> pq\", g, Db)\n return Jtot, Ka, Kb\n\n\ndef get_fock(H, g, D):\n J, K = get_JK(len(g.shape) == 3, g, D)\n return H + 2 * J - K\n\n\ndef diis_update(F_prev_list, r_prev_list):\n c = diis_solver(r_prev_list) # GET THE COEFFICIENTS!!\n out = 0 * F_prev_list[0]\n for i, element in enumerate(F_prev_list):\n out += c[i] * element\n return out\n\n\ndef oda_update(dF, dD, dE):\n \"\"\"\n ODA update:\n lbd = 0.5 - dE / E_deriv\n \"\"\"\n E_deriv = np.sum(dF * dD)\n lbd = 0.5 * (1. - dE / E_deriv)\n if lbd < 0 or lbd > 1:\n lbd = 0.9999 if dE < 0 else 1.e-4\n return lbd\n\n\ndef get_fock_uhf(H, g, Ds):\n \"\"\"\n DIIS update given previous Fock matrices and error vectors.\n Note that if there are less than two F's, return normal F.\n \"\"\"\n Jtot, Ka, Kb = get_JK_uhf(len(g.shape) == 3, g, Ds)\n return H + Jtot - Ka, H + Jtot - Kb\n\n\ndef diis_update_uhf(F_prev_lists, r_prev_lists):\n c = diis_solver_uhf(r_prev_lists[0], r_prev_lists[1])\n Fa = 0 * F_prev_lists[0][0]\n for i, element in enumerate(F_prev_lists[0]):\n Fa += c[i] * element\n Fb = 0 * F_prev_lists[0][0]\n for i, element in enumerate(F_prev_lists[1]):\n Fb += c[i] * element\n return Fa, Fb\n\n\ndef oda_update_uhf(dFs, dDs, dE):\n \"\"\"\n ODA update:\n lbd = 0.5 - dE / E_deriv\n \"\"\"\n if type(dFs) is not list:\n raise Exception(\"arg1 and arg2 are list of alpha/beta matrices.\")\n E_deriv = np.sum(dFs[0] * dDs[0] + dFs[1] * dDs[1])\n lbd = 0.5 * (1. 
- dE / E_deriv)\n if lbd < 0 or lbd > 1:\n lbd = 0.9999 if dE < 0 else 1.e-4\n return lbd\n\n\ndef diag(F, A):\n Fp = A.T @ F @ A\n eps, Cp = np.linalg.eigh(Fp)\n C = A @ Cp\n return eps, C\n\n\ndef get_SCF_err(S, D, F):\n err_v = S @ D @ F - F @ D @ S\n err = np.mean(err_v ** 2) ** 0.5\n return err, err_v\n\n\ndef get_SCF_energy(H, F, D, unrestricted):\n \"\"\"\n Calculates the energy.\n \"\"\"\n if unrestricted == True:\n if type(F) is not list or type(D) is not list:\n raise Exception(\"For UHF, F and D must have type list.\")\n Fa, Fb = F[0], F[1]\n Da, Db = D[0], D[1]\n Dtot = Da + Db\n return np.sum(Dtot * H + Da * Fa + Db * Fb) * 0.5\n else:\n return np.sum((H + F) * D)\n\n\ndef xform_2(H, A):\n \"\"\"\n Basis xform for 2-tensor\n \"\"\"\n if len(H.shape) != 2:\n raise Exception(\"Dimension error: arg1 should be a matrix\")\n\n return A.T @ H @ A\n\n\ndef xform_4(g, A):\n \"\"\"\n Basis xform for 4-tensor\n \"\"\"\n if len(g.shape) != 4:\n raise Exception(\"\"\"\n Dimension error: arg1 should be a four-tensor.\n Note that you should set is_fitted to be False.\n \"\"\")\n\n #return np.einsum(\"pi, qj, pqrs, rk, sl -> ijkl\", A, A, g, A, A, optimize=True)\n return xform.xform_4_np(g, A)\n" ]
[ [ "numpy.diag", "numpy.swapaxes", "numpy.einsum", "numpy.linalg.eigh", "numpy.mean", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
APrigarina/open_model_zoo
[ "b1ff98b64a6222cf6b5f3838dc0271422250de95", "b1ff98b64a6222cf6b5f3838dc0271422250de95", "b1ff98b64a6222cf6b5f3838dc0271422250de95", "b1ff98b64a6222cf6b5f3838dc0271422250de95", "b1ff98b64a6222cf6b5f3838dc0271422250de95", "b1ff98b64a6222cf6b5f3838dc0271422250de95" ]
[ "tools/accuracy_checker/accuracy_checker/annotation_converters/cvat_multilabel_recognition.py", "demos/action_recognition_demo/python/action_recognition_demo/result_renderer.py", "demos/place_recognition_demo/python/place_recognition_demo/visualizer.py", "tools/accuracy_checker/accuracy_checker/representation/multilabel_recognition.py", "tools/accuracy_checker/accuracy_checker/postprocessor/filter.py", "tools/accuracy_checker/accuracy_checker/metrics/regression.py" ]
[ "\"\"\"\nCopyright (c) 2018-2021 Intel Corporation\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport numpy as np\nfrom .format_converter import FileBasedAnnotationConverter, ConverterReturn\nfrom ..representation import MultiLabelRecognitionAnnotation\nfrom ..utils import read_xml, check_file_existence\nfrom ..config import StringField, PathField, ConfigError\n\n\nclass CVATMultilabelAttributesRecognitionConverter(FileBasedAnnotationConverter):\n __provider__ = 'cvat_multilabel_binary_attributes_recognition'\n annotation_types = (MultiLabelRecognitionAnnotation, )\n\n @classmethod\n def parameters(cls):\n configuration_parameters = super().parameters()\n configuration_parameters.update({\n 'label': StringField(description='specific label for attribute collection'),\n 'images_dir': PathField(\n is_directory=True, optional=True,\n description='path to dataset images, used only for content existence check'\n )\n })\n return configuration_parameters\n\n def configure(self):\n super().configure()\n self.label = self.get_value_from_config('label')\n self.images_dir = self.get_value_from_config('images_dir') or self.annotation_file.parent\n\n def convert(self, check_content=False, progress_callback=None, progress_interval=100, **kwargs):\n annotation = read_xml(self.annotation_file)\n meta = annotation.find('meta')\n size = int(meta.find('task').find('size').text)\n label = self.select_label(meta)\n label_to_id = {attribute.find('name').text: idx for idx, attribute in enumerate(label.iter('attribute'))}\n num_attributes = len(label_to_id)\n\n annotations = []\n content_errors = None if not check_content else []\n for image_id, image in enumerate(annotation.iter('image')):\n identifier = image.attrib['name'].split('/')[-1]\n if check_content:\n if not check_file_existence(self.images_dir / identifier):\n content_errors.append('{}: does not exist'.format(self.images_dir / identifier))\n for bbox in image:\n if 'label' not in bbox.attrib.keys() or bbox.attrib['label'] != self.label:\n continue\n bbox_rect = [\n float(bbox.attrib['xtl']), float(bbox.attrib['ytl']),\n float(bbox.attrib['xbr']), float(bbox.attrib['ybr'])\n ]\n attributes = -np.ones(num_attributes)\n for attribute in bbox.iter('attribute'):\n attribute_name = attribute.attrib['name']\n attribute_label = label_to_id[attribute_name]\n attributes[attribute_label] = 1 if attribute.text == 'T' else 0\n attributes_annotation = MultiLabelRecognitionAnnotation(identifier, attributes)\n attributes_annotation.metadata['rect'] = bbox_rect\n annotations.append(attributes_annotation)\n\n if progress_callback is not None and image_id % progress_interval == 0:\n progress_callback(image_id * 100 / size)\n\n return ConverterReturn(annotations, self.generate_meta(label_to_id), content_errors)\n\n @staticmethod\n def generate_meta(attribute_values_mapping):\n return {'label_map': {value: key for key, value in attribute_values_mapping.items()}}\n\n def select_label(self, meta):\n label = [label for label in meta.iter('label') if label.find('name').text == 
self.label]\n if not label:\n raise ConfigError('{} does not present in annotation'.format(self.label))\n return label[0]\n", "\"\"\"\n Copyright (c) 2020 Intel Corporation\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nfrom collections import Counter, defaultdict, deque\nfrom functools import partial\nfrom itertools import islice\n\nimport cv2\nimport numpy as np\n\nfrom .meters import WindowAverageMeter\n\nFONT_COLOR = (255, 255, 255)\nFONT_STYLE = cv2.FONT_HERSHEY_DUPLEX\nFONT_SIZE = 1\nTEXT_VERTICAL_INTERVAL = 45\nTEXT_LEFT_MARGIN = 15\n\n\nclass ResultRenderer:\n def __init__(self, no_show, presenter, output, limit, display_fps=False, display_confidence=True, number_of_predictions=1,\n label_smoothing_window=30, labels=None, output_height=720):\n self.no_show = no_show\n self.presenter = presenter\n self.output = output\n self.limit = limit\n self.video_writer = cv2.VideoWriter()\n self.number_of_predictions = number_of_predictions\n self.display_confidence = display_confidence\n self.display_fps = display_fps\n self.labels = labels\n self.output_height = output_height\n self.meters = defaultdict(partial(WindowAverageMeter, 16))\n self.postprocessing = [LabelPostprocessing(n_frames=label_smoothing_window, history_size=label_smoothing_window)\n for _ in range(number_of_predictions)]\n print(\"To close the application, press 'CTRL+C' here or switch to the output window and press Esc or Q\")\n\n def update_timers(self, timers):\n inference_time = 0.0\n for key, val in timers.items():\n self.meters[key].update(val)\n inference_time += self.meters[key].avg\n return inference_time\n\n def render_frame(self, frame, logits, timers, frame_ind, fps):\n inference_time = self.update_timers(timers)\n\n if logits is not None:\n labels, probs = decode_output(logits, self.labels, top_k=self.number_of_predictions,\n label_postprocessing=self.postprocessing)\n print(\"Frame {}: {} - {:.2f}% -- {:.2f}ms\".format(frame_ind, labels[0], probs[0] * 100, inference_time))\n else:\n labels = ['Preparing...']\n probs = [0.]\n\n # resize frame, keep aspect ratio\n w, h, c = frame.shape\n new_h = self.output_height\n new_w = int(h * (new_h / w))\n frame = cv2.resize(frame, (new_w, new_h))\n\n self.presenter.drawGraphs(frame)\n # Fill text area\n fill_area(frame, (0, 70), (700, 0), alpha=0.6, color=(0, 0, 0))\n\n if self.display_confidence and logits is not None:\n text_template = '{label} - {conf:.2f}%'\n else:\n text_template = '{label}'\n\n for i, (label, prob) in enumerate(islice(zip(labels, probs), self.number_of_predictions)):\n display_text = text_template.format(label=label, conf=prob * 100)\n text_loc = (TEXT_LEFT_MARGIN, TEXT_VERTICAL_INTERVAL * (i + 1))\n\n cv2.putText(frame, display_text, text_loc, FONT_STYLE, FONT_SIZE, FONT_COLOR)\n\n if frame_ind == 0 and self.output and not self.video_writer.open(self.output,\n cv2.VideoWriter_fourcc(*'MJPG'), fps, (frame.shape[1], frame.shape[0])):\n print(\"ERROR: Can't open video writer\")\n return -1\n\n if self.display_fps:\n fps = 1000 / (inference_time + 
1e-6)\n text_loc = (TEXT_LEFT_MARGIN, TEXT_VERTICAL_INTERVAL * (len(labels) + 1))\n cv2.putText(frame, \"Inference time: {:.2f}ms ({:.2f} FPS)\".format(inference_time, fps),\n text_loc, FONT_STYLE, FONT_SIZE, FONT_COLOR)\n\n if self.video_writer.isOpened() and (self.limit <= 0 or frame_ind <= self.limit-1):\n self.video_writer.write(frame)\n\n if not self.no_show:\n cv2.imshow(\"Action Recognition\", frame)\n key = cv2.waitKey(1) & 0xFF\n if key in {ord('q'), ord('Q'), 27}:\n return -1\n self.presenter.handleKey(key)\n\n\nclass LabelPostprocessing:\n def __init__(self, n_frames=5, history_size=30):\n self.n_frames = n_frames\n self.history = deque(maxlen=history_size)\n self.prev_get = None\n self.prev_label = None\n\n def update(self, label):\n self.prev_label = label\n self.history.append(label)\n\n def get(self):\n if self.prev_get is None:\n self.prev_get = self.prev_label\n return self.prev_label\n\n cnt = Counter(list(self.history)[-self.n_frames:])\n if len(cnt) > 1:\n return self.prev_get\n self.prev_get = self.prev_label\n return self.prev_get\n\n\ndef fill_area(image, bottom_left, top_right, color=(0, 0, 0), alpha=1.):\n \"\"\"Fills area with the specified color\"\"\"\n xmin, ymax = bottom_left\n xmax, ymin = top_right\n\n image[ymin:ymax, xmin:xmax, :] = image[ymin:ymax, xmin:xmax, :] * (1 - alpha) + np.asarray(color) * alpha\n return image\n\n\ndef decode_output(probs, labels, top_k=None, label_postprocessing=None):\n \"\"\"Decodes top probabilities into corresponding label names\"\"\"\n top_ind = np.argsort(probs)[::-1][:top_k]\n\n if label_postprocessing:\n for k in range(top_k):\n label_postprocessing[k].update(top_ind[k])\n\n top_ind = [postproc.get() for postproc in label_postprocessing]\n\n decoded_labels = [labels[i] if labels else str(i) for i in top_ind]\n probs = [probs[i] for i in top_ind]\n return decoded_labels, probs\n", "\"\"\"\n Copyright (c) 2021 Intel Corporation\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport numpy as np\nimport cv2\n\nSIZE = 200\nBLACK = (0, 0, 0)\nLWD = 2\nTEXT_SIZE = 1.5\nBORDER = 30\n\n\ndef add_top10_gallery_images(demo_image, impaths, distances, input_image):\n \"\"\" Add top-10 most similar images from the gallery to demo image. 
\"\"\"\n\n for index, impath in enumerate(impaths[:10]):\n image = cv2.imread(impath)\n image = cv2.resize(image, (SIZE, SIZE))\n\n h_shift = 2 * BORDER + input_image.shape[0] + (SIZE + BORDER) * (index // 5)\n w_shift = BORDER + (index % 5) * (SIZE + BORDER)\n\n demo_image[h_shift: h_shift + SIZE, w_shift: w_shift + SIZE] = image\n\n if distances is not None:\n cv2.putText(demo_image, '{}:{}'.format(index, int(distances[index] * 100) / 100),\n (w_shift - BORDER, h_shift - 5), 1,\n TEXT_SIZE, BLACK, LWD)\n else:\n cv2.putText(demo_image, '{}'.format(index), (w_shift - BORDER, h_shift - 5), 1,\n TEXT_SIZE, BLACK, LWD)\n\n return demo_image\n\n\ndef visualize(image, impaths, distances, input_size, compute_embedding_time,\n search_in_gallery_time, imshow_delay, presenter, no_show=False):\n \"\"\" Visualizes input frame and top-10 most similar images from the gallery. \"\"\"\n\n input_image = cv2.resize(image, (SIZE * 4, SIZE * 3))\n\n demo_image = np.ones(\n (input_image.shape[0] + SIZE * 2 + BORDER * 4, SIZE * 5 + BORDER * 6, 3),\n dtype=np.uint8) * 200\n\n presenter.drawGraphs(input_image)\n\n demo_image[BORDER:BORDER + input_image.shape[0],\n BORDER:BORDER + input_image.shape[1]] = input_image\n\n cv2.putText(demo_image, 'Gallery size: {}'.format(len(impaths)),\n (BORDER * 2 + input_image.shape[1], BORDER * 2 + 30), 1, TEXT_SIZE, BLACK, LWD)\n if not np.isnan(compute_embedding_time):\n cv2.putText(demo_image,\n 'Embbeding (ms): {}'.format(int(compute_embedding_time * 10000) / 10.0),\n (BORDER * 2 + input_image.shape[1], BORDER * 2 + 60), 1, TEXT_SIZE, BLACK, LWD)\n if not np.isnan(search_in_gallery_time):\n cv2.putText(demo_image,\n 'Gallery search (ms): {}'.format(int(search_in_gallery_time * 10000) / 10.0),\n (BORDER * 2 + input_image.shape[1], BORDER * 2 + 90), 1, TEXT_SIZE, BLACK, LWD)\n\n cv2.putText(demo_image, 'Inp. 
res: {}x{}'.format(input_size[0], input_size[1]),\n (BORDER * 2 + input_image.shape[1], BORDER * 2 + 120), 1, TEXT_SIZE, BLACK, LWD)\n\n demo_image = add_top10_gallery_images(demo_image, impaths, distances, input_image)\n\n if not no_show:\n cv2.imshow('demo_image', demo_image)\n key_pressed = cv2.waitKey(imshow_delay)\n presenter.handleKey(key_pressed)\n return (demo_image, key_pressed & 0xff) if key_pressed != -1 else (demo_image, -1)\n\n return (demo_image, -1)\n", "\"\"\"\nCopyright (c) 2018-2021 Intel Corporation\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport numpy as np\nfrom .base_representation import BaseRepresentation\n\n\nclass MultiLabelRecognitionRepresentation(BaseRepresentation):\n def __init__(self, identifier='', multi_label=None):\n super().__init__(identifier)\n self.multi_label = np.array(multi_label) if isinstance(multi_label, list) else multi_label\n\n\nclass MultiLabelRecognitionAnnotation(MultiLabelRecognitionRepresentation):\n pass\n\n\nclass MultiLabelRecognitionPrediction(MultiLabelRecognitionRepresentation):\n def to_annotation(self, **kwargs):\n return MultiLabelRecognitionAnnotation(self.identifier, self.multi_label)\n", "\"\"\"\nCopyright (c) 2018-2021 Intel Corporation\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nfrom functools import singledispatch\nimport numpy as np\n\nfrom ..config import BaseField, BoolField\nfrom ..dependency import ClassProvider\nfrom ..postprocessor.postprocessor import PostprocessorWithSpecificTargets\nfrom ..representation import (DetectionAnnotation, DetectionPrediction, TextDetectionAnnotation,\n TextDetectionPrediction, PoseEstimationPrediction, PoseEstimationAnnotation)\nfrom ..utils import in_interval, polygon_from_points, convert_to_range\n\n\nclass FilterPostprocessor(PostprocessorWithSpecificTargets):\n __provider__ = 'filter'\n\n annotation_types = (DetectionAnnotation, TextDetectionAnnotation)\n prediction_types = (DetectionPrediction, TextDetectionPrediction)\n\n @classmethod\n def parameters(cls):\n parameters = super().parameters()\n parameters.update({\n 'remove_filtered': BoolField(\n optional=True, default=False,\n description=\"Removing filtered data. 
Annotations support ignoring filtered data without removing\"\n \" as default,in other cases filtered data will be removed automatically.\"\n )\n })\n\n for functor in BaseFilter.providers:\n parameters[functor] = BaseField(optional=True, description=functor)\n\n return parameters\n\n def __init__(self, *args, **kwargs):\n self._filters = []\n self.remove_filtered = False\n super().__init__(*args, **kwargs)\n\n def configure(self):\n config = self.config.copy()\n config.pop('type')\n self.remove_filtered = self.get_value_from_config('remove_filtered')\n config.pop('remove_filtered', False)\n config.pop('annotation_source', None)\n config.pop('prediction_source', None)\n config.pop('apply_to', None)\n\n for key, value in config.items():\n self._filters.append(BaseFilter.provide(key, value))\n\n def process_image(self, annotation, prediction):\n for functor in self._filters:\n for target in annotation:\n self._filter_entry_by(target, functor)\n\n for target in prediction:\n self._filter_entry_by(target, functor)\n\n return annotation, prediction\n\n def _filter_entry_by(self, entry, functor):\n ignored_key = 'difficult_boxes'\n\n if not self.remove_filtered and isinstance(entry, (DetectionAnnotation, DetectionPrediction,\n TextDetectionAnnotation, TextDetectionPrediction,\n PoseEstimationAnnotation, PoseEstimationPrediction)):\n ignored = entry.metadata.setdefault(ignored_key, [])\n ignored.extend(functor(entry))\n else:\n entry.remove(functor(entry))\n\n return entry\n\n\nclass BaseFilter(ClassProvider):\n __provider_type__ = 'filter'\n\n def __init__(self, filter_arg):\n self.filter_arg = filter_arg\n\n def __call__(self, entry):\n return self.apply_filter(entry, self.filter_arg)\n\n def apply_filter(self, entry, filter_arg):\n raise NotImplementedError\n\n\nclass FilterByLabels(BaseFilter):\n __provider__ = 'labels'\n\n def apply_filter(self, entry, labels):\n filtered = []\n for index, label in enumerate(entry.labels):\n if label in labels:\n filtered.append(index)\n\n return filtered\n\n\nclass FilterByMinConfidence(BaseFilter):\n __provider__ = 'min_confidence'\n\n def apply_filter(self, entry, min_confidence):\n filtered = []\n\n if isinstance(entry, DetectionAnnotation):\n return filtered\n\n for index, score in enumerate(entry.scores):\n if score < min_confidence:\n filtered.append(index)\n\n return filtered\n\n\nclass FilterTopK(BaseFilter):\n __provider__ = 'top_k'\n\n def apply_filter(self, entry, top_k):\n filtered = []\n\n if isinstance(entry, DetectionAnnotation):\n return filtered\n\n if len(entry.scores) <= top_k:\n return filtered\n scores_inds = np.argsort(entry.scores)[::-1]\n non_filtered = scores_inds[:int(top_k)]\n\n return [ind for ind in range(len(entry.scores)) if ind not in non_filtered]\n\nclass FilterByHeightRange(BaseFilter):\n __provider__ = 'height_range'\n\n annotation_types = (DetectionAnnotation, TextDetectionAnnotation)\n prediction_types = (DetectionPrediction, TextDetectionPrediction)\n\n def apply_filter(self, entry, height_range):\n @singledispatch\n def filter_func(entry_value, height_range_):\n return []\n\n @filter_func.register(DetectionAnnotation)\n @filter_func.register(DetectionPrediction)\n def _(entry_value, height_range_):\n filtered = []\n for index, (y_min, y_max) in enumerate(zip(entry_value.y_mins, entry_value.y_maxs)):\n height = y_max - y_min\n if not in_interval(height, height_range_):\n filtered.append(index)\n\n return filtered\n\n @filter_func.register(TextDetectionAnnotation)\n @filter_func.register(TextDetectionPrediction)\n def 
_(entry_values, height_range_):\n filtered = []\n for index, polygon_points in enumerate(entry_values.points):\n left_bottom_point, left_top_point, right_top_point, right_bottom_point = polygon_points\n left_side_height = np.linalg.norm(left_bottom_point - left_top_point)\n right_side_height = np.linalg.norm(right_bottom_point - right_top_point)\n if not in_interval(np.mean([left_side_height, right_side_height]), height_range_):\n filtered.append(index)\n\n return filtered\n\n return filter_func(entry, convert_to_range(height_range))\n\n\nclass FilterByWidthRange(BaseFilter):\n __provider__ = 'width_range'\n\n annotation_types = (DetectionAnnotation, TextDetectionAnnotation)\n prediction_types = (DetectionPrediction, TextDetectionPrediction)\n\n def apply_filter(self, entry, width_range):\n @singledispatch\n def filter_func(entry_value, width_range_):\n return []\n\n @filter_func.register(DetectionAnnotation)\n @filter_func.register(DetectionPrediction)\n def _(entry_value, width_range_):\n filtered = []\n for index, (x_min, x_max) in enumerate(zip(entry_value.x_mins, entry_value.x_maxs)):\n width = x_max - x_min\n if not in_interval(width, width_range_):\n filtered.append(index)\n\n return filtered\n\n @filter_func.register(TextDetectionAnnotation)\n @filter_func.register(TextDetectionPrediction)\n def _(entry_values, width_range_):\n filtered = []\n for index, polygon_points in enumerate(entry_values.points):\n left_bottom_point, left_top_point, right_top_point, right_bottom_point = polygon_points\n top_width = np.linalg.norm(right_top_point - left_top_point)\n bottom_width = np.linalg.norm(right_bottom_point - left_bottom_point)\n if not in_interval(top_width, width_range_) or not in_interval(bottom_width, width_range_):\n filtered.append(index)\n\n return filtered\n\n return filter_func(entry, convert_to_range(width_range))\n\n\nclass FilterByAreaRange(BaseFilter):\n __provider__ = 'area_range'\n\n annotation_types = (TextDetectionAnnotation, PoseEstimationAnnotation)\n prediction_types = (TextDetectionPrediction, )\n\n def apply_filter(self, entry, area_range):\n area_range = convert_to_range(area_range)\n\n @singledispatch\n def filter_func(entry, area_range):\n return []\n\n @filter_func.register(PoseEstimationAnnotation)\n @filter_func.register(PoseEstimationPrediction)\n def _(entry, area_range):\n filtered = []\n areas = entry.areas\n for area_id, area in enumerate(areas):\n if not in_interval(area, area_range):\n filtered.append(area_id)\n return filtered\n\n @filter_func.register(TextDetectionAnnotation)\n @filter_func.register(TextDetectionPrediction)\n def _(entry, area_range):\n filtered = []\n for index, polygon_points in enumerate(entry.points):\n if not in_interval(polygon_from_points(polygon_points).area, area_range):\n filtered.append(index)\n return filtered\n\n return filter_func(entry, area_range)\n\n\nclass FilterEmpty(BaseFilter):\n __provider__ = 'is_empty'\n\n def apply_filter(self, entry: DetectionAnnotation, is_empty):\n return np.where(np.bitwise_or(entry.x_maxs - entry.x_mins <= 0, entry.y_maxs - entry.y_mins <= 0))[0]\n\n\nclass FilterByVisibility(BaseFilter):\n __provider__ = 'min_visibility'\n\n _VISIBILITY_LEVELS = {\n 'heavy occluded': 0,\n 'partially occluded': 1,\n 'visible': 2\n }\n\n def apply_filter(self, entry, min_visibility):\n filtered = []\n min_visibility_level = self.visibility_level(min_visibility)\n for index, visibility in enumerate(entry.metadata.get('visibilities', [])):\n if self.visibility_level(visibility) < min_visibility_level:\n 
filtered.append(index)\n\n return filtered\n\n def visibility_level(self, visibility):\n level = self._VISIBILITY_LEVELS.get(visibility)\n if level is None:\n message = 'Unknown visibility level \"{}\". Supported only \"{}\"'\n raise ValueError(message.format(visibility, ','.join(self._VISIBILITY_LEVELS.keys())))\n\n return level\n\n\nclass FilterByAspectRatio(BaseFilter):\n __provider__ = 'aspect_ratio'\n\n def apply_filter(self, entry, aspect_ratio):\n aspect_ratio = convert_to_range(aspect_ratio)\n\n filtered = []\n coordinates = zip(entry.x_mins, entry.y_mins, entry.x_maxs, entry.y_maxs)\n for index, (x_min, y_min, x_max, y_max) in enumerate(coordinates):\n ratio = (y_max - y_min) / np.maximum(x_max - x_min, np.finfo(np.float64).eps)\n if not in_interval(ratio, aspect_ratio):\n filtered.append(index)\n\n return filtered\n\n\nclass FilterByAreaRatio(BaseFilter):\n __provider__ = 'area_ratio'\n\n def apply_filter(self, entry, area_ratio):\n area_ratio = convert_to_range(area_ratio)\n\n filtered = []\n if not isinstance(entry, DetectionAnnotation):\n return filtered\n\n image_size = entry.metadata.get('image_size')\n if not image_size:\n return filtered\n image_size = image_size[0]\n\n image_area = image_size[0] * image_size[1]\n\n occluded_indices = entry.metadata.get('is_occluded', [])\n coordinates = zip(entry.x_mins, entry.y_mins, entry.x_maxs, entry.y_maxs)\n for index, (x_min, y_min, x_max, y_max) in enumerate(coordinates):\n width, height = x_max - x_min, y_max - y_min\n area = np.sqrt(float(width * height) / np.maximum(image_area, np.finfo(np.float64).eps))\n if not in_interval(area, area_ratio) or index in occluded_indices:\n filtered.append(index)\n\n return filtered\n\n\nclass FilterInvalidBoxes(BaseFilter):\n __provider__ = 'invalid_boxes'\n\n def apply_filter(self, entry, invalid_boxes):\n infinite_mask_x = np.logical_or(~np.isfinite(entry.x_mins), ~np.isfinite(entry.x_maxs)) # pylint: disable=E1130\n infinite_mask_y = np.logical_or(~np.isfinite(entry.y_mins), ~np.isfinite(entry.y_maxs)) # pylint: disable=E1130\n infinite_mask = np.logical_or(infinite_mask_x, infinite_mask_y)\n\n return np.argwhere(infinite_mask).reshape(-1).tolist()\n", "\"\"\"\nCopyright (c) 2018-2021 Intel Corporation\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport warnings\nfrom collections import OrderedDict\nfrom functools import singledispatch\nimport numpy as np\n\nfrom ..representation import (\n RegressionAnnotation,\n RegressionPrediction,\n FacialLandmarksAnnotation,\n FacialLandmarksPrediction,\n FacialLandmarks3DAnnotation,\n FacialLandmarks3DPrediction,\n GazeVectorAnnotation,\n GazeVectorPrediction,\n DepthEstimationAnnotation,\n DepthEstimationPrediction,\n ImageProcessingAnnotation,\n ImageProcessingPrediction,\n FeaturesRegressionAnnotation,\n PoseEstimationAnnotation,\n PoseEstimationPrediction,\n OpticalFlowAnnotation,\n OpticalFlowPrediction,\n BackgroundMattingAnnotation,\n BackgroundMattingPrediction,\n NiftiRegressionAnnotation,\n)\n\nfrom .metric import PerImageEvaluationMetric\nfrom 
..config import BaseField, NumberField, BoolField, ConfigError\nfrom ..utils import string_to_tuple, finalize_metric_result, contains_all\n\n\nclass BaseRegressionMetric(PerImageEvaluationMetric):\n annotation_types = (\n RegressionAnnotation, FeaturesRegressionAnnotation, DepthEstimationAnnotation, ImageProcessingAnnotation,\n BackgroundMattingAnnotation, NiftiRegressionAnnotation,\n )\n prediction_types = (\n RegressionPrediction, DepthEstimationPrediction, ImageProcessingPrediction, BackgroundMattingPrediction,\n )\n\n def __init__(self, value_differ, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.value_differ = value_differ\n self.calculate_diff = singledispatch(self._calculate_diff_regression_rep)\n self.calculate_diff.register(DepthEstimationAnnotation, self._calculate_diff_depth_estimation_rep)\n\n @classmethod\n def parameters(cls):\n params = super().parameters()\n params.update({\n 'max_error': BoolField(optional=True, default=False, description='Calculate max error in magnitude')\n })\n return params\n\n def configure(self):\n self.max_error = self.get_value_from_config('max_error')\n self.meta.update({\n 'names': ['mean', 'std'] if not self.max_error else ['mean', 'std', 'max_error'],\n 'scale': 1, 'postfix': ' ', 'calculate_mean': False, 'target': 'higher-worse'\n })\n self.magnitude = []\n\n def update(self, annotation, prediction):\n diff = self.calculate_diff(annotation, prediction)\n if isinstance(diff, dict):\n if not self.magnitude:\n self.magnitude = OrderedDict()\n for key, difference in diff.items():\n v_mag = self.magnitude.get(key, [])\n v_mag.append(difference)\n self.magnitude[key] = v_mag\n return np.mean(next(iter(diff.values())))\n\n if self.profiler:\n if isinstance(annotation, RegressionAnnotation):\n ann_value, pred_value = annotation.value, prediction.value\n self.profiler.update(annotation.identifier, self.name, diff, ann_value, pred_value)\n else:\n self.profiler.update(annotation.identifier, self.name, '', '', diff)\n self.magnitude.append(diff)\n if np.ndim(diff) > 1:\n return np.mean(diff)\n\n return diff\n\n def _calculate_diff_regression_rep(self, annotation, prediction):\n if isinstance(annotation.value, dict):\n if not isinstance(prediction.value, dict):\n if len(annotation.value) != 1:\n raise ConfigError('both annotation and prediction should be dict-like in case of multiple outputs')\n return self.value_differ(next(iter(annotation.value.values())), prediction.value)\n diff_dict = OrderedDict()\n for key in annotation.value:\n diff = self.value_differ(annotation.value[key], prediction.value[key])\n if np.ndim(diff) > 1:\n diff = np.mean(diff)\n diff_dict[key] = diff\n return diff_dict\n if isinstance(prediction.value, dict):\n if len(prediction.value) != 1:\n raise ConfigError('annotation for all predictions should be provided')\n diff = self.value_differ(annotation.value, next(iter(prediction.value.values())))\n if not np.isscalar(diff) and np.size(diff) > 1:\n diff = np.mean(diff)\n return diff\n diff = self.value_differ(annotation.value, prediction.value)\n if not np.isscalar(diff) and np.size(diff) > 1:\n diff = np.mean(diff)\n return diff\n\n def _calculate_diff_depth_estimation_rep(self, annotation, prediction):\n diff = annotation.mask * self.value_differ(annotation.depth_map, prediction.depth_map)\n ret = 0\n\n if np.sum(annotation.mask) > 0:\n ret = np.sum(diff) / np.sum(annotation.mask)\n\n return ret\n\n def evaluate(self, annotations, predictions):\n if self.profiler:\n self.profiler.finish()\n if 
isinstance(self.magnitude, dict):\n names, result = [], []\n for key, values in self.magnitude.items():\n names.extend(\n ['{}@mean'.format(key), '{}@std'.format(key)]\n if not self.max_error else ['{}@mean'.format(key), '{}@std'.format(key), '{}@max_errir'.format(key)]\n )\n result.extend([np.mean(values), np.std(values)])\n if self.max_error:\n result.append(np.max(values))\n self.meta['names'] = names\n return result\n\n if not self.max_error:\n return np.mean(self.magnitude), np.std(self.magnitude)\n return np.mean(self.magnitude), np.std(self.magnitude), np.max(self.magnitude)\n\n def reset(self):\n self.magnitude = []\n if self.profiler:\n self.profiler.reset()\n\n\nclass BaseRegressionOnIntervals(PerImageEvaluationMetric):\n annotation_types = (RegressionAnnotation, )\n prediction_types = (RegressionPrediction, )\n\n @classmethod\n def parameters(cls):\n parameters = super().parameters()\n parameters.update({\n 'intervals': BaseField(optional=True, description=\"Comma-separated list of interval boundaries.\"),\n 'start': NumberField(\n optional=True, default=0.0,\n description=\"Start value: way to generate range of intervals from start to end with length step.\"),\n 'end': NumberField(\n optional=True,\n description=\"Stop value: way to generate range of intervals from start to end with length step.\"\n ),\n 'step': NumberField(\n optional=True, default=1.0,\n description=\"Step value: way to generate range of intervals from start to end with length step.\"\n ),\n 'ignore_values_not_in_interval': BoolField(\n optional=True, default=True,\n description=\"Allows create additional intervals for values less than minimal value \"\n \"in interval and greater than maximal.\"\n )\n })\n\n return parameters\n\n def __init__(self, value_differ, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.value_differ = value_differ\n\n def configure(self):\n self.meta.update({'scale': 1, 'postfix': ' ', 'calculate_mean': False, 'target': 'higher-worse'})\n self.ignore_out_of_range = self.get_value_from_config('ignore_values_not_in_interval')\n\n self.intervals = self.get_value_from_config('intervals')\n if not self.intervals:\n stop = self.get_value_from_config('end')\n if not stop:\n raise ConfigError('intervals or start-step-end of interval should be specified for metric')\n\n start = self.get_value_from_config('start')\n step = self.get_value_from_config('step')\n self.intervals = np.arange(start, stop + step, step)\n\n if not isinstance(self.intervals, (list, np.ndarray)):\n self.intervals = string_to_tuple(self.intervals)\n\n self.intervals = np.unique(self.intervals)\n self.magnitude = [[] for _ in range(len(self.intervals) + 1)]\n self._create_meta()\n\n def update(self, annotation, prediction):\n index = find_interval(annotation.value, self.intervals)\n diff = self.value_differ(annotation.value, prediction.value)\n self.magnitude[index].append(diff)\n if self.profiler:\n self.profiler.update(annotation.identifier, self.name, diff, annotation.value, prediction.value)\n\n return diff\n\n def evaluate(self, annotations, predictions):\n if self.ignore_out_of_range:\n self.magnitude = self.magnitude[1:-1]\n\n result = [[np.mean(values), np.std(values)] if values else [np.nan, np.nan] for values in self.magnitude]\n result, self.meta['names'] = finalize_metric_result(np.reshape(result, -1), self.meta['names'])\n\n if not result:\n warnings.warn(\"No values in given interval\")\n result.append(0)\n\n if self.profiler:\n self.profiler.finish()\n\n return result\n\n def _create_meta(self):\n 
self.meta['names'] = ([])\n if not self.ignore_out_of_range:\n self.meta['names'] = (['mean: < ' + str(self.intervals[0]), 'std: < ' + str(self.intervals[0])])\n\n for index in range(len(self.intervals) - 1):\n self.meta['names'].append('mean: <= ' + str(self.intervals[index]) + ' < ' + str(self.intervals[index + 1]))\n self.meta['names'].append('std: <= ' + str(self.intervals[index]) + ' < ' + str(self.intervals[index + 1]))\n\n if not self.ignore_out_of_range:\n self.meta['names'].append('mean: > ' + str(self.intervals[-1]))\n self.meta['names'].append('std: > ' + str(self.intervals[-1]))\n\n def reset(self):\n self.magnitude = [[] for _ in range(len(self.intervals) + 1)]\n self._create_meta()\n if self.profiler:\n self.profiler.finish()\n\n\nclass MeanAbsoluteError(BaseRegressionMetric):\n __provider__ = 'mae'\n\n def __init__(self, *args, **kwargs):\n super().__init__(mae_differ, *args, **kwargs)\n\n\nclass MeanSquaredError(BaseRegressionMetric):\n __provider__ = 'mse'\n\n def __init__(self, *args, **kwargs):\n super().__init__(mse_differ, *args, **kwargs)\n\n\nclass Log10Error(BaseRegressionMetric):\n __provider__ = 'log10_error'\n\n def __init__(self, *args, **kwargs):\n super().__init__(log10_differ, *args, **kwargs)\n\n\nclass MeanAbsolutePercentageError(BaseRegressionMetric):\n __provider__ = 'mape'\n\n def __init__(self, *args, **kwargs):\n super().__init__(mape_differ, *args, **kwargs)\n\n\nclass RootMeanSquaredError(BaseRegressionMetric):\n __provider__ = 'rmse'\n\n def __init__(self, *args, **kwargs):\n super().__init__(mse_differ, *args, **kwargs)\n\n def update(self, annotation, prediction):\n rmse = np.sqrt(self.calculate_diff(annotation, prediction))\n if self.profiler:\n if isinstance(annotation, RegressionAnnotation):\n ann_value, pred_value = annotation.value, prediction.value\n self.profiler.update(annotation.identifier, self.name, rmse, ann_value, pred_value)\n else:\n self.profiler.update(annotation.identifier, self.name, rmse)\n self.magnitude.append(rmse)\n return rmse\n\n\nclass MeanAbsoluteErrorOnInterval(BaseRegressionOnIntervals):\n __provider__ = 'mae_on_interval'\n\n def __init__(self, *args, **kwargs):\n super().__init__(mae_differ, *args, **kwargs)\n\n\nclass MeanSquaredErrorOnInterval(BaseRegressionOnIntervals):\n __provider__ = 'mse_on_interval'\n\n def __init__(self, *args, **kwargs):\n super().__init__(mse_differ, *args, **kwargs)\n\n\nclass RootMeanSquaredErrorOnInterval(BaseRegressionOnIntervals):\n __provider__ = 'rmse_on_interval'\n\n def __init__(self, *args, **kwargs):\n super().__init__(mse_differ, *args, **kwargs)\n\n def update(self, annotation, prediction):\n mse = super().update(annotation, prediction)\n return np.sqrt(mse)\n\n def evaluate(self, annotations, predictions):\n if self.ignore_out_of_range:\n self.magnitude = self.magnitude[1:-1]\n\n result = []\n for values in self.magnitude:\n error = [np.sqrt(np.mean(values)), np.sqrt(np.std(values))] if values else [np.nan, np.nan]\n result.append(error)\n\n result, self.meta['names'] = finalize_metric_result(np.reshape(result, -1), self.meta['names'])\n\n if not result:\n warnings.warn(\"No values in given interval\")\n result.append(0)\n if self.profiler:\n self.profiler.finish()\n\n return result\n\n\ndef relative_err(target, pred):\n if len(target.shape) > 2:\n target = target.flatten()\n if len(pred.shape) > 2:\n pred = pred.flatten()\n size = min(target.size, pred.size)\n return np.linalg.norm(target[:size] - pred[:size], 2) / (np.linalg.norm(target[:size], 2) + 
np.finfo(float).eps)\n\n\nclass RelativeL2Error(BaseRegressionMetric):\n __provider__ = 'relative_l2_error'\n\n def __init__(self, *args, **kwargs):\n super().__init__(relative_err, *args, **kwargs)\n\n\nclass FacialLandmarksPerPointNormedError(PerImageEvaluationMetric):\n __provider__ = 'per_point_normed_error'\n\n annotation_types = (FacialLandmarksAnnotation, FacialLandmarks3DAnnotation)\n prediction_types = (FacialLandmarksPrediction, FacialLandmarks3DPrediction)\n\n def configure(self):\n self.meta.update({\n 'scale': 1, 'postfix': ' ', 'calculate_mean': True, 'data_format': '{:.4f}', 'target': 'higher-worse'\n })\n self.magnitude = []\n\n def update(self, annotation, prediction):\n result = point_regression_differ(\n annotation.x_values, annotation.y_values, prediction.x_values, prediction.y_values\n )\n result /= np.maximum(annotation.interocular_distance, np.finfo(np.float64).eps)\n self.magnitude.append(result)\n if self.profiler:\n self.profiler.update(\n annotation.identifier,\n self.name,\n annotation.x_values, annotation.y_values,\n prediction.x_values, prediction.y_values,\n result\n )\n\n return result\n\n def evaluate(self, annotations, predictions):\n num_points = np.shape(self.magnitude)[1]\n point_result_name_pattern = 'point_{}_normed_error'\n self.meta['names'] = [point_result_name_pattern.format(point_id) for point_id in range(num_points)]\n per_point_rmse = np.mean(self.magnitude, axis=0)\n per_point_rmse, self.meta['names'] = finalize_metric_result(per_point_rmse, self.meta['names'])\n if self.profiler:\n self.profiler.finish()\n\n return per_point_rmse\n\n def reset(self):\n self.magnitude = []\n if self.profiler:\n self.profiler.reset()\n\n\nclass FacialLandmarksNormedError(PerImageEvaluationMetric):\n __provider__ = 'normed_error'\n\n annotation_types = (FacialLandmarksAnnotation, FacialLandmarks3DAnnotation)\n prediction_types = (FacialLandmarksPrediction, FacialLandmarks3DPrediction)\n\n @classmethod\n def parameters(cls):\n parameters = super().parameters()\n parameters.update({\n 'calculate_std': BoolField(\n optional=True, default=False, description=\"Allows calculation of standard deviation\"\n ),\n 'percentile': NumberField(\n optional=True, value_type=int, min_value=0, max_value=100,\n description=\"Calculate error rate for given percentile.\"\n )\n })\n\n return parameters\n\n def configure(self):\n self.calculate_std = self.get_value_from_config('calculate_std')\n self.percentile = self.get_value_from_config('percentile')\n self.meta.update({\n 'scale': 1,\n 'postfix': ' ',\n 'calculate_mean': not self.calculate_std or not self.percentile,\n 'data_format': '{:.4f}',\n 'target': 'higher-worse'\n })\n self.magnitude = []\n\n def update(self, annotation, prediction):\n per_point_result = point_regression_differ(\n annotation.x_values, annotation.y_values, prediction.x_values, prediction.y_values\n )\n avg_result = np.sum(per_point_result) / len(per_point_result)\n avg_result /= np.maximum(annotation.interocular_distance, np.finfo(np.float64).eps)\n if self.profiler:\n self.profiler.update(\n annotation.identifier,\n self.name,\n annotation.x_values, annotation.y_values,\n prediction.x_values, prediction.y_values,\n avg_result\n )\n self.magnitude.append(avg_result)\n\n return avg_result\n\n def evaluate(self, annotations, predictions):\n self.meta['names'] = ['mean']\n result = [np.mean(self.magnitude)]\n\n if self.calculate_std:\n result.append(np.std(self.magnitude))\n self.meta['names'].append('std')\n\n if self.percentile:\n sorted_magnitude = 
np.sort(self.magnitude)\n index = len(self.magnitude) / 100 * self.percentile\n result.append(sorted_magnitude[int(index)])\n self.meta['names'].append('{}th percentile'.format(self.percentile))\n\n if self.profiler:\n self.profiler.finish()\n\n return result\n\n def reset(self):\n self.magnitude = []\n if self.profiler:\n self.profiler.reset()\n\n\nclass NormalizedMeanError(PerImageEvaluationMetric):\n __provider__ = 'nme'\n annotation_types = (FacialLandmarks3DAnnotation, )\n prediction_types = (FacialLandmarks3DPrediction, )\n\n @classmethod\n def parameters(cls):\n parameters = super().parameters()\n parameters.update({\n 'only_2d': BoolField(\n optional=True, default=False, description=\"Allows metric calculation only across x and y dimensions\"\n ),\n })\n\n return parameters\n\n def configure(self):\n self.meta.update({\n 'scale': 1,\n 'postfix': ' ',\n 'data_format': '{:.4f}',\n 'target': 'higher-worse'\n })\n self.only_2d = self.get_value_from_config('only_2d')\n self.magnitude = []\n\n def update(self, annotation, prediction):\n gt = np.array([annotation.x_values, annotation.y_values, annotation.z_values]).T\n pred = np.array([prediction.x_values, prediction.y_values, prediction.z_values]).T\n\n diff = np.square(gt - pred)\n dist = np.sqrt(np.sum(diff[:, 0:2], axis=1)) if self.only_2d else np.sqrt(np.sum(diff, axis=1))\n normalized_result = dist / annotation.normalization_coef(self.only_2d)\n self.magnitude.append(np.mean(normalized_result))\n\n return np.mean(normalized_result)\n\n def evaluate(self, annotations, predictions):\n self.meta['names'] = ['mean']\n return np.mean(self.magnitude)\n\n def reset(self):\n self.magnitude = []\n\n\ndef calculate_distance(x_coords, y_coords, selected_points):\n first_point = [x_coords[selected_points[0]], y_coords[selected_points[0]]]\n second_point = [x_coords[selected_points[1]], y_coords[selected_points[1]]]\n return np.linalg.norm(np.subtract(first_point, second_point))\n\n\ndef mae_differ(annotation_val, prediction_val):\n return np.abs(annotation_val - prediction_val)\n\n\ndef mse_differ(annotation_val, prediction_val):\n return (annotation_val - prediction_val)**2\n\n\ndef find_interval(value, intervals):\n for index, point in enumerate(intervals):\n if value < point:\n return index\n\n return len(intervals)\n\n\ndef point_regression_differ(annotation_val_x, annotation_val_y, prediction_val_x, prediction_val_y):\n if len(np.shape(prediction_val_x)) == 2:\n prediction_val_x = prediction_val_x[0]\n prediction_val_y = prediction_val_y[0]\n loss = np.subtract(list(zip(annotation_val_x, annotation_val_y)), list(zip(prediction_val_x, prediction_val_y)))\n return np.linalg.norm(loss, 2, axis=1)\n\n\n\ndef angle_differ(gt_gaze_vector, predicted_gaze_vector):\n return np.arccos(\n gt_gaze_vector.dot(predicted_gaze_vector) / np.linalg.norm(gt_gaze_vector)\n / np.linalg.norm(predicted_gaze_vector)\n ) * 180 / np.pi\n\n\ndef log10_differ(annotation_val, prediction_val):\n return np.abs(np.log10(annotation_val) - np.log10(prediction_val))\n\n\ndef mape_differ(annotation_val, prediction_val):\n return np.abs(annotation_val - prediction_val) / annotation_val\n\n\nclass AngleError(BaseRegressionMetric):\n __provider__ = 'angle_error'\n\n annotation_types = (GazeVectorAnnotation, )\n prediction_types = (GazeVectorPrediction, )\n\n def __init__(self, *args, **kwargs):\n super().__init__(angle_differ, *args, **kwargs)\n\n\nclass PercentageCorrectKeypoints(PerImageEvaluationMetric):\n __provider__ = 'pckh'\n annotation_types = (PoseEstimationAnnotation, 
)\n prediction_types = (PoseEstimationPrediction, )\n\n @classmethod\n def parameters(cls):\n params = super().parameters()\n params.update({\n 'threshold': NumberField(optional=True, default=0.5),\n 'score_bias': NumberField(optional=True, default=0.6),\n 'num_joints': NumberField(optional=True, default=16, value_type=int)\n })\n return params\n\n def configure(self):\n if not self.dataset.metadata or 'joints' not in self.dataset.metadata:\n raise ConfigError('PCKh metrics require joints providing in dataset_meta'\n 'Please provide dataset meta file or regenerate annotation')\n self.joints = self.dataset.metadata['joints']\n self.num_joints = self.get_value_from_config('num_joints')\n self.jnt_count = np.zeros(self.num_joints)\n self.pck = np.zeros(self.num_joints)\n self.threshold = self.get_value_from_config('threshold')\n self.score_bias = self.get_value_from_config('score_bias')\n self.meta.update({\n 'names': ['mean', 'head', 'shoulder', 'elbow', 'wrist', 'hip', 'knee', 'ankle', 'mean'],\n 'calculate_mean': False\n })\n if not contains_all(\n self.joints, ['head', 'lsho', 'rsho', 'lwri', 'rwri', 'lhip', 'rhip', 'lkne', 'rkne', 'lank', 'rank']\n ):\n raise ConfigError('not all important joints are provided')\n\n def update(self, annotation, prediction):\n jnt_visible = annotation.visibility\n pos_pred = np.array([[x, y] for x, y in zip(prediction.x_values, prediction.y_values)])\n pos_gt = np.array([[x, y] for x, y in zip(annotation.x_values, annotation.y_values)])\n uv_error = pos_pred - pos_gt\n uv_err = np.linalg.norm(uv_error, axis=1)\n headbox = np.array(annotation.metadata['headbox'])\n headsizes = headbox[1] - headbox[0]\n headsizes = np.linalg.norm(headsizes, axis=0)\n headsizes *= self.score_bias\n scale = headsizes\n scaled_uv_err = np.divide(uv_err, scale)\n scaled_uv_err = np.multiply(scaled_uv_err, jnt_visible)\n self.jnt_count += jnt_visible\n less_than_threshold = np.multiply((scaled_uv_err < self.threshold), jnt_visible)\n self.pck += less_than_threshold\n return np.mean(np.divide(\n less_than_threshold.astype(float),\n jnt_visible.astype(float),\n out=np.zeros_like(less_than_threshold, dtype=float),\n where=jnt_visible != 0\n ))\n\n def evaluate(self, annotations, predictions):\n full_score = np.divide(self.pck, self.jnt_count, out=np.zeros_like(self.jnt_count), where=self.jnt_count != 0)\n full_score = np.ma.array(full_score, mask=False)\n full_score[6:8].mask = True\n return [\n np.mean(full_score),\n full_score[self.joints['head']],\n 0.5 * (full_score[self.joints['lsho']] + full_score[self.joints['rsho']]),\n 0.5 * (full_score[self.joints['lelb']] + full_score[self.joints['relb']]),\n 0.5 * (full_score[self.joints['lwri']] + full_score[self.joints['rwri']]),\n 0.5 * (full_score[self.joints['lhip']] + full_score[self.joints['rhip']]),\n 0.5 * (full_score[self.joints['lkne']] + full_score[self.joints['rkne']]),\n 0.5 * (full_score[self.joints['lank']] + full_score[self.joints['rank']]),\n ]\n\n def reset(self):\n self.jnt_count = np.zeros(self.num_joints)\n self.pck = np.zeros(self.num_joints)\n\n\nclass EndPointError(BaseRegressionMetric):\n __provider__ = 'epe'\n annotation_types = (OpticalFlowAnnotation, )\n prediction_types = (OpticalFlowPrediction, )\n\n def __init__(self, *args, **kwargs):\n def l2_diff(ann_value, pred_value):\n return np.mean(np.linalg.norm(ann_value - pred_value, ord=2, axis=2))\n super().__init__(l2_diff, *args, **kwargs)\n" ]
[ [ "numpy.ones" ], [ "numpy.asarray", "numpy.argsort" ], [ "numpy.isnan", "numpy.ones" ], [ "numpy.array" ], [ "numpy.isfinite", "numpy.linalg.norm", "numpy.finfo", "numpy.logical_or", "numpy.argwhere", "numpy.mean", "numpy.argsort", "numpy.bitwise_or" ], [ "numpy.sqrt", "numpy.max", "numpy.mean", "numpy.zeros_like", "numpy.ma.array", "numpy.divide", "numpy.square", "numpy.unique", "numpy.reshape", "numpy.arange", "numpy.subtract", "numpy.finfo", "numpy.std", "numpy.size", "numpy.zeros", "numpy.multiply", "numpy.ndim", "numpy.log10", "numpy.array", "numpy.sum", "numpy.abs", "numpy.linalg.norm", "numpy.sort", "numpy.shape", "numpy.isscalar" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
AXATechLab/models
[ "c39ac760cfa6ce2339f5781f2a78d70db3ea5bb2" ]
[ "research/object_detection/meta_architectures/faster_rcnn_meta_arch_override_RPN.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Faster R-CNN meta-architecture definition.\n\nGeneral tensorflow implementation of Faster R-CNN detection models.\n\nSee Faster R-CNN: Ren, Shaoqing, et al.\n\"Faster R-CNN: Towards real-time object detection with region proposal\nnetworks.\" Advances in neural information processing systems. 2015.\n\nWe allow for three modes: number_of_stages={1, 2, 3}. In case of 1 stage,\nall of the user facing methods (e.g., predict, postprocess, loss) can be used as\nif the model consisted only of the RPN, returning class agnostic proposals\n(these can be thought of as approximate detections with no associated class\ninformation). In case of 2 stages, proposals are computed, then passed\nthrough a second stage \"box classifier\" to yield (multi-class) detections.\nFinally, in case of 3 stages which is only used during eval, proposals are\ncomputed, then passed through a second stage \"box classifier\" that will compute\nrefined boxes and classes, and then features are pooled from the refined and\nnon-maximum suppressed boxes and are passed through the box classifier again. If\nnumber of stages is 3 during training it will be reduced to two automatically.\n\nImplementations of Faster R-CNN models must define a new\nFasterRCNNFeatureExtractor and override three methods: `preprocess`,\n`_extract_proposal_features` (the first stage of the model), and\n`_extract_box_classifier_features` (the second stage of the model). Optionally,\nthe `restore_fn` method can be overridden. See tests for an example.\n\nA few important notes:\n+ Batching conventions: We support batched inference and training where\nall images within a batch have the same resolution. Batch sizes are determined\ndynamically via the shape of the input tensors (rather than being specified\ndirectly as, e.g., a model constructor).\n\nA complication is that due to non-max suppression, we are not guaranteed to get\nthe same number of proposals from the first stage RPN (region proposal network)\nfor each image (though in practice, we should often get the same number of\nproposals). For this reason we pad to a max number of proposals per image\nwithin a batch. This `self.max_num_proposals` property is set to the\n`first_stage_max_proposals` parameter at inference time and the\n`second_stage_batch_size` at training time since we subsample the batch to\nbe sent through the box classifier during training.\n\nFor the second stage of the pipeline, we arrange the proposals for all images\nwithin the batch along a single batch dimension. For example, the input to\n_extract_box_classifier_features is a tensor of shape\n`[total_num_proposals, crop_height, crop_width, depth]` where\ntotal_num_proposals is batch_size * self.max_num_proposals. 
(And note that per\nthe above comment, a subset of these entries correspond to zero paddings.)\n\n+ Coordinate representations:\nFollowing the API (see model.DetectionModel definition), our outputs after\npostprocessing operations are always normalized boxes; however, internally, we\nsometimes convert to absolute --- e.g. for loss computation. In particular,\nanchors and proposal_boxes are both represented as absolute coordinates.\n\nImages are resized in the `preprocess` method.\n\nThe Faster R-CNN meta architecture has two post-processing methods\n`_postprocess_rpn` which is applied after first stage and\n`_postprocess_box_classifier` which is applied after second stage. There are\nthree different ways post-processing can happen depending on number_of_stages\nconfigured in the meta architecture:\n\n1. When number_of_stages is 1:\n `_postprocess_rpn` is run as part of the `postprocess` method where\n true_image_shapes is used to clip proposals, perform non-max suppression and\n normalize them.\n2. When number of stages is 2:\n `_postprocess_rpn` is run as part of the `_predict_second_stage` method where\n `resized_image_shapes` is used to clip proposals, perform non-max suppression\n and normalize them. In this case `postprocess` method skips `_postprocess_rpn`\n and only runs `_postprocess_box_classifier` using `true_image_shapes` to clip\n detections, perform non-max suppression and normalize them.\n3. When number of stages is 3:\n `_postprocess_rpn` is run as part of the `_predict_second_stage` using\n `resized_image_shapes` to clip proposals, perform non-max suppression and\n normalize them. Subsequently, `_postprocess_box_classifier` is run as part of\n `_predict_third_stage` using `true_image_shapes` to clip detections, perform\n non-max suppression and normalize them. In this case, the `postprocess` method\n skips both `_postprocess_rpn` and `_postprocess_box_classifier`.\n\"\"\"\nfrom abc import abstractmethod\nfrom functools import partial\nimport tensorflow as tf\nimport json\nimport numpy as np\nimport os\n\nfrom object_detection.anchor_generators import grid_anchor_generator\nfrom object_detection.builders import box_predictor_builder\nfrom object_detection.core import box_list\nfrom object_detection.core import box_list_ops\nfrom object_detection.core import box_predictor\nfrom object_detection.core import losses\nfrom object_detection.core import model\nfrom object_detection.core import post_processing\nfrom object_detection.core import standard_fields as fields\nfrom object_detection.core import target_assigner\nfrom object_detection.utils import ops\nfrom object_detection.utils import shape_utils\nimport sys # for debug\nsys.path.append(\"/notebooks/text-renderer/\")\nimport data_util\n\nslim = tf.contrib.slim\n\n\nclass FasterRCNNFeatureExtractor(object):\n \"\"\"Faster R-CNN Feature Extractor definition.\"\"\"\n\n def __init__(self,\n is_training,\n first_stage_features_stride,\n batch_norm_trainable=False,\n reuse_weights=None,\n weight_decay=0.0):\n \"\"\"Constructor.\n\n Args:\n is_training: A boolean indicating whether the training version of the\n computation graph should be constructed.\n first_stage_features_stride: Output stride of extracted RPN feature map.\n batch_norm_trainable: Whether to update batch norm parameters during\n training or not. When training with a relatively large batch size\n (e.g. 8), it could be desirable to enable batch norm update.\n reuse_weights: Whether to reuse variables. 
Default is None.\n weight_decay: float weight decay for feature extractor (default: 0.0).\n \"\"\"\n self._is_training = is_training\n self._first_stage_features_stride = first_stage_features_stride\n self._train_batch_norm = (batch_norm_trainable and is_training)\n self._reuse_weights = reuse_weights\n self._weight_decay = weight_decay\n\n @abstractmethod\n def preprocess(self, resized_inputs):\n \"\"\"Feature-extractor specific preprocessing (minus image resizing).\"\"\"\n pass\n\n def extract_proposal_features(self, preprocessed_inputs, scope):\n \"\"\"Extracts first stage RPN features.\n\n This function is responsible for extracting feature maps from preprocessed\n images. These features are used by the region proposal network (RPN) to\n predict proposals.\n\n Args:\n preprocessed_inputs: A [batch, height, width, channels] float tensor\n representing a batch of images.\n scope: A scope name.\n\n Returns:\n rpn_feature_map: A tensor with shape [batch, height, width, depth]\n activations: A dictionary mapping activation tensor names to tensors.\n \"\"\"\n with tf.variable_scope(scope, values=[preprocessed_inputs]):\n return self._extract_proposal_features(preprocessed_inputs, scope)\n\n @abstractmethod\n def _extract_proposal_features(self, preprocessed_inputs, scope):\n \"\"\"Extracts first stage RPN features, to be overridden.\"\"\"\n pass\n\n def extract_box_classifier_features(self, proposal_feature_maps, scope):\n \"\"\"Extracts second stage box classifier features.\n\n Args:\n proposal_feature_maps: A 4-D float tensor with shape\n [batch_size * self.max_num_proposals, crop_height, crop_width, depth]\n representing the feature map cropped to each proposal.\n scope: A scope name.\n\n Returns:\n proposal_classifier_features: A 4-D float tensor with shape\n [batch_size * self.max_num_proposals, height, width, depth]\n representing box classifier features for each proposal.\n \"\"\"\n with tf.variable_scope(\n scope, values=[proposal_feature_maps], reuse=tf.AUTO_REUSE):\n return self._extract_box_classifier_features(proposal_feature_maps, scope)\n\n @abstractmethod\n def _extract_box_classifier_features(self, proposal_feature_maps, scope):\n \"\"\"Extracts second stage box classifier features, to be overridden.\"\"\"\n pass\n\n def restore_from_classification_checkpoint_fn(\n self,\n first_stage_feature_extractor_scope,\n second_stage_feature_extractor_scope):\n \"\"\"Returns a map of variables to load from a foreign checkpoint.\n\n Args:\n first_stage_feature_extractor_scope: A scope name for the first stage\n feature extractor.\n second_stage_feature_extractor_scope: A scope name for the second stage\n feature extractor.\n\n Returns:\n A dict mapping variable names (to load from a checkpoint) to variables in\n the model graph.\n \"\"\"\n variables_to_restore = {}\n for variable in tf.global_variables():\n for scope_name in [first_stage_feature_extractor_scope,\n second_stage_feature_extractor_scope]:\n if variable.op.name.startswith(scope_name):\n var_name = variable.op.name.replace(scope_name + '/', '')\n variables_to_restore[var_name] = variable\n return variables_to_restore\n\n\nclass FasterRCNNMetaArchOverrideRPN(model.DetectionModel):\n \"\"\"Faster R-CNN Meta-architecture definition.\"\"\"\n\n def __init__(self,\n is_training,\n num_classes,\n image_resizer_fn,\n feature_extractor,\n number_of_stages,\n first_stage_anchor_generator,\n first_stage_target_assigner,\n first_stage_atrous_rate,\n first_stage_box_predictor_arg_scope_fn,\n first_stage_box_predictor_kernel_size,\n 
first_stage_box_predictor_depth,\n first_stage_minibatch_size,\n first_stage_sampler,\n first_stage_nms_score_threshold,\n first_stage_nms_iou_threshold,\n first_stage_max_proposals,\n first_stage_proposals_path,\n first_stage_localization_loss_weight,\n first_stage_objectness_loss_weight,\n initial_crop_size,\n maxpool_kernel_size,\n maxpool_stride,\n second_stage_target_assigner,\n second_stage_mask_rcnn_box_predictor,\n second_stage_batch_size,\n second_stage_sampler,\n second_stage_non_max_suppression_fn,\n second_stage_score_conversion_fn,\n second_stage_localization_loss_weight,\n second_stage_classification_loss_weight,\n second_stage_classification_loss,\n second_stage_mask_prediction_loss_weight=1.0,\n hard_example_miner=None,\n parallel_iterations=16,\n add_summaries=True,\n use_matmul_crop_and_resize=False,\n clip_anchors_to_image=False):\n \"\"\"FasterRCNNMetaArch Constructor.\n\n Args:\n is_training: A boolean indicating whether the training version of the\n computation graph should be constructed.\n num_classes: Number of classes. Note that num_classes *does not*\n include the background category, so if groundtruth labels take values\n in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the\n assigned classification targets can range from {0,... K}).\n image_resizer_fn: A callable for image resizing. This callable\n takes a rank-3 image tensor of shape [height, width, channels]\n (corresponding to a single image), an optional rank-3 instance mask\n tensor of shape [num_masks, height, width] and returns a resized rank-3\n image tensor, a resized mask tensor if one was provided in the input. In\n addition this callable must also return a 1-D tensor of the form\n [height, width, channels] containing the size of the true image, as the\n image resizer can perform zero padding. See protos/image_resizer.proto.\n feature_extractor: A FasterRCNNFeatureExtractor object.\n number_of_stages: An integer values taking values in {1, 2, 3}. If\n 1, the function will construct only the Region Proposal Network (RPN)\n part of the model. If 2, the function will perform box refinement and\n other auxiliary predictions all in the second stage. If 3, it will\n extract features from refined boxes and perform the auxiliary\n predictions on the non-maximum suppressed refined boxes.\n If is_training is true and the value of number_of_stages is 3, it is\n reduced to 2 since all the model heads are trained in parallel in second\n stage during training.\n first_stage_anchor_generator: An anchor_generator.AnchorGenerator object\n (note that currently we only support\n grid_anchor_generator.GridAnchorGenerator objects)\n first_stage_target_assigner: Target assigner to use for first stage of\n Faster R-CNN (RPN).\n first_stage_atrous_rate: A single integer indicating the atrous rate for\n the single convolution op which is applied to the `rpn_features_to_crop`\n tensor to obtain a tensor to be used for box prediction. Some feature\n extractors optionally allow for producing feature maps computed at\n denser resolutions. 
The atrous rate is used to compensate for the\n denser feature maps by using an effectively larger receptive field.\n (This should typically be set to 1).\n first_stage_box_predictor_arg_scope_fn: A function to construct tf-slim\n arg_scope for conv2d, separable_conv2d and fully_connected ops for the\n RPN box predictor.\n first_stage_box_predictor_kernel_size: Kernel size to use for the\n convolution op just prior to RPN box predictions.\n first_stage_box_predictor_depth: Output depth for the convolution op\n just prior to RPN box predictions.\n first_stage_minibatch_size: The \"batch size\" to use for computing the\n objectness and location loss of the region proposal network. This\n \"batch size\" refers to the number of anchors selected as contributing\n to the loss function for any given image within the image batch and is\n only called \"batch_size\" due to terminology from the Faster R-CNN paper.\n first_stage_sampler: Sampler to use for first stage loss (RPN loss).\n first_stage_nms_score_threshold: Score threshold for non max suppression\n for the Region Proposal Network (RPN). This value is expected to be in\n [0, 1] as it is applied directly after a softmax transformation. The\n recommended value for Faster R-CNN is 0.\n first_stage_nms_iou_threshold: The Intersection Over Union (IOU) threshold\n for performing Non-Max Suppression (NMS) on the boxes predicted by the\n Region Proposal Network (RPN).\n first_stage_max_proposals: Maximum number of boxes to retain after\n performing Non-Max Suppression (NMS) on the boxes predicted by the\n Region Proposal Network (RPN).\n first_stage_localization_loss_weight: A float\n first_stage_objectness_loss_weight: A float\n initial_crop_size: A single integer indicating the output size\n (width and height are set to be the same) of the initial bilinear\n interpolation based cropping during ROI pooling.\n maxpool_kernel_size: A single integer indicating the kernel size of the\n max pool op on the cropped feature map during ROI pooling.\n maxpool_stride: A single integer indicating the stride of the max pool\n op on the cropped feature map during ROI pooling.\n second_stage_target_assigner: Target assigner to use for second stage of\n Faster R-CNN. If the model is configured with multiple prediction heads,\n this target assigner is used to generate targets for all heads (with the\n correct `unmatched_class_label`).\n second_stage_mask_rcnn_box_predictor: Mask R-CNN box predictor to use for\n the second stage.\n second_stage_batch_size: The batch size used for computing the\n classification and refined location loss of the box classifier. This\n \"batch size\" refers to the number of proposals selected as contributing\n to the loss function for any given image within the image batch and is\n only called \"batch_size\" due to terminology from the Faster R-CNN paper.\n second_stage_sampler: Sampler to use for second stage loss (box\n classifier loss).\n second_stage_non_max_suppression_fn: batch_multiclass_non_max_suppression\n callable that takes `boxes`, `scores`, optional `clip_window` and\n optional (kwarg) `mask` inputs (with all other inputs already set)\n and returns a dictionary containing tensors with keys:\n `detection_boxes`, `detection_scores`, `detection_classes`,\n `num_detections`, and (optionally) `detection_masks`. 
See\n `post_processing.batch_multiclass_non_max_suppression` for the type and\n shape of these tensors.\n second_stage_score_conversion_fn: Callable elementwise nonlinearity\n (that takes tensors as inputs and returns tensors). This is usually\n used to convert logits to probabilities.\n second_stage_localization_loss_weight: A float indicating the scale factor\n for second stage localization loss.\n second_stage_classification_loss_weight: A float indicating the scale\n factor for second stage classification loss.\n second_stage_classification_loss: Classification loss used by the second\n stage classifier. Either losses.WeightedSigmoidClassificationLoss or\n losses.WeightedSoftmaxClassificationLoss.\n second_stage_mask_prediction_loss_weight: A float indicating the scale\n factor for second stage mask prediction loss. This is applicable only if\n second stage box predictor is configured to predict masks.\n hard_example_miner: A losses.HardExampleMiner object (can be None).\n parallel_iterations: (Optional) The number of iterations allowed to run\n in parallel for calls to tf.map_fn.\n add_summaries: boolean (default: True) controlling whether summary ops\n should be added to tensorflow graph.\n use_matmul_crop_and_resize: Force the use of matrix multiplication based\n crop and resize instead of standard tf.image.crop_and_resize while\n computing second stage input feature maps.\n clip_anchors_to_image: Normally, anchors generated for a given image size\n are pruned during training if they lie outside the image window. This\n option clips the anchors to be within the image instead of pruning.\n\n Raises:\n ValueError: If `second_stage_batch_size` > `first_stage_max_proposals` at\n training time.\n ValueError: If first_stage_anchor_generator is not of type\n grid_anchor_generator.GridAnchorGenerator.\n \"\"\"\n # TODO(rathodv): add_summaries is currently unused. 
Respect that directive\n # in the future.\n print(\"Running FasterRCNN with overriden RPN\")\n super(FasterRCNNMetaArchOverrideRPN, self).__init__(num_classes=num_classes)\n\n # There is no RPN in this implementation!\n if (number_of_stages==1):\n raise ValueError('Number of stages = 1 is not allowed for overriden RPN proposals')\n\n if is_training and second_stage_batch_size > first_stage_max_proposals:\n raise ValueError('second_stage_batch_size should be no greater than '\n 'first_stage_max_proposals.')\n if not isinstance(first_stage_anchor_generator,\n grid_anchor_generator.GridAnchorGenerator):\n raise ValueError('first_stage_anchor_generator must be of type '\n 'grid_anchor_generator.GridAnchorGenerator.')\n\n # Michele: Proposals that override the RPN\n first_stage_proposals_path = os.path.join(first_stage_proposals_path, '')\n xml_root = data_util.read_xml_batch(first_stage_proposals_path)[0]['annot']\n _, self.proposals = data_util.xml_to_numpy(None, xml_root)\n\n print(\"Shape of overriding proposals\",self.proposals.shape)\n\n self._is_training = is_training\n self._image_resizer_fn = image_resizer_fn\n self._feature_extractor = feature_extractor\n self._number_of_stages = number_of_stages\n\n self._proposal_target_assigner = first_stage_target_assigner\n self._detector_target_assigner = second_stage_target_assigner\n # Both proposal and detector target assigners use the same box coder\n self._box_coder = self._proposal_target_assigner.box_coder\n\n # (First stage) Region proposal network parameters\n self._first_stage_anchor_generator = first_stage_anchor_generator\n self._first_stage_atrous_rate = first_stage_atrous_rate\n self._first_stage_box_predictor_arg_scope_fn = (\n first_stage_box_predictor_arg_scope_fn)\n self._first_stage_box_predictor_kernel_size = (\n first_stage_box_predictor_kernel_size)\n self._first_stage_box_predictor_depth = first_stage_box_predictor_depth\n self._first_stage_minibatch_size = first_stage_minibatch_size\n self._first_stage_sampler = first_stage_sampler\n self._first_stage_box_predictor = (\n box_predictor_builder.build_convolutional_box_predictor(\n is_training=self._is_training,\n num_classes=1,\n conv_hyperparams_fn=self._first_stage_box_predictor_arg_scope_fn,\n use_dropout=False,\n dropout_keep_prob=1.0,\n box_code_size=self._box_coder.code_size,\n kernel_size=1,\n num_layers_before_predictor=0,\n min_depth=0,\n max_depth=0))\n\n self._first_stage_nms_score_threshold = first_stage_nms_score_threshold\n self._first_stage_nms_iou_threshold = first_stage_nms_iou_threshold\n self._first_stage_max_proposals = first_stage_max_proposals\n\n self._first_stage_localization_loss = (\n losses.WeightedSmoothL1LocalizationLoss())\n self._first_stage_objectness_loss = (\n losses.WeightedSoftmaxClassificationLoss())\n self._first_stage_loc_loss_weight = first_stage_localization_loss_weight\n self._first_stage_obj_loss_weight = first_stage_objectness_loss_weight\n\n # Per-region cropping parameters\n self._initial_crop_size = initial_crop_size\n self._maxpool_kernel_size = maxpool_kernel_size\n self._maxpool_stride = maxpool_stride\n\n self._mask_rcnn_box_predictor = second_stage_mask_rcnn_box_predictor\n\n self._second_stage_batch_size = second_stage_batch_size\n self._second_stage_sampler = second_stage_sampler\n\n self._second_stage_nms_fn = second_stage_non_max_suppression_fn\n self._second_stage_score_conversion_fn = second_stage_score_conversion_fn\n\n self._second_stage_localization_loss = (\n losses.WeightedSmoothL1LocalizationLoss())\n 
self._second_stage_classification_loss = second_stage_classification_loss\n self._second_stage_mask_loss = (\n losses.WeightedSigmoidClassificationLoss())\n self._second_stage_loc_loss_weight = second_stage_localization_loss_weight\n self._second_stage_cls_loss_weight = second_stage_classification_loss_weight\n self._second_stage_mask_loss_weight = (\n second_stage_mask_prediction_loss_weight)\n self._use_matmul_crop_and_resize = use_matmul_crop_and_resize\n self._hard_example_miner = hard_example_miner\n self._parallel_iterations = parallel_iterations\n\n self.clip_anchors_to_image = clip_anchors_to_image\n\n if self._number_of_stages <= 0 or self._number_of_stages > 3:\n raise ValueError('Number of stages should be a value in {1, 2, 3}.')\n\n @property\n def first_stage_feature_extractor_scope(self):\n return 'FirstStageFeatureExtractor'\n\n @property\n def second_stage_feature_extractor_scope(self):\n return 'SecondStageFeatureExtractor'\n\n @property\n def first_stage_box_predictor_scope(self):\n return 'FirstStageBoxPredictor'\n\n @property\n def second_stage_box_predictor_scope(self):\n return 'SecondStageBoxPredictor'\n\n @property\n def max_num_proposals(self):\n \"\"\"Max number of proposals (to pad to) for each image in the input batch.\n\n At training time, this is set to be the `second_stage_batch_size` if hard\n example miner is not configured, else it is set to\n `first_stage_max_proposals`. At inference time, this is always set to\n `first_stage_max_proposals`.\n\n Returns:\n A positive integer.\n \"\"\"\n if self._is_training and not self._hard_example_miner:\n return self._second_stage_batch_size\n #return self._first_stage_max_proposals\n return self.proposals.shape[1]\n\n @property\n def anchors(self):\n if not self._anchors:\n raise RuntimeError('anchors have not been constructed yet!')\n if not isinstance(self._anchors, box_list.BoxList):\n raise RuntimeError('anchors should be a BoxList object, but is not.')\n return self._anchors\n\n def preprocess(self, inputs):\n \"\"\"Feature-extractor specific preprocessing.\n\n See base class.\n\n For Faster R-CNN, we perform image resizing in the base class --- each\n class subclassing FasterRCNNMetaArch is responsible for any additional\n preprocessing (e.g., scaling pixel values to be in [-1, 1]).\n\n Args:\n inputs: a [batch, height_in, width_in, channels] float tensor representing\n a batch of images with values between 0 and 255.0.\n\n Returns:\n preprocessed_inputs: a [batch, height_out, width_out, channels] float\n tensor representing a batch of images.\n true_image_shapes: int32 tensor of shape [batch, 3] where each row is\n of the form [height, width, channels] indicating the shapes\n of true images in the resized images, as resized images can be padded\n with zeros.\n Raises:\n ValueError: if inputs tensor does not have type tf.float32\n \"\"\"\n if inputs.dtype is not tf.float32:\n raise ValueError('`preprocess` expects a tf.float32 tensor')\n with tf.name_scope('Preprocessor'):\n outputs = shape_utils.static_or_dynamic_map_fn(\n self._image_resizer_fn,\n elems=inputs,\n dtype=[tf.float32, tf.int32],\n parallel_iterations=self._parallel_iterations)\n resized_inputs = outputs[0]\n true_image_shapes = outputs[1]\n return (self._feature_extractor.preprocess(resized_inputs),\n true_image_shapes)\n\n def _compute_clip_window(self, image_shapes):\n \"\"\"Computes clip window for non max suppression based on image shapes.\n\n This function assumes that the clip window's left top corner is at (0, 0).\n\n Args:\n 
image_shapes: A 2-D int32 tensor of shape [batch_size, 3] containing\n shapes of images in the batch. Each row represents [height, width,\n channels] of an image.\n\n Returns:\n A 2-D float32 tensor of shape [batch_size, 4] containing the clip window\n for each image in the form [ymin, xmin, ymax, xmax].\n \"\"\"\n clip_heights = image_shapes[:, 0]\n clip_widths = image_shapes[:, 1]\n clip_window = tf.to_float(tf.stack([tf.zeros_like(clip_heights),\n tf.zeros_like(clip_heights),\n clip_heights, clip_widths], axis=1))\n return clip_window\n\n def predict(self, preprocessed_inputs, true_image_shapes):\n \"\"\"Predicts unpostprocessed tensors from input tensor.\n\n This function takes an input batch of images and runs it through the\n forward pass of the network to yield \"raw\" un-postprocessed predictions.\n If `number_of_stages` is 1, this function only returns first stage\n RPN predictions (un-postprocessed). Otherwise it returns both\n first stage RPN predictions as well as second stage box classifier\n predictions.\n\n Other remarks:\n + Anchor pruning vs. clipping: following the recommendation of the Faster\n R-CNN paper, we prune anchors that venture outside the image window at\n training time and clip anchors to the image window at inference time.\n + Proposal padding: as described at the top of the file, proposals are\n padded to self._max_num_proposals and flattened so that proposals from all\n images within the input batch are arranged along the same batch dimension.\n\n Args:\n preprocessed_inputs: a [batch, height, width, channels] float tensor\n representing a batch of images.\n true_image_shapes: int32 tensor of shape [batch, 3] where each row is\n of the form [height, width, channels] indicating the shapes\n of true images in the resized images, as resized images can be padded\n with zeros.\n\n Returns:\n prediction_dict: a dictionary holding \"raw\" prediction tensors:\n 1) rpn_box_predictor_features: A 4-D float32 tensor with shape\n [batch_size, height, width, depth] to be used for predicting proposal\n boxes and corresponding objectness scores.\n 2) rpn_features_to_crop: A 4-D float32 tensor with shape\n [batch_size, height, width, depth] representing image features to crop\n using the proposal boxes predicted by the RPN.\n 3) image_shape: a 1-D tensor of shape [4] representing the input\n image shape.\n 4) rpn_box_encodings: 3-D float tensor of shape\n [batch_size, num_anchors, self._box_coder.code_size] containing\n predicted boxes.\n 5) rpn_objectness_predictions_with_background: 3-D float tensor of shape\n [batch_size, num_anchors, 2] containing class\n predictions (logits) for each of the anchors. Note that this\n tensor *includes* background class predictions (at class index 0).\n 6) anchors: A 2-D tensor of shape [num_anchors, 4] representing anchors\n for the first stage RPN (in absolute coordinates). Note that\n `num_anchors` can differ depending on whether the model is created in\n training or inference mode.\n\n (and if number_of_stages > 1):\n 7) refined_box_encodings: a 3-D tensor with shape\n [total_num_proposals, num_classes, self._box_coder.code_size]\n representing predicted (final) refined box encodings, where\n total_num_proposals=batch_size*self._max_num_proposals. 
If using\n a shared box across classes the shape will instead be\n [total_num_proposals, 1, self._box_coder.code_size].\n 8) class_predictions_with_background: a 3-D tensor with shape\n [total_num_proposals, num_classes + 1] containing class\n predictions (logits) for each of the anchors, where\n total_num_proposals=batch_size*self._max_num_proposals.\n Note that this tensor *includes* background class predictions\n (at class index 0).\n 9) num_proposals: An int32 tensor of shape [batch_size] representing the\n number of proposals generated by the RPN. `num_proposals` allows us\n to keep track of which entries are to be treated as zero paddings and\n which are not since we always pad the number of proposals to be\n `self.max_num_proposals` for each image.\n 10) proposal_boxes: A float32 tensor of shape\n [batch_size, self.max_num_proposals, 4] representing\n decoded proposal bounding boxes in absolute coordinates.\n 11) mask_predictions: (optional) a 4-D tensor with shape\n [total_num_padded_proposals, num_classes, mask_height, mask_width]\n containing instance mask predictions.\n\n Raises:\n ValueError: If `predict` is called before `preprocess`.\n \"\"\"\n '''(rpn_box_predictor_features, rpn_features_to_crop, anchors_boxlist,\n image_shape) = self._extract_rpn_feature_maps(preprocessed_inputs)'''\n print(\"Predict running\")\n image_shape = tf.shape(preprocessed_inputs)\n rpn_features_to_crop, _ = self._feature_extractor.extract_proposal_features(\n preprocessed_inputs, scope=self.first_stage_feature_extractor_scope)\n #(rpn_box_encodings, rpn_objectness_predictions_with_background\n #) = self._predict_rpn_proposals(rpn_box_predictor_features)\n\n # The Faster R-CNN paper recommends pruning anchors that venture outside\n # the image window at training time and clipping at inference time.\n '''clip_window = tf.to_float(tf.stack([0, 0, image_shape[1], image_shape[2]]))\n if self._is_training:\n if self.clip_anchors_to_image:\n anchors_boxlist = box_list_ops.clip_to_window(\n anchors_boxlist, clip_window, filter_nonoverlapping=False)\n else:\n (rpn_box_encodings, rpn_objectness_predictions_with_background,\n anchors_boxlist) = self._remove_invalid_anchors_and_predictions(\n rpn_box_encodings, rpn_objectness_predictions_with_background,\n anchors_boxlist, clip_window)\n else:\n anchors_boxlist = box_list_ops.clip_to_window(\n anchors_boxlist, clip_window)\n\n self._anchors = anchors_boxlist'''\n prediction_dict = {\n #'rpn_box_predictor_features': rpn_box_predictor_features,\n 'rpn_features_to_crop': rpn_features_to_crop,\n 'image_shape': image_shape,\n #'rpn_box_encodings': rpn_box_encodings,\n #'rpn_objectness_predictions_with_background':\n #rpn_objectness_predictions_with_background,\n #'anchors': self._anchors.get()\n }\n\n if self._number_of_stages >= 2:\n '''prediction_dict.update(self._predict_second_stage(\n rpn_box_encodings,\n rpn_objectness_predictions_with_background,\n rpn_features_to_crop,\n self._anchors.get(), image_shape, true_image_shapes))'''\n prediction_dict.update(self._predict_second_stage(\n rpn_features_to_crop, image_shape, true_image_shapes))\n\n if self._number_of_stages == 3:\n prediction_dict = self._predict_third_stage(\n prediction_dict, true_image_shapes)\n\n return prediction_dict\n\n def _image_batch_shape_2d(self, image_batch_shape_1d):\n \"\"\"Takes a 1-D image batch shape tensor and converts it to a 2-D tensor.\n\n Example:\n If 1-D image batch shape tensor is [2, 300, 300, 3]. 
The corresponding 2-D\n image batch tensor would be [[300, 300, 3], [300, 300, 3]]\n\n Args:\n image_batch_shape_1d: 1-D tensor of the form [batch_size, height,\n width, channels].\n\n Returns:\n image_batch_shape_2d: 2-D tensor of shape [batch_size, 3] were each row is\n of the form [height, width, channels].\n \"\"\"\n return tf.tile(tf.expand_dims(image_batch_shape_1d[1:], 0),\n [image_batch_shape_1d[0], 1])\n\n '''def _predict_second_stage(self, rpn_box_encodings,\n rpn_objectness_predictions_with_background,\n rpn_features_to_crop,\n anchors,\n image_shape,\n true_image_shapes):\n \"\"\"Predicts the output tensors from second stage of Faster R-CNN.\n\n Args:\n rpn_box_encodings: 4-D float tensor of shape\n [batch_size, num_valid_anchors, self._box_coder.code_size] containing\n predicted boxes.\n rpn_objectness_predictions_with_background: 2-D float tensor of shape\n [batch_size, num_valid_anchors, 2] containing class\n predictions (logits) for each of the anchors. Note that this\n tensor *includes* background class predictions (at class index 0).\n rpn_features_to_crop: A 4-D float32 tensor with shape\n [batch_size, height, width, depth] representing image features to crop\n using the proposal boxes predicted by the RPN.\n anchors: 2-D float tensor of shape\n [num_anchors, self._box_coder.code_size].\n image_shape: A 1D int32 tensors of size [4] containing the image shape.\n true_image_shapes: int32 tensor of shape [batch, 3] where each row is\n of the form [height, width, channels] indicating the shapes\n of true images in the resized images, as resized images can be padded\n with zeros.\n\n Returns:\n prediction_dict: a dictionary holding \"raw\" prediction tensors:\n 1) refined_box_encodings: a 3-D tensor with shape\n [total_num_proposals, num_classes, self._box_coder.code_size]\n representing predicted (final) refined box encodings, where\n total_num_proposals=batch_size*self._max_num_proposals. If using a\n shared box across classes the shape will instead be\n [total_num_proposals, 1, self._box_coder.code_size].\n 2) class_predictions_with_background: a 3-D tensor with shape\n [total_num_proposals, num_classes + 1] containing class\n predictions (logits) for each of the anchors, where\n total_num_proposals=batch_size*self._max_num_proposals.\n Note that this tensor *includes* background class predictions\n (at class index 0).\n 3) num_proposals: An int32 tensor of shape [batch_size] representing the\n number of proposals generated by the RPN. `num_proposals` allows us\n to keep track of which entries are to be treated as zero paddings and\n which are not since we always pad the number of proposals to be\n `self.max_num_proposals` for each image.\n 4) proposal_boxes: A float32 tensor of shape\n [batch_size, self.max_num_proposals, 4] representing\n decoded proposal bounding boxes in absolute coordinates.\n 5) proposal_boxes_normalized: A float32 tensor of shape\n [batch_size, self.max_num_proposals, 4] representing decoded proposal\n bounding boxes in normalized coordinates. 
Can be used to override the\n boxes proposed by the RPN, thus enabling one to extract features and\n get box classification and prediction for externally selected areas\n of the image.\n 6) box_classifier_features: a 4-D float32 tensor representing the\n features for each proposal.\n \"\"\"\n image_shape_2d = self._image_batch_shape_2d(image_shape)\n proposal_boxes_normalized, _, num_proposals = self._postprocess_rpn(\n rpn_box_encodings, rpn_objectness_predictions_with_background,\n anchors, image_shape_2d, true_image_shapes)\n # Override RPN proposals\n # proposal_boxes_normalized = tf.Print(proposal_boxes_normalized, [], message=(\"original size= \" + str(proposal_boxes_normalized.shape[1])))\n # proposal_boxes_normalized = tf.constant(self.proposals, dtype='float32')\n\n flattened_proposal_feature_maps = (\n self._compute_second_stage_input_feature_maps(\n rpn_features_to_crop, proposal_boxes_normalized))\n\n box_classifier_features = (\n self._feature_extractor.extract_box_classifier_features(\n flattened_proposal_feature_maps,\n scope=self.second_stage_feature_extractor_scope))\n\n if self._mask_rcnn_box_predictor.is_keras_model:\n box_predictions = self._mask_rcnn_box_predictor(\n [box_classifier_features],\n prediction_stage=2)\n else:\n box_predictions = self._mask_rcnn_box_predictor.predict(\n [box_classifier_features],\n num_predictions_per_location=[1],\n scope=self.second_stage_box_predictor_scope,\n prediction_stage=2)\n\n refined_box_encodings = tf.squeeze(\n box_predictions[box_predictor.BOX_ENCODINGS],\n axis=1, name='all_refined_box_encodings')\n class_predictions_with_background = tf.squeeze(\n box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],\n axis=1, name='all_class_predictions_with_background')\n\n absolute_proposal_boxes = ops.normalized_to_image_coordinates(\n proposal_boxes_normalized, image_shape, self._parallel_iterations)\n\n prediction_dict = {\n 'refined_box_encodings': refined_box_encodings,\n 'class_predictions_with_background':\n class_predictions_with_background,\n 'num_proposals': num_proposals,\n 'proposal_boxes': absolute_proposal_boxes,\n 'box_classifier_features': box_classifier_features,\n 'proposal_boxes_normalized': proposal_boxes_normalized,\n }\n\n return prediction_dict'''\n def _predict_second_stage(self, rpn_features_to_crop,\n image_shape,\n true_image_shapes):\n \"\"\"Predicts the output tensors from second stage of Faster R-CNN.\n\n Args:\n rpn_features_to_crop: A 4-D float32 tensor with shape\n [batch_size, height, width, depth] representing image features to crop\n using the proposal boxes predicted by the RPN.\n image_shape: A 1D int32 tensors of size [4] containing the image shape.\n true_image_shapes: int32 tensor of shape [batch, 3] where each row is\n of the form [height, width, channels] indicating the shapes\n of true images in the resized images, as resized images can be padded\n with zeros.\n\n Returns:\n prediction_dict: a dictionary holding \"raw\" prediction tensors:\n 1) refined_box_encodings: a 3-D tensor with shape\n [total_num_proposals, num_classes, self._box_coder.code_size]\n representing predicted (final) refined box encodings, where\n total_num_proposals=batch_size*self._max_num_proposals. 
If using a\n shared box across classes the shape will instead be\n [total_num_proposals, 1, self._box_coder.code_size].\n 2) class_predictions_with_background: a 3-D tensor with shape\n [total_num_proposals, num_classes + 1] containing class\n predictions (logits) for each of the anchors, where\n total_num_proposals=batch_size*self._max_num_proposals.\n Note that this tensor *includes* background class predictions\n (at class index 0).\n 3) num_proposals: An int32 tensor of shape [batch_size] representing the\n number of proposals generated by the RPN. `num_proposals` allows us\n to keep track of which entries are to be treated as zero paddings and\n which are not since we always pad the number of proposals to be\n `self.max_num_proposals` for each image.\n 4) proposal_boxes: A float32 tensor of shape\n [batch_size, self.max_num_proposals, 4] representing\n decoded proposal bounding boxes in absolute coordinates.\n 5) proposal_boxes_normalized: A float32 tensor of shape\n [batch_size, self.max_num_proposals, 4] representing decoded proposal\n bounding boxes in normalized coordinates. Can be used to override the\n boxes proposed by the RPN, thus enabling one to extract features and\n get box classification and prediction for externally selected areas\n of the image.\n 6) box_classifier_features: a 4-D float32 tensor representing the\n features for each proposal.\n \"\"\"\n image_shape_2d = self._image_batch_shape_2d(image_shape) # same as true shape\n '''proposal_boxes_normalized, _, num_proposals = self._postprocess_rpn(\n rpn_box_encodings, rpn_objectness_predictions_with_background,\n anchors, image_shape_2d, true_image_shapes)'''\n # Override RPN proposals\n # proposal_boxes_normalized = tf.Print(proposal_boxes_normalized, [], message=(\"original size= \" + str(proposal_boxes_normalized.shape[1])))\n # normalize proposal boxes\n\n def normalize_boxes(args):\n proposal_boxes_per_image = args[0]\n image_shape = args[1]\n normalized_boxes_per_image = box_list_ops.to_normalized_coordinates(\n box_list.BoxList(proposal_boxes_per_image), image_shape[0],\n image_shape[1], check_range=False).get()\n return normalized_boxes_per_image\n def to_absolute_boxes(args):\n proposal_boxes_per_image = args[0]\n image_shape = args[1]\n normalized_boxes_per_image = box_list_ops.to_absolute_coordinates(\n box_list.BoxList(proposal_boxes_per_image), image_shape[0],\n image_shape[1], check_range=False).get()\n return normalized_boxes_per_image \n \n proposal_boxes = tf.constant(self.proposals, dtype='float32')\n proposal_boxes = shape_utils.static_or_dynamic_map_fn(\n to_absolute_boxes, elems=[proposal_boxes, true_image_shapes], dtype=tf.float32)\n\n\n num_proposals = tf.constant([proposal_boxes.shape[1]], dtype='int32')\n # single_image_boxlist = box_list.BoxList(proposals_absolute)\n # proposal_boxes = self._sample_box_classifier_minibatch_single_image(single_image_boxlist, num_proposals, groundtruth_boxlists[0], \n # groundtruth_classes_with_background_list[0], groundtruth_weights_list[0]).get()\n # Minibatch sampling during training\n if self._is_training:\n proposal_boxes = tf.stop_gradient(proposal_boxes)\n if not self._hard_example_miner:\n\n placeholder_scores = tf.zeros((1, proposal_boxes.shape[1], 2))\n #proposal_boxes = tf.Print(proposal_boxes, [proposal_boxes], message=\"1: \")\n\n (groundtruth_boxlists, groundtruth_classes_with_background_list, _,\n groundtruth_weights_list\n ) = self._format_groundtruth_data(true_image_shapes)\n\n (proposal_boxes, _, num_proposals) = 
self._sample_box_classifier_batch(proposal_boxes, placeholder_scores, num_proposals, \n groundtruth_boxlists, groundtruth_classes_with_background_list, groundtruth_weights_list, true_image_shapes[0])\n #proposal_boxes = tf.Print(proposal_boxes, [proposal_boxes], message=\"2: \")\n\n #proposal_boxes = tf.Print(proposal_boxes, [], message=(\"Shape of pboxes \" + str(proposal_boxes.shape[1])))\n #num_proposals = tf.Print(num_proposals, [num_proposals])\n \n proposal_boxes_normalized = shape_utils.static_or_dynamic_map_fn(\n normalize_boxes, elems=[proposal_boxes, true_image_shapes], dtype=tf.float32)\n #proposal_boxes_normalized = tf.Print(proposal_boxes_normalized, [proposal_boxes_normalized], message=\"3: \")\n\n #proposal_boxes_normalized = tf.Print(proposal_boxes_normalized, [tf.shape(proposal_boxes_normalized)], message=(\"Shape of pboxes \"))\n\n\n #proposal_boxes_normalized = tf.constant(self.proposals[:, 0:64, :], dtype='float32')\n #proposal_boxes_normalized = tf.Print(proposal_boxes_normalized, [], message=(\"Shape of minibatch \" + str(proposal_boxes_normalized.shape[1])))\n\n flattened_proposal_feature_maps = (\n self._compute_second_stage_input_feature_maps(\n rpn_features_to_crop, proposal_boxes_normalized))\n #flattened_proposal_feature_maps = tf.stop_gradient(flattened_proposal_feature_maps)\n #flattened_proposal_feature_maps = tf.Print(flattened_proposal_feature_maps, [], message=(\"Cropped props : \" + str(flattened_proposal_feature_maps.shape)))\n\n box_classifier_features = (\n self._feature_extractor.extract_box_classifier_features(\n flattened_proposal_feature_maps,\n scope=self.second_stage_feature_extractor_scope))\n\n if self._mask_rcnn_box_predictor.is_keras_model:\n box_predictions = self._mask_rcnn_box_predictor(\n [box_classifier_features],\n prediction_stage=2)\n else:\n box_predictions = self._mask_rcnn_box_predictor.predict(\n [box_classifier_features],\n num_predictions_per_location=[1],\n scope=self.second_stage_box_predictor_scope,\n prediction_stage=2)\n\n refined_box_encodings = tf.squeeze(\n box_predictions[box_predictor.BOX_ENCODINGS],\n axis=1, name='all_refined_box_encodings')\n class_predictions_with_background = tf.squeeze(\n box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],\n axis=1, name='all_class_predictions_with_background')\n\n absolute_proposal_boxes = ops.normalized_to_image_coordinates(\n proposal_boxes_normalized, image_shape, self._parallel_iterations)\n\n prediction_dict = {\n 'refined_box_encodings': refined_box_encodings,\n 'class_predictions_with_background':\n class_predictions_with_background,\n 'num_proposals': num_proposals,\n 'proposal_boxes': absolute_proposal_boxes,\n 'box_classifier_features': box_classifier_features,\n 'proposal_boxes_normalized': proposal_boxes_normalized,\n }\n\n return prediction_dict\n\n def _predict_third_stage(self, prediction_dict, image_shapes):\n \"\"\"Predicts non-box, non-class outputs using refined detections.\n\n For training, masks as predicted directly on the box_classifier_features,\n which are region-features from the initial anchor boxes.\n For inference, this happens after calling the post-processing stage, such\n that masks are only calculated for the top scored boxes.\n\n Args:\n prediction_dict: a dictionary holding \"raw\" prediction tensors:\n 1) refined_box_encodings: a 3-D tensor with shape\n [total_num_proposals, num_classes, self._box_coder.code_size]\n representing predicted (final) refined box encodings, where\n 
total_num_proposals=batch_size*self._max_num_proposals. If using a\n shared box across classes the shape will instead be\n [total_num_proposals, 1, self._box_coder.code_size].\n 2) class_predictions_with_background: a 3-D tensor with shape\n [total_num_proposals, num_classes + 1] containing class\n predictions (logits) for each of the anchors, where\n total_num_proposals=batch_size*self._max_num_proposals.\n Note that this tensor *includes* background class predictions\n (at class index 0).\n 3) num_proposals: An int32 tensor of shape [batch_size] representing the\n number of proposals generated by the RPN. `num_proposals` allows us\n to keep track of which entries are to be treated as zero paddings and\n which are not since we always pad the number of proposals to be\n `self.max_num_proposals` for each image.\n 4) proposal_boxes: A float32 tensor of shape\n [batch_size, self.max_num_proposals, 4] representing\n decoded proposal bounding boxes in absolute coordinates.\n 5) box_classifier_features: a 4-D float32 tensor representing the\n features for each proposal.\n image_shapes: A 2-D int32 tensors of shape [batch_size, 3] containing\n shapes of images in the batch.\n\n Returns:\n prediction_dict: a dictionary that in addition to the input predictions\n does hold the following predictions as well:\n 1) mask_predictions: a 4-D tensor with shape\n [batch_size, max_detection, mask_height, mask_width] containing\n instance mask predictions.\n \"\"\"\n if self._is_training:\n curr_box_classifier_features = prediction_dict['box_classifier_features']\n detection_classes = prediction_dict['class_predictions_with_background']\n if self._mask_rcnn_box_predictor.is_keras_model:\n mask_predictions = self._mask_rcnn_box_predictor(\n [curr_box_classifier_features],\n prediction_stage=3)\n else:\n mask_predictions = self._mask_rcnn_box_predictor.predict(\n [curr_box_classifier_features],\n num_predictions_per_location=[1],\n scope=self.second_stage_box_predictor_scope,\n prediction_stage=3)\n prediction_dict['mask_predictions'] = tf.squeeze(mask_predictions[\n box_predictor.MASK_PREDICTIONS], axis=1)\n else:\n detections_dict = self._postprocess_box_classifier(\n prediction_dict['refined_box_encodings'],\n prediction_dict['class_predictions_with_background'],\n prediction_dict['proposal_boxes'],\n prediction_dict['num_proposals'],\n image_shapes)\n prediction_dict.update(detections_dict)\n detection_boxes = detections_dict[\n fields.DetectionResultFields.detection_boxes]\n detection_classes = detections_dict[\n fields.DetectionResultFields.detection_classes]\n rpn_features_to_crop = prediction_dict['rpn_features_to_crop']\n batch_size = tf.shape(detection_boxes)[0]\n max_detection = tf.shape(detection_boxes)[1]\n flattened_detected_feature_maps = (\n self._compute_second_stage_input_feature_maps(\n rpn_features_to_crop, detection_boxes))\n curr_box_classifier_features = (\n self._feature_extractor.extract_box_classifier_features(\n flattened_detected_feature_maps,\n scope=self.second_stage_feature_extractor_scope))\n\n if self._mask_rcnn_box_predictor.is_keras_model:\n mask_predictions = self._mask_rcnn_box_predictor(\n [curr_box_classifier_features],\n prediction_stage=3)\n else:\n mask_predictions = self._mask_rcnn_box_predictor.predict(\n [curr_box_classifier_features],\n num_predictions_per_location=[1],\n scope=self.second_stage_box_predictor_scope,\n prediction_stage=3)\n\n detection_masks = tf.squeeze(mask_predictions[\n box_predictor.MASK_PREDICTIONS], axis=1)\n\n _, num_classes, mask_height, 
mask_width = (\n detection_masks.get_shape().as_list())\n _, max_detection = detection_classes.get_shape().as_list()\n if num_classes > 1:\n detection_masks = self._gather_instance_masks(\n detection_masks, detection_classes)\n\n prediction_dict[fields.DetectionResultFields.detection_masks] = (\n tf.reshape(detection_masks,\n [batch_size, max_detection, mask_height, mask_width]))\n\n return prediction_dict\n\n def _gather_instance_masks(self, instance_masks, classes):\n \"\"\"Gathers the masks that correspond to classes.\n\n Args:\n instance_masks: A 4-D float32 tensor with shape\n [K, num_classes, mask_height, mask_width].\n classes: A 2-D int32 tensor with shape [batch_size, max_detection].\n\n Returns:\n masks: a 3-D float32 tensor with shape [K, mask_height, mask_width].\n \"\"\"\n _, num_classes, height, width = instance_masks.get_shape().as_list()\n k = tf.shape(instance_masks)[0]\n instance_masks = tf.reshape(instance_masks, [-1, height, width])\n classes = tf.to_int32(tf.reshape(classes, [-1]))\n gather_idx = tf.range(k) * num_classes + classes\n return tf.gather(instance_masks, gather_idx)\n\n def _extract_rpn_feature_maps(self, preprocessed_inputs):\n \"\"\"Extracts RPN features.\n\n This function extracts two feature maps: a feature map to be directly\n fed to a box predictor (to predict location and objectness scores for\n proposals) and a feature map from which to crop regions which will then\n be sent to the second stage box classifier.\n\n Args:\n preprocessed_inputs: a [batch, height, width, channels] image tensor.\n\n Returns:\n rpn_box_predictor_features: A 4-D float32 tensor with shape\n [batch, height, width, depth] to be used for predicting proposal boxes\n and corresponding objectness scores.\n rpn_features_to_crop: A 4-D float32 tensor with shape\n [batch, height, width, depth] representing image features to crop using\n the proposals boxes.\n anchors: A BoxList representing anchors (for the RPN) in\n absolute coordinates.\n image_shape: A 1-D tensor representing the input image shape.\n \"\"\"\n image_shape = tf.shape(preprocessed_inputs)\n rpn_features_to_crop, _ = self._feature_extractor.extract_proposal_features(\n preprocessed_inputs, scope=self.first_stage_feature_extractor_scope)\n\n feature_map_shape = tf.shape(rpn_features_to_crop)\n anchors = box_list_ops.concatenate(\n self._first_stage_anchor_generator.generate([(feature_map_shape[1],\n feature_map_shape[2])]))\n\n with slim.arg_scope(self._first_stage_box_predictor_arg_scope_fn()):\n kernel_size = self._first_stage_box_predictor_kernel_size\n rpn_box_predictor_features = slim.conv2d(\n rpn_features_to_crop,\n self._first_stage_box_predictor_depth,\n kernel_size=[kernel_size, kernel_size],\n rate=self._first_stage_atrous_rate,\n activation_fn=tf.nn.relu6)\n return (rpn_box_predictor_features, rpn_features_to_crop,\n anchors, image_shape)\n\n def _predict_rpn_proposals(self, rpn_box_predictor_features):\n \"\"\"Adds box predictors to RPN feature map to predict proposals.\n\n Note resulting tensors will not have been postprocessed.\n\n Args:\n rpn_box_predictor_features: A 4-D float32 tensor with shape\n [batch, height, width, depth] to be used for predicting proposal boxes\n and corresponding objectness scores.\n\n Returns:\n box_encodings: 3-D float tensor of shape\n [batch_size, num_anchors, self._box_coder.code_size] containing\n predicted boxes.\n objectness_predictions_with_background: 3-D float tensor of shape\n [batch_size, num_anchors, 2] containing class\n predictions (logits) for each of the 
anchors. Note that this\n tensor *includes* background class predictions (at class index 0).\n\n Raises:\n RuntimeError: if the anchor generator generates anchors corresponding to\n multiple feature maps. We currently assume that a single feature map\n is generated for the RPN.\n \"\"\"\n num_anchors_per_location = (\n self._first_stage_anchor_generator.num_anchors_per_location())\n if len(num_anchors_per_location) != 1:\n raise RuntimeError('anchor_generator is expected to generate anchors '\n 'corresponding to a single feature map.')\n if self._first_stage_box_predictor.is_keras_model:\n box_predictions = self._first_stage_box_predictor(\n [rpn_box_predictor_features])\n else:\n box_predictions = self._first_stage_box_predictor.predict(\n [rpn_box_predictor_features],\n num_anchors_per_location,\n scope=self.first_stage_box_predictor_scope)\n\n box_encodings = tf.concat(\n box_predictions[box_predictor.BOX_ENCODINGS], axis=1)\n objectness_predictions_with_background = tf.concat(\n box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],\n axis=1)\n return (tf.squeeze(box_encodings, axis=2),\n objectness_predictions_with_background)\n\n def _remove_invalid_anchors_and_predictions(\n self,\n box_encodings,\n objectness_predictions_with_background,\n anchors_boxlist,\n clip_window):\n \"\"\"Removes anchors that (partially) fall outside an image.\n\n Also removes associated box encodings and objectness predictions.\n\n Args:\n box_encodings: 3-D float tensor of shape\n [batch_size, num_anchors, self._box_coder.code_size] containing\n predicted boxes.\n objectness_predictions_with_background: 3-D float tensor of shape\n [batch_size, num_anchors, 2] containing class\n predictions (logits) for each of the anchors. Note that this\n tensor *includes* background class predictions (at class index 0).\n anchors_boxlist: A BoxList representing num_anchors anchors (for the RPN)\n in absolute coordinates.\n clip_window: a 1-D tensor representing the [ymin, xmin, ymax, xmax]\n extent of the window to clip/prune to.\n\n Returns:\n box_encodings: 4-D float tensor of shape\n [batch_size, num_valid_anchors, self._box_coder.code_size] containing\n predicted boxes, where num_valid_anchors <= num_anchors\n objectness_predictions_with_background: 2-D float tensor of shape\n [batch_size, num_valid_anchors, 2] containing class\n predictions (logits) for each of the anchors, where\n num_valid_anchors <= num_anchors. Note that this\n tensor *includes* background class predictions (at class index 0).\n anchors: A BoxList representing num_valid_anchors anchors (for the RPN) in\n absolute coordinates.\n \"\"\"\n pruned_anchors_boxlist, keep_indices = box_list_ops.prune_outside_window(\n anchors_boxlist, clip_window)\n def _batch_gather_kept_indices(predictions_tensor):\n return shape_utils.static_or_dynamic_map_fn(\n partial(tf.gather, indices=keep_indices),\n elems=predictions_tensor,\n dtype=tf.float32,\n parallel_iterations=self._parallel_iterations,\n back_prop=True)\n return (_batch_gather_kept_indices(box_encodings),\n _batch_gather_kept_indices(objectness_predictions_with_background),\n pruned_anchors_boxlist)\n\n def _flatten_first_two_dimensions(self, inputs):\n \"\"\"Flattens `K-d` tensor along batch dimension to be a `(K-1)-d` tensor.\n\n Converts `inputs` with shape [A, B, ..., depth] into a tensor of shape\n [A * B, ..., depth].\n\n Args:\n inputs: A float tensor with shape [A, B, ..., depth]. 
Note that the first\n two and last dimensions must be statically defined.\n Returns:\n A float tensor with shape [A * B, ..., depth] (where the first and last\n dimension are statically defined.\n \"\"\"\n combined_shape = shape_utils.combined_static_and_dynamic_shape(inputs)\n flattened_shape = tf.stack([combined_shape[0] * combined_shape[1]] +\n combined_shape[2:])\n return tf.reshape(inputs, flattened_shape)\n\n def postprocess(self, prediction_dict, true_image_shapes):\n \"\"\"Convert prediction tensors to final detections.\n\n This function converts raw predictions tensors to final detection results.\n See base class for output format conventions. Note also that by default,\n scores are to be interpreted as logits, but if a score_converter is used,\n then scores are remapped (and may thus have a different interpretation).\n\n If number_of_stages=1, the returned results represent proposals from the\n first stage RPN and are padded to have self.max_num_proposals for each\n image; otherwise, the results can be interpreted as multiclass detections\n from the full two-stage model and are padded to self._max_detections.\n\n Args:\n prediction_dict: a dictionary holding prediction tensors (see the\n documentation for the predict method. If number_of_stages=1, we\n expect prediction_dict to contain `rpn_box_encodings`,\n `rpn_objectness_predictions_with_background`, `rpn_features_to_crop`,\n and `anchors` fields. Otherwise we expect prediction_dict to\n additionally contain `refined_box_encodings`,\n `class_predictions_with_background`, `num_proposals`,\n `proposal_boxes` and, optionally, `mask_predictions` fields.\n true_image_shapes: int32 tensor of shape [batch, 3] where each row is\n of the form [height, width, channels] indicating the shapes\n of true images in the resized images, as resized images can be padded\n with zeros.\n\n Returns:\n detections: a dictionary containing the following fields\n detection_boxes: [batch, max_detection, 4]\n detection_scores: [batch, max_detections]\n detection_classes: [batch, max_detections]\n (this entry is only created if rpn_mode=False)\n num_detections: [batch]\n\n Raises:\n ValueError: If `predict` is called before `preprocess`.\n \"\"\"\n\n with tf.name_scope('FirstStagePostprocessor'):\n if self._number_of_stages == 1: \n # Michele's addition\n\n proposal_boxes, proposal_scores, num_proposals = self._postprocess_rpn(\n prediction_dict['rpn_box_encodings'],\n prediction_dict['rpn_objectness_predictions_with_background'],\n prediction_dict['anchors'],\n true_image_shapes,\n true_image_shapes)\n return {\n fields.DetectionResultFields.detection_boxes: proposal_boxes,\n fields.DetectionResultFields.detection_scores: proposal_scores,\n fields.DetectionResultFields.num_detections:\n tf.to_float(num_proposals),\n }\n\n # TODO(jrru): Remove mask_predictions from _post_process_box_classifier.\n with tf.name_scope('SecondStagePostprocessor'):\n if (self._number_of_stages == 2 or\n (self._number_of_stages == 3 and self._is_training)):\n mask_predictions = prediction_dict.get(box_predictor.MASK_PREDICTIONS)\n detections_dict = self._postprocess_box_classifier(\n prediction_dict['refined_box_encodings'],\n prediction_dict['class_predictions_with_background'],\n prediction_dict['proposal_boxes'],\n prediction_dict['num_proposals'],\n true_image_shapes,\n mask_predictions=mask_predictions)\n return detections_dict\n\n if self._number_of_stages == 3:\n # Post processing is already performed in 3rd stage. 
We need to transfer\n # postprocessed tensors from `prediction_dict` to `detections_dict`.\n detections_dict = {}\n for key in prediction_dict:\n if key == fields.DetectionResultFields.detection_masks:\n detections_dict[key] = tf.sigmoid(prediction_dict[key])\n elif 'detection' in key:\n detections_dict[key] = prediction_dict[key]\n return detections_dict\n\n def _postprocess_rpn(self,\n rpn_box_encodings_batch,\n rpn_objectness_predictions_with_background_batch,\n anchors,\n image_shapes,\n true_image_shapes):\n \"\"\"Converts first stage prediction tensors from the RPN to proposals.\n\n This function decodes the raw RPN predictions, runs non-max suppression\n on the result.\n\n Note that the behavior of this function is slightly modified during\n training --- specifically, we stop the gradient from passing through the\n proposal boxes and we only return a balanced sampled subset of proposals\n with size `second_stage_batch_size`.\n\n Args:\n rpn_box_encodings_batch: A 3-D float32 tensor of shape\n [batch_size, num_anchors, self._box_coder.code_size] containing\n predicted proposal box encodings.\n rpn_objectness_predictions_with_background_batch: A 3-D float tensor of\n shape [batch_size, num_anchors, 2] containing objectness predictions\n (logits) for each of the anchors with 0 corresponding to background\n and 1 corresponding to object.\n anchors: A 2-D tensor of shape [num_anchors, 4] representing anchors\n for the first stage RPN. Note that `num_anchors` can differ depending\n on whether the model is created in training or inference mode.\n image_shapes: A 2-D tensor of shape [batch, 3] containing the shapes of\n images in the batch.\n true_image_shapes: int32 tensor of shape [batch, 3] where each row is\n of the form [height, width, channels] indicating the shapes\n of true images in the resized images, as resized images can be padded\n with zeros.\n\n Returns:\n proposal_boxes: A float tensor with shape\n [batch_size, max_num_proposals, 4] representing the (potentially zero\n padded) proposal boxes for all images in the batch. These boxes are\n represented as normalized coordinates.\n proposal_scores: A float tensor with shape\n [batch_size, max_num_proposals] representing the (potentially zero\n padded) proposal objectness scores for all images in the batch.\n num_proposals: A Tensor of type `int32`. 
A 1-D tensor of shape [batch]\n representing the number of proposals predicted for each image in\n the batch.\n \"\"\"\n rpn_box_encodings_batch = tf.expand_dims(rpn_box_encodings_batch, axis=2)\n rpn_encodings_shape = shape_utils.combined_static_and_dynamic_shape(\n rpn_box_encodings_batch)\n tiled_anchor_boxes = tf.tile(\n tf.expand_dims(anchors, 0), [rpn_encodings_shape[0], 1, 1])\n proposal_boxes = self._batch_decode_boxes(rpn_box_encodings_batch,\n tiled_anchor_boxes)\n proposal_boxes = tf.squeeze(proposal_boxes, axis=2)\n rpn_objectness_softmax_without_background = tf.nn.softmax(\n rpn_objectness_predictions_with_background_batch)[:, :, 1]\n clip_window = self._compute_clip_window(image_shapes)\n (proposal_boxes, proposal_scores, _, _, _,\n num_proposals) = post_processing.batch_multiclass_non_max_suppression(\n tf.expand_dims(proposal_boxes, axis=2),\n tf.expand_dims(rpn_objectness_softmax_without_background,\n axis=2),\n self._first_stage_nms_score_threshold,\n self._first_stage_nms_iou_threshold,\n self._first_stage_max_proposals,\n self._first_stage_max_proposals,\n clip_window=clip_window)\n if self._is_training:\n proposal_boxes = tf.stop_gradient(proposal_boxes)\n if not self._hard_example_miner:\n (groundtruth_boxlists, groundtruth_classes_with_background_list, _,\n groundtruth_weights_list\n ) = self._format_groundtruth_data(true_image_shapes)\n (proposal_boxes, proposal_scores,\n num_proposals) = self._sample_box_classifier_batch(\n proposal_boxes, proposal_scores, num_proposals,\n groundtruth_boxlists, groundtruth_classes_with_background_list,\n groundtruth_weights_list)\n # normalize proposal boxes\n def normalize_boxes(args):\n proposal_boxes_per_image = args[0]\n image_shape = args[1]\n normalized_boxes_per_image = box_list_ops.to_normalized_coordinates(\n box_list.BoxList(proposal_boxes_per_image), image_shape[0],\n image_shape[1], check_range=False).get()\n return normalized_boxes_per_image\n normalized_proposal_boxes = shape_utils.static_or_dynamic_map_fn(\n normalize_boxes, elems=[proposal_boxes, image_shapes], dtype=tf.float32)\n return normalized_proposal_boxes, proposal_scores, num_proposals\n\n def _sample_box_classifier_batch(\n self,\n proposal_boxes,\n proposal_scores,\n num_proposals,\n groundtruth_boxlists,\n groundtruth_classes_with_background_list,\n groundtruth_weights_list,\n debug=None):\n \"\"\"Samples a minibatch for second stage.\n\n Args:\n proposal_boxes: A float tensor with shape\n [batch_size, num_proposals, 4] representing the (potentially zero\n padded) proposal boxes for all images in the batch. These boxes are\n represented in absolute coordinates.\n proposal_scores: A float tensor with shape\n [batch_size, num_proposals] representing the (potentially zero\n padded) proposal objectness scores for all images in the batch.\n num_proposals: A Tensor of type `int32`. 
A 1-D tensor of shape [batch]\n representing the number of proposals predicted for each image in\n the batch.\n groundtruth_boxlists: A list of BoxLists containing (absolute) coordinates\n of the groundtruth boxes.\n groundtruth_classes_with_background_list: A list of 2-D one-hot\n (or k-hot) tensors of shape [num_boxes, num_classes+1] containing the\n class targets with the 0th index assumed to map to the background class.\n groundtruth_weights_list: A list of 1-D tensors of shape [num_boxes]\n indicating the weight associated with the groundtruth boxes.\n\n Returns:\n proposal_boxes: A float tensor with shape\n [batch_size, second_stage_batch_size, 4] representing the (potentially\n zero padded) proposal boxes for all images in the batch. These boxes\n are represented in absolute coordinates.\n proposal_scores: A float tensor with shape\n [batch_size, second_stage_batch_size] representing the (potentially zero\n padded) proposal objectness scores for all images in the batch.\n num_proposals: A Tensor of type `int32`. A 1-D tensor of shape [batch]\n representing the number of proposals predicted for each image in\n the batch.\n \"\"\"\n single_image_proposal_box_sample = []\n single_image_proposal_score_sample = []\n single_image_num_proposals_sample = []\n for (single_image_proposal_boxes,\n single_image_proposal_scores,\n single_image_num_proposals,\n single_image_groundtruth_boxlist,\n single_image_groundtruth_classes_with_background,\n single_image_groundtruth_weights) in zip(\n tf.unstack(proposal_boxes),\n tf.unstack(proposal_scores),\n tf.unstack(num_proposals),\n groundtruth_boxlists,\n groundtruth_classes_with_background_list,\n groundtruth_weights_list):\n single_image_boxlist = box_list.BoxList(single_image_proposal_boxes)\n single_image_boxlist.add_field(fields.BoxListFields.scores,\n single_image_proposal_scores)\n sampled_boxlist = self._sample_box_classifier_minibatch_single_image(\n single_image_boxlist,\n single_image_num_proposals,\n single_image_groundtruth_boxlist,\n single_image_groundtruth_classes_with_background,\n single_image_groundtruth_weights,\n debug)\n # sampled_boxlist.set(tf.Print(sampled_boxlist.get(), [sampled_boxlist.num_boxes()], message=\"sample size \"))\n\n sampled_padded_boxlist = box_list_ops.pad_or_clip_box_list(\n sampled_boxlist,\n num_boxes=self._second_stage_batch_size)\n single_image_num_proposals_sample.append(tf.minimum(\n sampled_boxlist.num_boxes(),\n self._second_stage_batch_size))\n bb = sampled_padded_boxlist.get()\n #bb = tf.Print(bb, [single_image_groundtruth_boxlist.num_boxes()], message=(\"After padding and num of GT\" + str(bb.shape)))\n single_image_proposal_box_sample.append(bb)\n single_image_proposal_score_sample.append(\n sampled_padded_boxlist.get_field(fields.BoxListFields.scores))\n return (tf.stack(single_image_proposal_box_sample),\n tf.stack(single_image_proposal_score_sample),\n tf.stack(single_image_num_proposals_sample))\n\n def _format_groundtruth_data(self, true_image_shapes, stage='detection'):\n \"\"\"Helper function for preparing groundtruth data for target assignment.\n\n In order to be consistent with the model.DetectionModel interface,\n groundtruth boxes are specified in normalized coordinates and classes are\n specified as label indices with no assumed background category. 
To prepare\n for target assignment, we:\n 1) convert boxes to absolute coordinates,\n 2) add a background class at class index 0\n 3) groundtruth instance masks, if available, are resized to match\n image_shape.\n\n Args:\n true_image_shapes: int32 tensor of shape [batch, 3] where each row is\n of the form [height, width, channels] indicating the shapes\n of true images in the resized images, as resized images can be padded\n with zeros.\n\n Returns:\n groundtruth_boxlists: A list of BoxLists containing (absolute) coordinates\n of the groundtruth boxes.\n groundtruth_classes_with_background_list: A list of 2-D one-hot\n (or k-hot) tensors of shape [num_boxes, num_classes+1] containing the\n class targets with the 0th index assumed to map to the background class.\n groundtruth_masks_list: If present, a list of 3-D tf.float32 tensors of\n shape [num_boxes, image_height, image_width] containing instance masks.\n This is set to None if no masks exist in the provided groundtruth.\n \"\"\"\n groundtruth_boxlists = [\n box_list_ops.to_absolute_coordinates(\n box_list.BoxList(boxes), true_image_shapes[i, 0],\n true_image_shapes[i, 1])\n for i, boxes in enumerate(\n self.groundtruth_lists(fields.BoxListFields.boxes))\n ]\n groundtruth_classes_with_background_list = [\n tf.to_float(\n tf.pad(one_hot_encoding, [[0, 0], [1, 0]], mode='CONSTANT'))\n for one_hot_encoding in self.groundtruth_lists(\n fields.BoxListFields.classes)]\n\n groundtruth_masks_list = self._groundtruth_lists.get(\n fields.BoxListFields.masks)\n if groundtruth_masks_list is not None:\n resized_masks_list = []\n for mask in groundtruth_masks_list:\n _, resized_mask, _ = self._image_resizer_fn(\n # Reuse the given `image_resizer_fn` to resize groundtruth masks.\n # `mask` tensor for an image is of the shape [num_masks,\n # image_height, image_width]. Below we create a dummy image of the\n # the shape [image_height, image_width, 1] to use with\n # `image_resizer_fn`.\n image=tf.zeros(tf.stack([tf.shape(mask)[1], tf.shape(mask)[2], 1])),\n masks=mask)\n resized_masks_list.append(resized_mask)\n\n groundtruth_masks_list = resized_masks_list\n if self.groundtruth_has_field(fields.BoxListFields.weights):\n groundtruth_weights_list = self.groundtruth_lists(\n fields.BoxListFields.weights)\n else:\n # Set weights for all batch elements equally to 1.0\n groundtruth_weights_list = []\n for groundtruth_classes in groundtruth_classes_with_background_list:\n num_gt = tf.shape(groundtruth_classes)[0]\n groundtruth_weights = tf.ones(num_gt)\n groundtruth_weights_list.append(groundtruth_weights)\n\n return (groundtruth_boxlists, groundtruth_classes_with_background_list,\n groundtruth_masks_list, groundtruth_weights_list)\n\n def _sample_box_classifier_minibatch_single_image(\n self, proposal_boxlist, num_valid_proposals, groundtruth_boxlist,\n groundtruth_classes_with_background, groundtruth_weights, debug=None):\n \"\"\"Samples a mini-batch of proposals to be sent to the box classifier.\n\n Helper function for self._postprocess_rpn.\n\n Args:\n proposal_boxlist: A BoxList containing K proposal boxes in absolute\n coordinates.\n num_valid_proposals: Number of valid proposals in the proposal boxlist.\n groundtruth_boxlist: A Boxlist containing N groundtruth object boxes in\n absolute coordinates.\n groundtruth_classes_with_background: A tensor with shape\n `[N, self.num_classes + 1]` representing groundtruth classes. 
The\n classes are assumed to be k-hot encoded, and include background as the\n zero-th class.\n groundtruth_weights: Weights attached to the groundtruth_boxes.\n debug: contains (optional) true_image_shape\n\n Returns:\n a BoxList contained sampled proposals.\n \"\"\"\n (cls_targets, cls_weights, _, _, _) = self._detector_target_assigner.assign(\n proposal_boxlist,\n groundtruth_boxlist,\n groundtruth_classes_with_background,\n unmatched_class_label=tf.constant(\n [1] + self._num_classes * [0], dtype=tf.float32),\n groundtruth_weights=groundtruth_weights)\n # Selects all boxes as candidates if none of them is selected according\n # to cls_weights. This could happen as boxes within certain IOU ranges\n # are ignored. If triggered, the selected boxes will still be ignored\n # during loss computation.\n positive_indicator = tf.greater(tf.argmax(cls_targets, axis=1), 0)\n # Debug target mapping\n #positive_indicator = tf.Print(positive_indicator, [positive_indicator, box_list_ops.to_normalized_coordinates(groundtruth_boxlist, debug[0], debug[1]).get()], summarize=999999)\n\n\n valid_indicator = tf.logical_and(\n tf.range(proposal_boxlist.num_boxes()) < num_valid_proposals,\n cls_weights > 0\n )\n sampled_indices = self._second_stage_sampler.subsample(\n valid_indicator,\n self._second_stage_batch_size,\n positive_indicator)\n return box_list_ops.boolean_mask(proposal_boxlist, sampled_indices)\n\n def _compute_second_stage_input_feature_maps(self, features_to_crop,\n proposal_boxes_normalized):\n \"\"\"Crops to a set of proposals from the feature map for a batch of images.\n\n Helper function for self._postprocess_rpn. This function calls\n `tf.image.crop_and_resize` to create the feature map to be passed to the\n second stage box classifier for each proposal.\n\n Args:\n features_to_crop: A float32 tensor with shape\n [batch_size, height, width, depth]\n proposal_boxes_normalized: A float32 tensor with shape [batch_size,\n num_proposals, box_code_size] containing proposal boxes in\n normalized coordinates.\n\n Returns:\n A float32 tensor with shape [K, new_height, new_width, depth].\n \"\"\"\n def get_box_inds(proposals):\n proposals_shape = proposals.get_shape().as_list()\n if any(dim is None for dim in proposals_shape):\n proposals_shape = tf.shape(proposals)\n ones_mat = tf.ones(proposals_shape[:2], dtype=tf.int32)\n multiplier = tf.expand_dims(\n tf.range(start=0, limit=proposals_shape[0]), 1)\n return tf.reshape(ones_mat * multiplier, [-1])\n\n if self._use_matmul_crop_and_resize:\n def _single_image_crop_and_resize(inputs):\n single_image_features_to_crop, proposal_boxes_normalized = inputs\n return ops.matmul_crop_and_resize(\n tf.expand_dims(single_image_features_to_crop, 0),\n proposal_boxes_normalized,\n [self._initial_crop_size, self._initial_crop_size])\n\n cropped_regions = self._flatten_first_two_dimensions(\n shape_utils.static_or_dynamic_map_fn(\n _single_image_crop_and_resize,\n elems=[features_to_crop, proposal_boxes_normalized],\n dtype=tf.float32,\n parallel_iterations=self._parallel_iterations))\n else:\n cropped_regions = tf.image.crop_and_resize(\n features_to_crop,\n self._flatten_first_two_dimensions(proposal_boxes_normalized),\n get_box_inds(proposal_boxes_normalized),\n (self._initial_crop_size, self._initial_crop_size))\n return slim.max_pool2d(\n cropped_regions,\n [self._maxpool_kernel_size, self._maxpool_kernel_size], # Michele: Being specific to text, we want to preserve width more than height\n stride=[self._maxpool_stride, 1])\n\n def 
_postprocess_box_classifier(self,\n refined_box_encodings,\n class_predictions_with_background,\n proposal_boxes,\n num_proposals,\n image_shapes,\n mask_predictions=None):\n \"\"\"Converts predictions from the second stage box classifier to detections.\n\n Args:\n refined_box_encodings: a 3-D float tensor with shape\n [total_num_padded_proposals, num_classes, self._box_coder.code_size]\n representing predicted (final) refined box encodings. If using a shared\n box across classes the shape will instead be\n [total_num_padded_proposals, 1, 4]\n class_predictions_with_background: a 3-D tensor float with shape\n [total_num_padded_proposals, num_classes + 1] containing class\n predictions (logits) for each of the proposals. Note that this tensor\n *includes* background class predictions (at class index 0).\n proposal_boxes: a 3-D float tensor with shape\n [batch_size, self.max_num_proposals, 4] representing decoded proposal\n bounding boxes in absolute coordinates.\n num_proposals: a 1-D int32 tensor of shape [batch] representing the number\n of proposals predicted for each image in the batch.\n image_shapes: a 2-D int32 tensor containing shapes of input image in the\n batch.\n mask_predictions: (optional) a 4-D float tensor with shape\n [total_num_padded_proposals, num_classes, mask_height, mask_width]\n containing instance mask prediction logits.\n\n Returns:\n A dictionary containing:\n `detection_boxes`: [batch, max_detection, 4]\n `detection_scores`: [batch, max_detections]\n `detection_classes`: [batch, max_detections]\n `num_detections`: [batch]\n `detection_masks`:\n (optional) [batch, max_detections, mask_height, mask_width]. Note\n that a pixel-wise sigmoid score converter is applied to the detection\n masks.\n \"\"\"\n refined_box_encodings_batch = tf.reshape(\n refined_box_encodings,\n [-1,\n self.max_num_proposals,\n refined_box_encodings.shape[1],\n self._box_coder.code_size])\n class_predictions_with_background_batch = tf.reshape(\n class_predictions_with_background,\n [-1, self.max_num_proposals, self.num_classes + 1]\n )\n refined_decoded_boxes_batch = self._batch_decode_boxes(\n refined_box_encodings_batch, proposal_boxes)\n class_predictions_with_background_batch = (\n self._second_stage_score_conversion_fn(\n class_predictions_with_background_batch))\n class_predictions_batch = tf.reshape(\n tf.slice(class_predictions_with_background_batch,\n [0, 0, 1], [-1, -1, -1]),\n [-1, self.max_num_proposals, self.num_classes])\n clip_window = self._compute_clip_window(image_shapes)\n mask_predictions_batch = None\n if mask_predictions is not None:\n mask_height = mask_predictions.shape[2].value\n mask_width = mask_predictions.shape[3].value\n mask_predictions = tf.sigmoid(mask_predictions)\n mask_predictions_batch = tf.reshape(\n mask_predictions, [-1, self.max_num_proposals,\n self.num_classes, mask_height, mask_width])\n (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, _,\n num_detections) = self._second_stage_nms_fn(\n refined_decoded_boxes_batch,\n class_predictions_batch,\n clip_window=clip_window,\n change_coordinate_frame=True,\n num_valid_boxes=num_proposals,\n masks=mask_predictions_batch)\n detections = {\n fields.DetectionResultFields.detection_boxes: nmsed_boxes,\n fields.DetectionResultFields.detection_scores: nmsed_scores,\n fields.DetectionResultFields.detection_classes: nmsed_classes,\n fields.DetectionResultFields.num_detections: tf.to_float(num_detections)\n }\n if nmsed_masks is not None:\n detections[fields.DetectionResultFields.detection_masks] = 
nmsed_masks\n return detections\n\n def _batch_decode_boxes(self, box_encodings, anchor_boxes):\n \"\"\"Decodes box encodings with respect to the anchor boxes.\n\n Args:\n box_encodings: a 4-D tensor with shape\n [batch_size, num_anchors, num_classes, self._box_coder.code_size]\n representing box encodings.\n anchor_boxes: [batch_size, num_anchors, self._box_coder.code_size]\n representing decoded bounding boxes. If using a shared box across\n classes the shape will instead be\n [total_num_proposals, 1, self._box_coder.code_size].\n\n Returns:\n decoded_boxes: a\n [batch_size, num_anchors, num_classes, self._box_coder.code_size]\n float tensor representing bounding box predictions (for each image in\n batch, proposal and class). If using a shared box across classes the\n shape will instead be\n [batch_size, num_anchors, 1, self._box_coder.code_size].\n \"\"\"\n combined_shape = shape_utils.combined_static_and_dynamic_shape(\n box_encodings)\n num_classes = combined_shape[2]\n tiled_anchor_boxes = tf.tile(\n tf.expand_dims(anchor_boxes, 2), [1, 1, num_classes, 1])\n tiled_anchors_boxlist = box_list.BoxList(\n tf.reshape(tiled_anchor_boxes, [-1, 4]))\n decoded_boxes = self._box_coder.decode(\n tf.reshape(box_encodings, [-1, self._box_coder.code_size]),\n tiled_anchors_boxlist)\n return tf.reshape(decoded_boxes.get(),\n tf.stack([combined_shape[0], combined_shape[1],\n num_classes, 4]))\n\n '''def loss(self, prediction_dict, true_image_shapes, scope=None):\n \"\"\"Compute scalar loss tensors given prediction tensors.\n\n If number_of_stages=1, only RPN related losses are computed (i.e.,\n `rpn_localization_loss` and `rpn_objectness_loss`). Otherwise all\n losses are computed.\n\n Args:\n prediction_dict: a dictionary holding prediction tensors (see the\n documentation for the predict method. If number_of_stages=1, we\n expect prediction_dict to contain `rpn_box_encodings`,\n `rpn_objectness_predictions_with_background`, `rpn_features_to_crop`,\n `image_shape`, and `anchors` fields. 
Otherwise we expect\n prediction_dict to additionally contain `refined_box_encodings`,\n `class_predictions_with_background`, `num_proposals`, and\n `proposal_boxes` fields.\n true_image_shapes: int32 tensor of shape [batch, 3] where each row is\n of the form [height, width, channels] indicating the shapes\n of true images in the resized images, as resized images can be padded\n with zeros.\n scope: Optional scope name.\n\n Returns:\n a dictionary mapping loss keys (`first_stage_localization_loss`,\n `first_stage_objectness_loss`, 'second_stage_localization_loss',\n 'second_stage_classification_loss') to scalar tensors representing\n corresponding loss values.\n \"\"\"\n with tf.name_scope(scope, 'Loss', prediction_dict.values()):\n (groundtruth_boxlists, groundtruth_classes_with_background_list,\n groundtruth_masks_list, groundtruth_weights_list\n ) = self._format_groundtruth_data(true_image_shapes)\n loss_dict = self._loss_rpn(\n prediction_dict['rpn_box_encodings'],\n prediction_dict['rpn_objectness_predictions_with_background'],\n prediction_dict['anchors'], groundtruth_boxlists,\n groundtruth_classes_with_background_list, groundtruth_weights_list)\n if self._number_of_stages > 1:\n loss_dict.update(\n self._loss_box_classifier(\n prediction_dict['refined_box_encodings'],\n prediction_dict['class_predictions_with_background'],\n prediction_dict['proposal_boxes'],\n prediction_dict['num_proposals'],\n groundtruth_boxlists,\n groundtruth_classes_with_background_list,\n groundtruth_weights_list,\n prediction_dict['image_shape'],\n prediction_dict.get('mask_predictions'),\n groundtruth_masks_list,\n ))\n return loss_dict'''\n\n def loss(self, prediction_dict, true_image_shapes, scope=None):\n \"\"\"Compute scalar loss tensors given prediction tensors.\n\n If number_of_stages=1, only RPN related losses are computed (i.e.,\n `rpn_localization_loss` and `rpn_objectness_loss`). Otherwise all\n losses are computed.\n\n Args:\n prediction_dict: a dictionary holding prediction tensors (see the\n documentation for the predict method. If number_of_stages=1, we\n expect prediction_dict to contain `rpn_box_encodings`,\n `rpn_objectness_predictions_with_background`, `rpn_features_to_crop`,\n `image_shape`, and `anchors` fields. 
Otherwise we expect\n prediction_dict to additionally contain `refined_box_encodings`,\n `class_predictions_with_background`, `num_proposals`, and\n `proposal_boxes` fields.\n true_image_shapes: int32 tensor of shape [batch, 3] where each row is\n of the form [height, width, channels] indicating the shapes\n of true images in the resized images, as resized images can be padded\n with zeros.\n scope: Optional scope name.\n\n Returns:\n a dictionary mapping loss keys (`first_stage_localization_loss`,\n `first_stage_objectness_loss`, 'second_stage_localization_loss',\n 'second_stage_classification_loss') to scalar tensors representing\n corresponding loss values.\n \"\"\"\n with tf.name_scope(scope, 'Loss', prediction_dict.values()):\n (groundtruth_boxlists, groundtruth_classes_with_background_list,\n groundtruth_masks_list, groundtruth_weights_list\n ) = self._format_groundtruth_data(true_image_shapes)\n '''loss_dict = self._loss_rpn(\n prediction_dict['rpn_box_encodings'],\n prediction_dict['rpn_objectness_predictions_with_background'],\n prediction_dict['anchors'], groundtruth_boxlists,\n groundtruth_classes_with_background_list, groundtruth_weights_list)'''\n #if self._number_of_stages > 1:\n # loss_dict.update(\n loss_dict = self._loss_box_classifier(\n prediction_dict['refined_box_encodings'],\n prediction_dict['class_predictions_with_background'],\n prediction_dict['proposal_boxes'],\n prediction_dict['num_proposals'],\n groundtruth_boxlists,\n groundtruth_classes_with_background_list,\n groundtruth_weights_list,\n prediction_dict['image_shape'],\n prediction_dict.get('mask_predictions'),\n groundtruth_masks_list,\n )#)\n return loss_dict\n\n def _loss_rpn(self, rpn_box_encodings,\n rpn_objectness_predictions_with_background, anchors,\n groundtruth_boxlists, groundtruth_classes_with_background_list,\n groundtruth_weights_list):\n \"\"\"Computes scalar RPN loss tensors.\n\n Uses self._proposal_target_assigner to obtain regression and classification\n targets for the first stage RPN, samples a \"minibatch\" of anchors to\n participate in the loss computation, and returns the RPN losses.\n\n Args:\n rpn_box_encodings: A 4-D float tensor of shape\n [batch_size, num_anchors, self._box_coder.code_size] containing\n predicted proposal box encodings.\n rpn_objectness_predictions_with_background: A 2-D float tensor of shape\n [batch_size, num_anchors, 2] containing objectness predictions\n (logits) for each of the anchors with 0 corresponding to background\n and 1 corresponding to object.\n anchors: A 2-D tensor of shape [num_anchors, 4] representing anchors\n for the first stage RPN. 
Note that `num_anchors` can differ depending\n on whether the model is created in training or inference mode.\n groundtruth_boxlists: A list of BoxLists containing coordinates of the\n groundtruth boxes.\n groundtruth_classes_with_background_list: A list of 2-D one-hot\n (or k-hot) tensors of shape [num_boxes, num_classes+1] containing the\n class targets with the 0th index assumed to map to the background class.\n groundtruth_weights_list: A list of 1-D tf.float32 tensors of shape\n [num_boxes] containing weights for groundtruth boxes.\n\n Returns:\n a dictionary mapping loss keys (`first_stage_localization_loss`,\n `first_stage_objectness_loss`) to scalar tensors representing\n corresponding loss values.\n \"\"\"\n with tf.name_scope('RPNLoss'):\n (batch_cls_targets, batch_cls_weights, batch_reg_targets,\n batch_reg_weights, _) = target_assigner.batch_assign_targets(\n target_assigner=self._proposal_target_assigner,\n anchors_batch=box_list.BoxList(anchors),\n gt_box_batch=groundtruth_boxlists,\n gt_class_targets_batch=(len(groundtruth_boxlists) * [None]),\n gt_weights_batch=groundtruth_weights_list)\n batch_cls_targets = tf.squeeze(batch_cls_targets, axis=2)\n\n def _minibatch_subsample_fn(inputs):\n cls_targets, cls_weights = inputs\n return self._first_stage_sampler.subsample(\n tf.cast(cls_weights, tf.bool),\n self._first_stage_minibatch_size, tf.cast(cls_targets, tf.bool))\n batch_sampled_indices = tf.to_float(shape_utils.static_or_dynamic_map_fn(\n _minibatch_subsample_fn,\n [batch_cls_targets, batch_cls_weights],\n dtype=tf.bool,\n parallel_iterations=self._parallel_iterations,\n back_prop=True))\n\n # Normalize by number of examples in sampled minibatch\n normalizer = tf.reduce_sum(batch_sampled_indices, axis=1)\n batch_one_hot_targets = tf.one_hot(\n tf.to_int32(batch_cls_targets), depth=2)\n sampled_reg_indices = tf.multiply(batch_sampled_indices,\n batch_reg_weights)\n\n localization_losses = self._first_stage_localization_loss(\n rpn_box_encodings, batch_reg_targets, weights=sampled_reg_indices)\n objectness_losses = self._first_stage_objectness_loss(\n rpn_objectness_predictions_with_background,\n batch_one_hot_targets, weights=batch_sampled_indices)\n localization_loss = tf.reduce_mean(\n tf.reduce_sum(localization_losses, axis=1) / normalizer)\n objectness_loss = tf.reduce_mean(\n tf.reduce_sum(objectness_losses, axis=1) / normalizer)\n\n localization_loss = tf.multiply(self._first_stage_loc_loss_weight,\n localization_loss,\n name='localization_loss')\n objectness_loss = tf.multiply(self._first_stage_obj_loss_weight,\n objectness_loss, name='objectness_loss')\n loss_dict = {localization_loss.op.name: localization_loss,\n objectness_loss.op.name: objectness_loss}\n return loss_dict\n\n def _loss_box_classifier(self,\n refined_box_encodings,\n class_predictions_with_background,\n proposal_boxes,\n num_proposals,\n groundtruth_boxlists,\n groundtruth_classes_with_background_list,\n groundtruth_weights_list,\n image_shape,\n prediction_masks=None,\n groundtruth_masks_list=None):\n \"\"\"Computes scalar box classifier loss tensors.\n\n Uses self._detector_target_assigner to obtain regression and classification\n targets for the second stage box classifier, optionally performs\n hard mining, and returns losses. 
All losses are computed independently\n for each image and then averaged across the batch.\n Please note that for boxes and masks with multiple labels, the box\n regression and mask prediction losses are only computed for one label.\n\n This function assumes that the proposal boxes in the \"padded\" regions are\n actually zero (and thus should not be matched to).\n\n\n Args:\n refined_box_encodings: a 3-D tensor with shape\n [total_num_proposals, num_classes, box_coder.code_size] representing\n predicted (final) refined box encodings. If using a shared box across\n classes this will instead have shape\n [total_num_proposals, 1, box_coder.code_size].\n class_predictions_with_background: a 2-D tensor with shape\n [total_num_proposals, num_classes + 1] containing class\n predictions (logits) for each of the anchors. Note that this tensor\n *includes* background class predictions (at class index 0).\n proposal_boxes: [batch_size, self.max_num_proposals, 4] representing\n decoded proposal bounding boxes.\n num_proposals: A Tensor of type `int32`. A 1-D tensor of shape [batch]\n representing the number of proposals predicted for each image in\n the batch.\n groundtruth_boxlists: a list of BoxLists containing coordinates of the\n groundtruth boxes.\n groundtruth_classes_with_background_list: a list of 2-D one-hot\n (or k-hot) tensors of shape [num_boxes, num_classes + 1] containing the\n class targets with the 0th index assumed to map to the background class.\n groundtruth_weights_list: A list of 1-D tf.float32 tensors of shape\n [num_boxes] containing weights for groundtruth boxes.\n image_shape: a 1-D tensor of shape [4] representing the image shape.\n prediction_masks: an optional 4-D tensor with shape [total_num_proposals,\n num_classes, mask_height, mask_width] containing the instance masks for\n each box.\n groundtruth_masks_list: an optional list of 3-D tensors of shape\n [num_boxes, image_height, image_width] containing the instance masks for\n each of the boxes.\n\n Returns:\n a dictionary mapping loss keys ('second_stage_localization_loss',\n 'second_stage_classification_loss') to scalar tensors representing\n corresponding loss values.\n\n Raises:\n ValueError: if `predict_instance_masks` in\n second_stage_mask_rcnn_box_predictor is True and\n `groundtruth_masks_list` is not provided.\n \"\"\"\n with tf.name_scope('BoxClassifierLoss'):\n paddings_indicator = self._padded_batched_proposals_indicator(\n num_proposals, self.max_num_proposals)\n proposal_boxlists = [\n box_list.BoxList(proposal_boxes_single_image)\n for proposal_boxes_single_image in tf.unstack(proposal_boxes)]\n batch_size = len(proposal_boxlists)\n\n num_proposals_or_one = tf.to_float(tf.expand_dims(\n tf.maximum(num_proposals, tf.ones_like(num_proposals)), 1))\n normalizer = tf.tile(num_proposals_or_one,\n [1, self.max_num_proposals]) * batch_size\n\n (batch_cls_targets_with_background, batch_cls_weights, batch_reg_targets,\n batch_reg_weights, _) = target_assigner.batch_assign_targets(\n target_assigner=self._detector_target_assigner,\n anchors_batch=proposal_boxlists,\n gt_box_batch=groundtruth_boxlists,\n gt_class_targets_batch=groundtruth_classes_with_background_list,\n unmatched_class_label=tf.constant(\n [1] + self._num_classes * [0], dtype=tf.float32),\n gt_weights_batch=groundtruth_weights_list)\n\n class_predictions_with_background = tf.reshape(\n class_predictions_with_background,\n [batch_size, self.max_num_proposals, -1])\n\n flat_cls_targets_with_background = tf.reshape(\n 
batch_cls_targets_with_background,\n [batch_size * self.max_num_proposals, -1])\n one_hot_flat_cls_targets_with_background = tf.argmax(\n flat_cls_targets_with_background, axis=1)\n one_hot_flat_cls_targets_with_background = tf.one_hot(\n one_hot_flat_cls_targets_with_background,\n flat_cls_targets_with_background.get_shape()[1])\n\n # If using a shared box across classes use directly\n if refined_box_encodings.shape[1] == 1:\n reshaped_refined_box_encodings = tf.reshape(\n refined_box_encodings,\n [batch_size, self.max_num_proposals, self._box_coder.code_size])\n # For anchors with multiple labels, picks refined_location_encodings\n # for just one class to avoid over-counting for regression loss and\n # (optionally) mask loss.\n else:\n # We only predict refined location encodings for the non background\n # classes, but we now pad it to make it compatible with the class\n # predictions\n refined_box_encodings_with_background = tf.pad(\n refined_box_encodings, [[0, 0], [1, 0], [0, 0]])\n refined_box_encodings_masked_by_class_targets = tf.boolean_mask(\n refined_box_encodings_with_background,\n tf.greater(one_hot_flat_cls_targets_with_background, 0))\n reshaped_refined_box_encodings = tf.reshape(\n refined_box_encodings_masked_by_class_targets,\n [batch_size, self.max_num_proposals, self._box_coder.code_size])\n\n second_stage_loc_losses = self._second_stage_localization_loss(\n reshaped_refined_box_encodings,\n batch_reg_targets, weights=batch_reg_weights) / normalizer\n second_stage_cls_losses = ops.reduce_sum_trailing_dimensions(\n self._second_stage_classification_loss(\n class_predictions_with_background,\n batch_cls_targets_with_background,\n weights=batch_cls_weights),\n ndims=2) / normalizer\n\n second_stage_loc_loss = tf.reduce_sum(\n tf.boolean_mask(second_stage_loc_losses, paddings_indicator))\n second_stage_cls_loss = tf.reduce_sum(\n tf.boolean_mask(second_stage_cls_losses, paddings_indicator))\n\n if self._hard_example_miner:\n (second_stage_loc_loss, second_stage_cls_loss\n ) = self._unpad_proposals_and_apply_hard_mining(\n proposal_boxlists, second_stage_loc_losses,\n second_stage_cls_losses, num_proposals)\n localization_loss = tf.multiply(self._second_stage_loc_loss_weight,\n second_stage_loc_loss,\n name='localization_loss')\n\n classification_loss = tf.multiply(self._second_stage_cls_loss_weight,\n second_stage_cls_loss,\n name='classification_loss')\n\n loss_dict = {localization_loss.op.name: localization_loss,\n classification_loss.op.name: classification_loss}\n second_stage_mask_loss = None\n if prediction_masks is not None:\n if groundtruth_masks_list is None:\n raise ValueError('Groundtruth instance masks not provided. '\n 'Please configure input reader.')\n\n unmatched_mask_label = tf.zeros(image_shape[1:3], dtype=tf.float32)\n (batch_mask_targets, _, _, batch_mask_target_weights,\n _) = target_assigner.batch_assign_targets(\n target_assigner=self._detector_target_assigner,\n anchors_batch=proposal_boxlists,\n gt_box_batch=groundtruth_boxlists,\n gt_class_targets_batch=groundtruth_masks_list,\n unmatched_class_label=unmatched_mask_label,\n gt_weights_batch=groundtruth_weights_list)\n\n # Pad the prediction_masks with to add zeros for background class to be\n # consistent with class predictions.\n if prediction_masks.get_shape().as_list()[1] == 1:\n # Class agnostic masks or masks for one-class prediction. 
Logic for\n # both cases is the same since background predictions are ignored\n # through the batch_mask_target_weights.\n prediction_masks_masked_by_class_targets = prediction_masks\n else:\n prediction_masks_with_background = tf.pad(\n prediction_masks, [[0, 0], [1, 0], [0, 0], [0, 0]])\n prediction_masks_masked_by_class_targets = tf.boolean_mask(\n prediction_masks_with_background,\n tf.greater(one_hot_flat_cls_targets_with_background, 0))\n\n mask_height = prediction_masks.shape[2].value\n mask_width = prediction_masks.shape[3].value\n reshaped_prediction_masks = tf.reshape(\n prediction_masks_masked_by_class_targets,\n [batch_size, -1, mask_height * mask_width])\n\n batch_mask_targets_shape = tf.shape(batch_mask_targets)\n flat_gt_masks = tf.reshape(batch_mask_targets,\n [-1, batch_mask_targets_shape[2],\n batch_mask_targets_shape[3]])\n\n # Use normalized proposals to crop mask targets from image masks.\n flat_normalized_proposals = box_list_ops.to_normalized_coordinates(\n box_list.BoxList(tf.reshape(proposal_boxes, [-1, 4])),\n image_shape[1], image_shape[2]).get()\n\n flat_cropped_gt_mask = tf.image.crop_and_resize(\n tf.expand_dims(flat_gt_masks, -1),\n flat_normalized_proposals,\n tf.range(flat_normalized_proposals.shape[0].value),\n [mask_height, mask_width])\n\n batch_cropped_gt_mask = tf.reshape(\n flat_cropped_gt_mask,\n [batch_size, -1, mask_height * mask_width])\n\n second_stage_mask_losses = ops.reduce_sum_trailing_dimensions(\n self._second_stage_mask_loss(\n reshaped_prediction_masks,\n batch_cropped_gt_mask,\n weights=batch_mask_target_weights),\n ndims=2) / (\n mask_height * mask_width * tf.maximum(\n tf.reduce_sum(\n batch_mask_target_weights, axis=1, keep_dims=True\n ), tf.ones((batch_size, 1))))\n second_stage_mask_loss = tf.reduce_sum(\n tf.boolean_mask(second_stage_mask_losses, paddings_indicator))\n\n if second_stage_mask_loss is not None:\n mask_loss = tf.multiply(self._second_stage_mask_loss_weight,\n second_stage_mask_loss, name='mask_loss')\n loss_dict[mask_loss.op.name] = mask_loss\n return loss_dict\n\n def _padded_batched_proposals_indicator(self,\n num_proposals,\n max_num_proposals):\n \"\"\"Creates indicator matrix of non-pad elements of padded batch proposals.\n\n Args:\n num_proposals: Tensor of type tf.int32 with shape [batch_size].\n max_num_proposals: Maximum number of proposals per image (integer).\n\n Returns:\n A Tensor of type tf.bool with shape [batch_size, max_num_proposals].\n \"\"\"\n batch_size = tf.size(num_proposals)\n tiled_num_proposals = tf.tile(\n tf.expand_dims(num_proposals, 1), [1, max_num_proposals])\n tiled_proposal_index = tf.tile(\n tf.expand_dims(tf.range(max_num_proposals), 0), [batch_size, 1])\n return tf.greater(tiled_num_proposals, tiled_proposal_index)\n\n def _unpad_proposals_and_apply_hard_mining(self,\n proposal_boxlists,\n second_stage_loc_losses,\n second_stage_cls_losses,\n num_proposals):\n \"\"\"Unpads proposals and applies hard mining.\n\n Args:\n proposal_boxlists: A list of `batch_size` BoxLists each representing\n `self.max_num_proposals` representing decoded proposal bounding boxes\n for each image.\n second_stage_loc_losses: A Tensor of type `float32`. A tensor of shape\n `[batch_size, self.max_num_proposals]` representing per-anchor\n second stage localization loss values.\n second_stage_cls_losses: A Tensor of type `float32`. A tensor of shape\n `[batch_size, self.max_num_proposals]` representing per-anchor\n second stage classification loss values.\n num_proposals: A Tensor of type `int32`. 
A 1-D tensor of shape [batch]\n representing the number of proposals predicted for each image in\n the batch.\n\n Returns:\n second_stage_loc_loss: A scalar float32 tensor representing the second\n stage localization loss.\n second_stage_cls_loss: A scalar float32 tensor representing the second\n stage classification loss.\n \"\"\"\n for (proposal_boxlist, single_image_loc_loss, single_image_cls_loss,\n single_image_num_proposals) in zip(\n proposal_boxlists,\n tf.unstack(second_stage_loc_losses),\n tf.unstack(second_stage_cls_losses),\n tf.unstack(num_proposals)):\n proposal_boxlist = box_list.BoxList(\n tf.slice(proposal_boxlist.get(),\n [0, 0], [single_image_num_proposals, -1]))\n single_image_loc_loss = tf.slice(single_image_loc_loss,\n [0], [single_image_num_proposals])\n single_image_cls_loss = tf.slice(single_image_cls_loss,\n [0], [single_image_num_proposals])\n return self._hard_example_miner(\n location_losses=tf.expand_dims(single_image_loc_loss, 0),\n cls_losses=tf.expand_dims(single_image_cls_loss, 0),\n decoded_boxlist_list=[proposal_boxlist])\n\n def restore_map(self,\n fine_tune_checkpoint_type='detection',\n load_all_detection_checkpoint_vars=False):\n \"\"\"Returns a map of variables to load from a foreign checkpoint.\n\n See parent class for details.\n\n Args:\n fine_tune_checkpoint_type: whether to restore from a full detection\n checkpoint (with compatible variable names) or to restore from a\n classification checkpoint for initialization prior to training.\n Valid values: `detection`, `classification`. Default 'detection'.\n load_all_detection_checkpoint_vars: whether to load all variables (when\n `fine_tune_checkpoint_type` is `detection`). If False, only variables\n within the feature extractor scopes are included. Default False.\n\n Returns:\n A dict mapping variable names (to load from a checkpoint) to variables in\n the model graph.\n Raises:\n ValueError: if fine_tune_checkpoint_type is neither `classification`\n nor `detection`.\n \"\"\"\n if fine_tune_checkpoint_type not in ['detection', 'classification']:\n raise ValueError('Not supported fine_tune_checkpoint_type: {}'.format(\n fine_tune_checkpoint_type))\n if fine_tune_checkpoint_type == 'classification':\n return self._feature_extractor.restore_from_classification_checkpoint_fn(\n self.first_stage_feature_extractor_scope,\n self.second_stage_feature_extractor_scope)\n\n variables_to_restore = tf.global_variables()\n variables_to_restore.append(slim.get_or_create_global_step())\n # Only load feature extractor variables to be consistent with loading from\n # a classification checkpoint.\n include_patterns = None\n if not load_all_detection_checkpoint_vars:\n include_patterns = [\n self.first_stage_feature_extractor_scope,\n self.second_stage_feature_extractor_scope\n ]\n feature_extractor_variables = tf.contrib.framework.filter_variables(\n variables_to_restore, include_patterns=include_patterns)\n return {var.op.name: var for var in feature_extractor_variables}\n" ]
[ [ "tensorflow.concat", "tensorflow.zeros", "tensorflow.stack", "tensorflow.reduce_sum", "tensorflow.global_variables", "tensorflow.cast", "tensorflow.pad", "tensorflow.to_int32", "tensorflow.boolean_mask", "tensorflow.greater", "tensorflow.squeeze", "tensorflow.stop_gradient", "tensorflow.gather", "tensorflow.name_scope", "tensorflow.to_float", "tensorflow.argmax", "tensorflow.tile", "tensorflow.unstack", "tensorflow.shape", "tensorflow.zeros_like", "tensorflow.size", "tensorflow.nn.softmax", "tensorflow.constant", "tensorflow.multiply", "tensorflow.range", "tensorflow.slice", "tensorflow.reshape", "tensorflow.sigmoid", "tensorflow.expand_dims", "tensorflow.ones", "tensorflow.ones_like", "tensorflow.contrib.framework.filter_variables", "tensorflow.variable_scope" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
Sakura176/PointRCNN
[ "a7fbb25e931609a39c32cb821a7c98a326e8b0c0" ]
[ "tools/train_eval.py" ]
[ "import os\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom lib.utils.bbox_transform import decode_bbox_target\nfrom tools.kitti_object_eval_python.evaluate import evaluate as kitti_evaluate\n\nfrom lib.config import cfg\nimport lib.utils.kitti_utils as kitti_utils\nimport lib.utils.iou3d.iou3d_utils as iou3d_utils\nfrom datetime import datetime\nfrom tensorboardX import SummaryWriter\nimport tqdm\n\nnp.random.seed(1024) # set the same seed\n\n\ndef save_kitti_format(sample_id, calib, bbox3d, kitti_output_dir, scores, img_shape):\n corners3d = kitti_utils.boxes3d_to_corners3d(bbox3d)\n img_boxes, _ = calib.corners3d_to_img_boxes(corners3d)\n\n img_boxes[:, 0] = np.clip(img_boxes[:, 0], 0, img_shape[1] - 1)\n img_boxes[:, 1] = np.clip(img_boxes[:, 1], 0, img_shape[0] - 1)\n img_boxes[:, 2] = np.clip(img_boxes[:, 2], 0, img_shape[1] - 1)\n img_boxes[:, 3] = np.clip(img_boxes[:, 3], 0, img_shape[0] - 1)\n\n img_boxes_w = img_boxes[:, 2] - img_boxes[:, 0]\n img_boxes_h = img_boxes[:, 3] - img_boxes[:, 1]\n box_valid_mask = np.logical_and(\n img_boxes_w < img_shape[1] * 0.8, img_boxes_h < img_shape[0] * 0.8)\n\n kitti_output_file = os.path.join(kitti_output_dir, '%06d.txt' % sample_id)\n with open(kitti_output_file, 'w') as f:\n for k in range(bbox3d.shape[0]):\n if box_valid_mask[k] == 0:\n continue\n x, z, ry = bbox3d[k, 0], bbox3d[k, 2], bbox3d[k, 6]\n beta = np.arctan2(z, x)\n alpha = -np.sign(beta) * np.pi / 2 + beta + ry\n\n print('%s -1 -1 %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f' %\n (cfg.CLASSES, alpha, img_boxes[k, 0], img_boxes[k, 1], img_boxes[k, 2], img_boxes[k, 3],\n bbox3d[k, 3], bbox3d[k, 4], bbox3d[k,\n 5], bbox3d[k, 0], bbox3d[k, 1], bbox3d[k, 2],\n bbox3d[k, 6], scores[k]), file=f)\n\n\ndef eval_one_epoch_joint(model, dataloader, epoch_id, result_dir):\n # print(\"-----------------joint____________________________*******\")\n np.random.seed(666)\n MEAN_SIZE = torch.from_numpy(cfg.CLS_MEAN_SIZE[0]).cuda()\n mode = 'EVAL'\n\n final_output_dir = os.path.join(result_dir, 'final_result', 'data')\n os.makedirs(final_output_dir, exist_ok=True)\n\n if True:\n # print(\"------------save_result__________________*******\")\n roi_output_dir = os.path.join(result_dir, 'roi_result', 'data')\n refine_output_dir = os.path.join(result_dir, 'refine_result', 'data')\n rpn_output_dir = os.path.join(result_dir, 'rpn_result', 'data')\n os.makedirs(rpn_output_dir, exist_ok=True)\n os.makedirs(roi_output_dir, exist_ok=True)\n os.makedirs(refine_output_dir, exist_ok=True)\n\n model.eval()\n\n thresh_list = [0.1, 0.3, 0.5, 0.7, 0.9]\n total_recalled_bbox_list, total_gt_bbox = [0] * 5, 0\n total_roi_recalled_bbox_list = [0] * 5\n dataset = dataloader.dataset\n cnt = final_total = total_cls_acc = total_cls_acc_refined = total_rpn_iou = 0\n\n progress_bar = tqdm.tqdm(total=len(dataloader), leave=True, desc='eval')\n for data in dataloader:\n cnt += 1\n calib = data['calib']\n sample_id, pts_rect, pts_features, pts_input = \\\n data['sample_id'], data['pts_rect'], data['pts_features'], data['pts_input']\n batch_size = len(sample_id)\n inputs = torch.from_numpy(pts_input).cuda(non_blocking=True).float()\n input_data = {'pts_input': inputs, 'calib': calib}\n\n # model inference\n ret_dict = model(input_data)\n print(ret_dict.key())\n\n roi_scores_raw = ret_dict['roi_scores_raw'] # (B, M)\n roi_boxes3d = ret_dict['rois'] # (B, M, 7)\n seg_result = ret_dict['seg_result'].long() # (B, N)\n\n rcnn_cls = ret_dict['rcnn_cls'].view(\n batch_size, -1, 
ret_dict['rcnn_cls'].shape[1])\n rcnn_reg = ret_dict['rcnn_reg'].view(\n batch_size, -1, ret_dict['rcnn_reg'].shape[1]) # (B, M, C)\n\n # bounding box regression\n anchor_size = MEAN_SIZE\n if cfg.RCNN.SIZE_RES_ON_ROI:\n assert False\n\n pred_boxes3d = decode_bbox_target(roi_boxes3d.view(-1, 7), rcnn_reg.view(-1, rcnn_reg.shape[-1]),\n anchor_size=anchor_size,\n loc_scope=cfg.RCNN.LOC_SCOPE,\n loc_bin_size=cfg.RCNN.LOC_BIN_SIZE,\n num_head_bin=cfg.RCNN.NUM_HEAD_BIN,\n get_xz_fine=True, get_y_by_bin=cfg.RCNN.LOC_Y_BY_BIN,\n loc_y_scope=cfg.RCNN.LOC_Y_SCOPE, loc_y_bin_size=cfg.RCNN.LOC_Y_BIN_SIZE,\n get_ry_fine=True).view(batch_size, -1, 7)\n\n # scoring\n if rcnn_cls.shape[2] == 1:\n raw_scores = rcnn_cls # (B, M, 1)\n\n norm_scores = torch.sigmoid(raw_scores)\n pred_classes = (norm_scores > cfg.RCNN.SCORE_THRESH).long()\n else:\n pred_classes = torch.argmax(rcnn_cls, dim=1).view(-1)\n cls_norm_scores = F.softmax(rcnn_cls, dim=1)\n raw_scores = rcnn_cls[:, pred_classes]\n norm_scores = cls_norm_scores[:, pred_classes]\n\n # evaluation\n recalled_num = gt_num = rpn_iou = 0\n if not False:\n if not cfg.RPN.FIXED:\n rpn_cls_label, rpn_reg_label = data['rpn_cls_label'], data['rpn_reg_label']\n rpn_cls_label = torch.from_numpy(\n rpn_cls_label).cuda(non_blocking=True).long()\n\n gt_boxes3d = data['gt_boxes3d']\n\n for k in range(batch_size):\n # calculate recall\n cur_gt_boxes3d = gt_boxes3d[k]\n tmp_idx = cur_gt_boxes3d.__len__() - 1\n\n while tmp_idx >= 0 and cur_gt_boxes3d[tmp_idx].sum() == 0:\n tmp_idx -= 1\n\n if tmp_idx >= 0:\n cur_gt_boxes3d = cur_gt_boxes3d[:tmp_idx + 1]\n\n cur_gt_boxes3d = torch.from_numpy(\n cur_gt_boxes3d).cuda(non_blocking=True).float()\n iou3d = iou3d_utils.boxes_iou3d_gpu(\n pred_boxes3d[k], cur_gt_boxes3d)\n gt_max_iou, _ = iou3d.max(dim=0)\n refined_iou, _ = iou3d.max(dim=1)\n\n for idx, thresh in enumerate(thresh_list):\n total_recalled_bbox_list[idx] += (\n gt_max_iou > thresh).sum().item()\n recalled_num += (gt_max_iou > 0.7).sum().item()\n gt_num += cur_gt_boxes3d.shape[0]\n total_gt_bbox += cur_gt_boxes3d.shape[0]\n\n # original recall\n iou3d_in = iou3d_utils.boxes_iou3d_gpu(\n roi_boxes3d[k], cur_gt_boxes3d)\n gt_max_iou_in, _ = iou3d_in.max(dim=0)\n\n for idx, thresh in enumerate(thresh_list):\n total_roi_recalled_bbox_list[idx] += (\n gt_max_iou_in > thresh).sum().item()\n\n if not cfg.RPN.FIXED:\n fg_mask = rpn_cls_label > 0\n correct = ((seg_result == rpn_cls_label)\n & fg_mask).sum().float()\n union = fg_mask.sum().float() + (seg_result > 0).sum().float() - correct\n rpn_iou = correct / torch.clamp(union, min=1.0)\n total_rpn_iou += rpn_iou.item()\n\n disp_dict = {\n 'mode': mode, 'recall': '%d/%d' % (total_recalled_bbox_list[3], total_gt_bbox)}\n progress_bar.set_postfix(disp_dict)\n progress_bar.update()\n\n if True:\n # save roi and refine results\n roi_boxes3d_np = roi_boxes3d.cpu().numpy()\n pred_boxes3d_np = pred_boxes3d.cpu().numpy()\n roi_scores_raw_np = roi_scores_raw.cpu().numpy()\n raw_scores_np = raw_scores.cpu().numpy()\n\n rpn_cls_np = ret_dict['rpn_cls'].cpu().numpy()\n rpn_xyz_np = ret_dict['backbone_xyz'].cpu().numpy()\n seg_result_np = seg_result.cpu().numpy()\n output_data = np.concatenate((rpn_xyz_np, rpn_cls_np.reshape(batch_size, -1, 1),\n seg_result_np.reshape(batch_size, -1, 1)), axis=2)\n\n for k in range(batch_size):\n cur_sample_id = sample_id[k]\n calib = dataset.get_calib(cur_sample_id)\n image_shape = dataset.get_image_shape(cur_sample_id)\n save_kitti_format(cur_sample_id, calib, roi_boxes3d_np[k], roi_output_dir,\n 
roi_scores_raw_np[k], image_shape)\n save_kitti_format(cur_sample_id, calib, pred_boxes3d_np[k], refine_output_dir,\n raw_scores_np[k], image_shape)\n\n output_file = os.path.join(\n rpn_output_dir, '%06d.npy' % cur_sample_id)\n np.save(output_file, output_data.astype(np.float32))\n\n # scores thresh\n inds = norm_scores > cfg.RCNN.SCORE_THRESH\n\n for k in range(batch_size):\n cur_inds = inds[k].view(-1)\n if cur_inds.sum() == 0:\n continue\n\n pred_boxes3d_selected = pred_boxes3d[k, cur_inds]\n raw_scores_selected = raw_scores[k, cur_inds]\n norm_scores_selected = norm_scores[k, cur_inds]\n\n # NMS thresh\n # rotated nms\n boxes_bev_selected = kitti_utils.boxes3d_to_bev_torch(\n pred_boxes3d_selected)\n keep_idx = iou3d_utils.nms_gpu(\n boxes_bev_selected, raw_scores_selected, cfg.RCNN.NMS_THRESH).view(-1)\n pred_boxes3d_selected = pred_boxes3d_selected[keep_idx]\n scores_selected = raw_scores_selected[keep_idx]\n pred_boxes3d_selected, scores_selected = pred_boxes3d_selected.cpu(\n ).numpy(), scores_selected.cpu().numpy()\n\n cur_sample_id = sample_id[k]\n calib = dataset.get_calib(cur_sample_id)\n final_total += pred_boxes3d_selected.shape[0]\n image_shape = dataset.get_image_shape(cur_sample_id)\n save_kitti_format(cur_sample_id, calib, pred_boxes3d_selected,\n final_output_dir, scores_selected, image_shape)\n\n progress_bar.close()\n # dump empty files\n split_file = os.path.join(dataset.imageset_dir,\n '..', '..', 'ImageSets', dataset.split + '.txt')\n split_file = os.path.abspath(split_file)\n image_idx_list = [x.strip() for x in open(split_file).readlines()]\n empty_cnt = 0\n for k in range(image_idx_list.__len__()):\n cur_file = os.path.join(final_output_dir, '%s.txt' % image_idx_list[k])\n if not os.path.exists(cur_file):\n with open(cur_file, 'w') as temp_f:\n pass\n empty_cnt += 1\n\n ret_dict = {'empty_cnt': empty_cnt}\n\n avg_rpn_iou = (total_rpn_iou / max(cnt, 1.0))\n avg_cls_acc = (total_cls_acc / max(cnt, 1.0))\n avg_cls_acc_refined = (total_cls_acc_refined / max(cnt, 1.0))\n avg_det_num = (final_total / max(len(dataset), 1.0))\n\n ret_dict['rpn_iou'] = avg_rpn_iou\n ret_dict['rcnn_cls_acc'] = avg_cls_acc\n ret_dict['rcnn_cls_acc_refined'] = avg_cls_acc_refined\n ret_dict['rcnn_avg_num'] = avg_det_num\n\n for idx, thresh in enumerate(thresh_list):\n cur_roi_recall = total_roi_recalled_bbox_list[idx] / max(\n total_gt_bbox, 1.0)\n\n ret_dict['rpn_recall(thresh=%.2f)' % thresh] = cur_roi_recall\n\n for idx, thresh in enumerate(thresh_list):\n cur_recall = total_recalled_bbox_list[idx] / max(total_gt_bbox, 1.0)\n\n ret_dict['rcnn_recall(thresh=%.2f)' % thresh] = cur_recall\n\n if cfg.TEST.SPLIT != 'test':\n name_to_class = {'Car': 0, 'Pedestrian': 1, 'Cyclist': 2}\n ap_result_str, ap_dict = kitti_evaluate(dataset.label_dir, final_output_dir, label_split_file=split_file,\n current_class=name_to_class[cfg.CLASSES])\n\n ret_dict.update(ap_dict)\n\n return ap_result_str\n" ]
[ [ "torch.sigmoid", "torch.nn.functional.softmax", "numpy.random.seed", "numpy.clip", "torch.from_numpy", "numpy.arctan2", "numpy.sign", "torch.clamp", "numpy.logical_and", "torch.argmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
zblanks/parallel_esn
[ "25a979d0863ce54a4a588f4216dc473d4e9c5e8a" ]
[ "parallel_esn/bo.py" ]
[ "from math import log10\nfrom sklearn.gaussian_process import GaussianProcessRegressor\nfrom sklearn.gaussian_process.kernels import Matern\nimport numpy as np\nfrom .utils import create_rng\n\n\nclass BO:\n \"\"\"\n Bayesian Optimization framework\n \"\"\"\n\n def __init__(self, k, hidden_dim=(100, 10000),\n spectral_radius=(.9, 1.3), p=(0, 1),\n alpha=(0, 1), beta=(1e-5, 1e3), random_state=None):\n \"\"\"\n\n Parameters\n ----------\n k : tuple\n Range of values for nearest neighbors in small-world network\n hidden_dim : tuple, optional\n Range values for the number of nodes in the reservoir\n spectral_radius : tuple, optional\n Range of values for the spectral radius for the reservoir\n p : tuple, optional\n Range of values to consider for the rewire probability\n alpha : tuple, optional\n Range of values for the leaking rate\n beta : tuple, optional\n Range of values for the L2 regression regularization\n random_state : int or np.random.RandomState, optional\n Random state initializer\n \"\"\"\n # Check that all the hyper-parameters are tuples with two entries\n # which define the lower and upper bounds for the search space\n hyper_params = [k, hidden_dim, spectral_radius, p, alpha, beta]\n for param in hyper_params:\n assert isinstance(param, tuple), \"{} must be a tuple\".format(param)\n assert len(param) == 2, \"{} must have two arguments; the upper\" \\\n \"and lower bound\".format(param)\n\n self.lwr_k = k[0]\n self.upr_k = k[1]\n self.lwr_hidden_dim = hidden_dim[0]\n self.upr_hidden_dim = hidden_dim[1]\n self.lwr_spectral_radius = spectral_radius[0]\n self.upr_spectral_radius = spectral_radius[1]\n self.lwr_p = p[0]\n self.upr_p = p[1]\n self.lwr_alpha = alpha[0]\n self.upr_alpha = alpha[1]\n self.lwr_beta = beta[0]\n self.upr_beta = beta[1]\n\n self.rng = create_rng(random_state)\n self.gpr = GaussianProcessRegressor(kernel=Matern(),\n random_state=self.rng)\n\n # We need a placeholder for different hyper-parameter values that\n # arrive and the corresponding error values\n self.H = []\n self.y = []\n\n def update_gpr(self, X, y):\n \"\"\"\n Updates the Gaussian process with new data and error value\n\n Updates the Gaussian process by adding, `H`, the list of\n hyper-parameter values that were used with true function and y\n is the resulting error from the model\n\n Parameters\n ----------\n X : list\n Hyper-parameter values that were tried\n y : float\n Error that resulted from using X on the true function\n\n Returns\n -------\n None\n\n \"\"\"\n self.H.append(X)\n self.y.append(y)\n\n self.gpr.fit(self.H, self.y)\n\n def _sample_uniformly(self, num_samples, lwr_bound, upr_bound):\n \"\"\"\n Samples uniformly from a non-uniform space\n\n Parameters\n ----------\n num_samples : int\n Number of samples to generate\n lwr_bound : float\n Hyper-parameter lower bound\n upr_bound : float\n Hyper-parameter upper bound\n\n Returns\n -------\n param_vals : np.ndarray\n Uniformly sampled hyper-parameter values\n\n \"\"\"\n # To sample in a uniform fashion we need the base ten representation\n # of the upper and lower bounds and then we treat this as a region\n # to sample\n new_lwr_bound = log10(lwr_bound)\n new_upr_bound = log10(upr_bound)\n samples = self.rng.uniform(low=new_lwr_bound, high=new_upr_bound,\n size=(num_samples, 1))\n param_vals = np.power(10, samples)\n return param_vals\n\n def _build_options(self, num_samples=1000):\n \"\"\"\n Builds matrix which defines possible options for this iteration\n\n Parameters\n ----------\n num_samples : int, optional\n Number of 
hyper-parameter samples to generate\n\n Returns\n -------\n H_space : np.ndarray\n Matrix of options for the ESN hyper-parameters\n\n \"\"\"\n k_vals = self.rng.randint(low=self.lwr_k, high=self.upr_k,\n size=(num_samples, 1), dtype=np.int32)\n\n hidden_dim_vals = self.rng.randint(low=self.lwr_hidden_dim,\n high=self.upr_hidden_dim,\n size=(num_samples, 1),\n dtype=np.int32)\n\n spectral_radius_vals = self.rng.uniform(low=self.lwr_spectral_radius,\n high=self.upr_spectral_radius,\n size=(num_samples, 1))\n\n p_vals = self.rng.uniform(low=self.lwr_p, high=self.upr_p,\n size=(num_samples, 1))\n\n alpha_vals = self.rng.uniform(low=self.lwr_alpha, high=self.upr_alpha,\n size=(num_samples, 1))\n\n beta_vals = self._sample_uniformly(num_samples, self.lwr_beta,\n self.upr_beta)\n\n H_space = np.concatenate([k_vals, hidden_dim_vals,\n spectral_radius_vals, p_vals, alpha_vals,\n beta_vals], axis=1)\n return H_space\n\n def find_best_choices(self, num_samples=1000, num_choices=1):\n \"\"\"\n Finds the best hyper-parameter combination\n\n Parameters\n ----------\n num_samples : int, optional\n Number of hyper-parameter samples to generate\n num_choices : int, optional\n Number of choices to select\n\n Returns\n -------\n param_vals : dict\n Best hyper-parameter values for the current Gaussian process\n\n \"\"\"\n H_space = self._build_options(num_samples)\n\n # For the first MPI iteration because there is no prior, randomly\n # sample num_choices points\n if num_choices > 1:\n idx = self.rng.choice(np.arange(num_samples), size=num_choices,\n replace=False)\n best_vals = H_space[idx, :]\n else:\n y_pred = self.gpr.sample_y(H_space, random_state=self.rng)\n choices = np.argmin(y_pred)\n best_vals = H_space[choices, :]\n\n hyper_parameters = ['k', 'hidden_dim', 'spectral_radius', 'p', 'alpha',\n 'beta']\n\n param_vals = {}\n for (i, val) in enumerate(hyper_parameters):\n if num_choices == 1:\n param_vals[val] = best_vals[i]\n\n if (val == 'k') or (val == 'hidden_dim'):\n param_vals[val] = int(param_vals[val])\n else:\n param_vals[val] = best_vals[:, i]\n\n if (val == 'k') or (val == 'hidden_dim'):\n param_vals[val] = param_vals[val].astype(int)\n\n return param_vals\n\n def return_best_parameters(self):\n min_error = min(self.y)\n index = self.y.index(min_error)\n print(\"Minimum Validation Error = \", min_error)\n print(\"Best parameters found = \", self.H[index])\n return min_error, self.H[index]\n" ]
[ [ "numpy.power", "numpy.arange", "numpy.concatenate", "numpy.argmin", "sklearn.gaussian_process.kernels.Matern" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
hjonnala/deeplab2
[ "1868757c4333ec5287cc0bf0a6bbf38fbbe34c2e", "1868757c4333ec5287cc0bf0a6bbf38fbbe34c2e", "1868757c4333ec5287cc0bf0a6bbf38fbbe34c2e", "1868757c4333ec5287cc0bf0a6bbf38fbbe34c2e", "1868757c4333ec5287cc0bf0a6bbf38fbbe34c2e" ]
[ "model/encoder/model_export_test.py", "utils/net_surgery_convert_last_layer.py", "model/test_utils.py", "tflite_conversion.py", "model/encoder/axial_resnet_instances_test.py" ]
[ "# coding=utf-8\n# Copyright 2022 The Deeplab2 Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests of model exports for axial_resnet_instances.\"\"\"\n\nimport os\n\nfrom absl import flags\nfrom absl.testing import parameterized\nimport tensorflow as tf\n\nfrom deeplab2.model.encoder import axial_resnet_instances\n\nFLAGS = flags.FLAGS\n\n\nclass ModelExportTest(tf.test.TestCase, parameterized.TestCase):\n\n @parameterized.parameters(\n ('resnet50',),\n ('resnet50_beta',),\n ('max_deeplab_s_backbone',),\n ('max_deeplab_l_backbone',),\n ('axial_resnet_s',),\n ('axial_resnet_l',),\n ('axial_deeplab_s',),\n ('axial_deeplab_l',),\n ('swidernet',),\n ('axial_swidernet',),\n )\n def test_model_export(self, model_name):\n model = axial_resnet_instances.get_model(\n model_name,\n output_stride=16,\n backbone_layer_multiplier=1.0,\n bn_layer=tf.keras.layers.BatchNormalization,\n conv_kernel_weight_decay=0.0001,\n # Test with small models only.\n num_blocks=[2, 2, 2, 2],\n # Disable drop path as it is not compatible with model exporting.\n block_group_config={'drop_path_keep_prob': 1.0})\n model(tf.keras.Input([257, 257, 3], batch_size=1), training=False)\n export_dir = os.path.join(\n FLAGS.test_tmpdir, 'test_model_export', model_name)\n model.save(export_dir)\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# coding=utf-8\n# Copyright 2022 The Deeplab2 Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utility script to perform net surgery on a model.\n\nThis script will perform net surgery on DeepLab models trained on a source\ndataset and create a new checkpoint for the target dataset.\n\"\"\"\n\nfrom typing import Any, Dict, Text, Tuple\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom google.protobuf import text_format\nfrom deeplab2 import common\nfrom deeplab2 import config_pb2\nfrom deeplab2.data import dataset\nfrom deeplab2.model import deeplab\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string('source_dataset', 'cityscapes',\n 'Dataset name on which the model has been pretrained. '\n 'Supported datasets: `cityscapes`.')\n\nflags.DEFINE_string('target_dataset', 'motchallenge_step',\n 'Dataset name for conversion. 
Supported datasets: '\n '`motchallenge_step`.')\n\nflags.DEFINE_string('input_config_path', None,\n 'Path to a config file that defines the DeepLab model and '\n 'the checkpoint path.')\n\nflags.DEFINE_string('output_checkpoint_path', None,\n 'Output filename for the generated checkpoint file.')\n\n\n_SUPPORTED_SOURCE_DATASETS = {'cityscapes'}\n_SUPPORTED_TARGET_DATASETS = {'motchallenge_step'}\n\n_CITYSCAPES_TO_MOTCHALLENGE_STEP = (\n 1, # sidewalk\n 2, # building\n 8, # vegetation\n 10, # sky\n 11, # pedestrian\n 12, # rider\n 18, # bicycle\n)\n\n_DATASET_TO_INFO = {\n 'cityscapes': dataset.CITYSCAPES_PANOPTIC_INFORMATION,\n 'motchallenge_step': dataset.MOTCHALLENGE_STEP_INFORMATION,\n}\n_INPUT_SIZE = (1025, 2049, 3)\n\n\ndef _load_model(\n config_path: Text,\n source_dataset: Text) -> Tuple[deeplab.DeepLab,\n config_pb2.ExperimentOptions]:\n \"\"\"Load DeepLab model based on config and dataset.\"\"\"\n options = config_pb2.ExperimentOptions()\n with tf.io.gfile.GFile(config_path) as f:\n text_format.Parse(f.read(), options)\n options.model_options.panoptic_deeplab.semantic_head.output_channels = (\n _DATASET_TO_INFO[source_dataset].num_classes)\n model = deeplab.DeepLab(options,\n _DATASET_TO_INFO[source_dataset])\n return model, options\n\n\ndef _convert_bias(input_tensor: np.ndarray,\n label_list: Tuple[int, ...]) -> np.ndarray:\n \"\"\"Converts 1D tensor bias w.r.t. label list.\n\n We select the subsets from the input_tensor based on the label_list.\n\n We assume input_tensor has shape = [num_classes], where\n input_tensor is the bias weights trained on source dataset, and num_classes\n is the number of classes in source dataset.\n\n Args:\n input_tensor: A numpy array with ndim == 1.\n label_list: A tuple of labels used for net surgery.\n\n Returns:\n A numpy array with values modified.\n\n Raises:\n ValueError: input_tensor's ndim != 1.\n \"\"\"\n if input_tensor.ndim != 1:\n raise ValueError('The bias tensor should have ndim == 1.')\n\n num_elements = len(label_list)\n output_tensor = np.zeros(num_elements, dtype=np.float32)\n for i, label in enumerate(label_list):\n output_tensor[i] = input_tensor[label]\n return output_tensor\n\n\ndef _convert_kernels(input_tensor: np.ndarray,\n label_list: Tuple[int, ...]) -> np.ndarray:\n \"\"\"Converts 4D tensor kernels w.r.t. 
label list.\n\n We select the subsets from the input_tensor based on the label_list.\n\n We assume input_tensor has shape = [h, w, input_dim, num_classes], where\n input_tensor is the kernel weights trained on source dataset, and num_classes\n is the number of classes in source dataset.\n\n Args:\n input_tensor: A numpy array with ndim == 4.\n label_list: A tuple of labels used for net surgery.\n\n Returns:\n A numpy array with values modified.\n\n Raises:\n ValueError: input_tensor's ndim != 4.\n \"\"\"\n if input_tensor.ndim != 4:\n raise ValueError('The kernels tensor should have ndim == 4.')\n\n num_elements = len(label_list)\n kernel_height, kernel_width, input_dim, _ = input_tensor.shape\n output_tensor = np.zeros(\n (kernel_height, kernel_width, input_dim, num_elements), dtype=np.float32)\n for i, label in enumerate(label_list):\n output_tensor[:, :, :, i] = input_tensor[:, :, :, label]\n return output_tensor\n\n\ndef _restore_checkpoint(restore_dict: Dict[Any, Any],\n options: config_pb2.ExperimentOptions\n ) -> tf.train.Checkpoint:\n \"\"\"Reads the provided dict items from the checkpoint specified in options.\n\n Args:\n restore_dict: A mapping of checkpoint item to location.\n options: A experiment configuration containing the checkpoint location.\n\n Returns:\n The loaded checkpoint.\n \"\"\"\n ckpt = tf.train.Checkpoint(**restore_dict)\n if tf.io.gfile.isdir(options.model_options.initial_checkpoint):\n path = tf.train.latest_checkpoint(\n options.model_options.initial_checkpoint)\n status = ckpt.restore(path)\n else:\n status = ckpt.restore(options.model_options.initial_checkpoint)\n status.expect_partial().assert_existing_objects_matched()\n return ckpt\n\n\ndef main(_) -> None:\n if FLAGS.source_dataset not in _SUPPORTED_SOURCE_DATASETS:\n raise ValueError('Source dataset is not supported. Use --help to get list '\n 'of supported datasets.')\n if FLAGS.target_dataset not in _SUPPORTED_TARGET_DATASETS:\n raise ValueError('Target dataset is not supported. 
Use --help to get list '\n 'of supported datasets.')\n\n logging.info('Loading DeepLab model from config %s', FLAGS.input_config_path)\n source_model, options = _load_model(FLAGS.input_config_path,\n FLAGS.source_dataset)\n logging.info('Load pretrained checkpoint.')\n _restore_checkpoint(source_model.checkpoint_items, options)\n source_model(tf.keras.Input(_INPUT_SIZE), training=False)\n\n logging.info('Perform net surgery.')\n semantic_weights = (\n source_model._decoder._semantic_head.final_conv.get_weights()) # pylint: disable=protected-access\n\n if (FLAGS.source_dataset == 'cityscapes' and\n FLAGS.target_dataset == 'motchallenge_step'):\n # Kernels.\n semantic_weights[0] = _convert_kernels(semantic_weights[0],\n _CITYSCAPES_TO_MOTCHALLENGE_STEP)\n # Bias.\n semantic_weights[1] = _convert_bias(semantic_weights[1],\n _CITYSCAPES_TO_MOTCHALLENGE_STEP)\n\n logging.info('Load target model without last semantic layer.')\n target_model, _ = _load_model(FLAGS.input_config_path, FLAGS.target_dataset)\n restore_dict = target_model.checkpoint_items\n del restore_dict[common.CKPT_SEMANTIC_LAST_LAYER]\n\n ckpt = _restore_checkpoint(restore_dict, options)\n target_model(tf.keras.Input(_INPUT_SIZE), training=False)\n target_model._decoder._semantic_head.final_conv.set_weights(semantic_weights) # pylint: disable=protected-access\n\n logging.info('Save checkpoint to output path: %s',\n FLAGS.output_checkpoint_path)\n ckpt = tf.train.Checkpoint(**target_model.checkpoint_items)\n ckpt.save(FLAGS.output_checkpoint_path)\n\n\nif __name__ == '__main__':\n flags.mark_flags_as_required(\n ['input_config_path', 'output_checkpoint_path'])\n app.run(main)\n", "# coding=utf-8\n# Copyright 2022 The Deeplab2 Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"This file contains utility functions for the model tests.\"\"\"\nimport numpy as np\nimport tensorflow as tf\n\n\ndef create_test_input(batch, height, width, channels):\n \"\"\"Creates test input tensor.\"\"\"\n input_tensor = np.tile(\n np.reshape(\n np.reshape(np.arange(height), [height, 1]) +\n np.reshape(np.arange(width), [1, width]),\n [1, height, width, 1]),\n [batch, 1, 1, channels])\n # Normalize the input tensor so that the outputs are not too large.\n input_tensor = (input_tensor * 2 / np.max(input_tensor)) - 1\n return tf.cast(input_tensor, tf.float32)\n", "# -*- coding: utf-8 -*-\nimport os\n\nimport numpy as np\nimport tensorflow as tf\nprint(tf.__version__)\n\n\nLOADED_MODEL = tf.keras.models.load_model('/usr/local/google/home/deeplab_edge/mobilenet_models')\nMODEL_NAME = 'relu_mnv3small_full'\nmodel_out_path = f'/usr/local/google/home/deeplab_edge/tflite_models/{MODEL_NAME}.tflite'\nrepresentative_data = '/usr/local/google/home/deeplab_edge/representative_data/img_size_256'\nIMAGE_SIZE = 256\nrepresentative_images = os.listdir(representative_data)\n# A generator that provides a representative dataset\ndef representative_data_gen():\n for i in range(len(representative_images)):\n image = os.path.join(representative_data, 
next(iter(representative_images)))\n image = tf.io.read_file(image)\n image = tf.io.decode_jpeg(image, channels=3)\n image = tf.image.resize(image, [IMAGE_SIZE, IMAGE_SIZE])\n image = tf.cast(image / 255., tf.float32)\n image = tf.expand_dims(image, 0)\n yield [image]\n\n# def representative_data_gen():\n# for i in range(100):\n# data = np.random.rand(1, 224, 224, 3)\n# yield [data.astype(np.float32)]\n\nconverter = tf.lite.TFLiteConverter.from_keras_model(LOADED_MODEL._model)\n\n# This enables quantization\nconverter.optimizations = [tf.lite.Optimize.DEFAULT]\n# This sets the representative dataset for quantization\nconverter.representative_dataset = representative_data_gen\n# This ensures that if any ops can't be quantized, the converter throws an error\nconverter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]\n# For full integer quantization, though supported types defaults to int8 only, we explicitly declare it for clarity.\nconverter.target_spec.supported_types = [tf.int8]\n# These set the input and output tensors to uint8 (added in r2.3)\nconverter.inference_input_type = tf.uint8\nconverter.inference_output_type = tf.uint8\n\n# converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS]\n# converter._experimental_lower_tensor_list_ops = False\ntflite_model = converter.convert()\n\nwith open(model_out_path, 'wb') as f:\n f.write(tflite_model)\n\n\n# Load the TFLite model and allocate tensors.\ninterpreter = tf.lite.Interpreter(model_path=model_out_path)\ninterpreter.allocate_tensors()\n\n# Get input and output tensors.\ninput_details = interpreter.get_input_details()\noutput_details = interpreter.get_output_details()\n\n# Test the model on random input data.\ninput_shape = input_details[0]['shape']\nprint(input_shape)\ninput_data = np.array(np.random.random_sample(input_shape), dtype= input_details[0]['dtype'])\n# arr4d = np.expand_dims(im,0)\n# input_data = np.array(arr4d, dtype=np.float32)\ninterpreter.set_tensor(input_details[0]['index'], input_data)\n\ninterpreter.invoke()\n\n# The function `get_tensor()` returns a copy of the tensor data.\n# Use `tensor()` in order to get a pointer to the tensor.\noutput_data = interpreter.get_tensor(output_details[0]['index'])\nprint(output_data[0])\n\n", "# coding=utf-8\n# Copyright 2022 The Deeplab2 Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for axial_resnet_instances.\"\"\"\n\nimport os\n\nfrom absl.testing import parameterized\nimport numpy as np\nimport tensorflow as tf\n\nfrom deeplab2.model.encoder import axial_resnet_instances\n\n\nclass AxialResnetInstancesTest(tf.test.TestCase, parameterized.TestCase):\n\n # The parameter count does not include the classification head.\n @parameterized.parameters(\n ('resnet50', 1, 23508032),\n ('resnet50_beta', 1, 23631808), # 123776 more than resnet50\n ('max_deeplab_s_backbone', 1, 41343424),\n ('max_deeplab_l_backbone', 1, 175115392),\n ('axial_resnet_s', 1, 11466912),\n ('axial_resnet_l', 1, 43714048), # 
127872 fewer than axial_deeplab_l\n ('axial_deeplab_s', 1, 11565856),\n ('axial_deeplab_l', 1, 43841920),\n ('swidernet', 1, 109014080), # SWideRNet-(1,1,1) without SE or SAC\n ('swidernet', 3, 333245504), # Should be more than 3 x 109014080\n ('swidernet', 4.5, 487453760), # Rounded down to [13, 27, 13, 13]\n ('axial_swidernet', 1, 136399392),\n ('axial_swidernet', 3, 393935520),\n ('axial_swidernet', 4.5, 570346912),\n )\n def test_model_output_shape_and_num_params(\n self, model_name, backbone_layer_multiplier, expected_num_params):\n model = axial_resnet_instances.get_model(\n model_name,\n backbone_layer_multiplier=backbone_layer_multiplier,\n bn_layer=tf.keras.layers.BatchNormalization,\n conv_kernel_weight_decay=0.0001)\n output = model(tf.keras.Input(shape=(224, 224, 3)))\n if model_name in ('axial_resnet_s', 'axial_deeplab_s'):\n self.assertListEqual(output['res5'].get_shape().as_list(),\n [None, 14, 14, 1024])\n else:\n self.assertListEqual(output['res5'].get_shape().as_list(),\n [None, 14, 14, 2048])\n num_params = np.sum(\n [np.prod(v.get_shape().as_list()) for v in model.trainable_weights])\n self.assertEqual(num_params, expected_num_params)\n\n def test_resnet50_variable_checkpoint_names(self):\n model = axial_resnet_instances.get_model(\n 'resnet50',\n bn_layer=tf.keras.layers.BatchNormalization,\n conv_kernel_weight_decay=0.0001)\n model(tf.keras.Input(shape=(224, 224, 3)))\n variable_names = [w.name for w in model.trainable_weights]\n test_variable_name = 'resnet50/stage4/block6/conv3_bn/batch_norm/beta:0'\n self.assertIn(test_variable_name, variable_names)\n temp_dir = self.create_tempdir()\n temp_path = os.path.join(temp_dir, 'ckpt')\n checkpoint = tf.train.Checkpoint(encoder=model)\n checkpoint.save(temp_path)\n latest_checkpoint = tf.train.latest_checkpoint(temp_dir)\n reader = tf.train.load_checkpoint(latest_checkpoint)\n checkpoint_names = reader.get_variable_to_shape_map().keys()\n test_checkpoint_name = 'encoder/_stage4/_block6/_conv3_bn/_batch_norm/gamma/.ATTRIBUTES/VARIABLE_VALUE'\n self.assertIn(test_checkpoint_name, checkpoint_names)\n\n def test_max_deeplab_s_output_shape_and_num_params(self):\n model = axial_resnet_instances.get_model(\n 'max_deeplab_s',\n bn_layer=tf.keras.layers.BatchNormalization,\n conv_kernel_weight_decay=0.0001)\n endpoints = model(tf.keras.Input(shape=(65, 65, 3)))\n self.assertListEqual(endpoints['backbone_output'].get_shape().as_list(),\n [None, 5, 5, 2048])\n self.assertListEqual(\n endpoints['transformer_class_feature'].get_shape().as_list(),\n [None, 128, 256])\n self.assertListEqual(\n endpoints['transformer_mask_feature'].get_shape().as_list(),\n [None, 128, 256])\n self.assertListEqual(endpoints['feature_panoptic'].get_shape().as_list(),\n [None, 17, 17, 256])\n self.assertListEqual(endpoints['feature_semantic'].get_shape().as_list(),\n [None, 5, 5, 2048])\n num_params = np.sum(\n [np.prod(v.get_shape().as_list()) for v in model.trainable_weights])\n self.assertEqual(num_params, 61726624)\n\n def test_max_deeplab_l_output_shape_and_num_params(self):\n model = axial_resnet_instances.get_model(\n 'max_deeplab_l',\n bn_layer=tf.keras.layers.BatchNormalization,\n conv_kernel_weight_decay=0.0001)\n endpoints = model(tf.keras.Input(shape=(65, 65, 3)))\n self.assertListEqual(endpoints['backbone_output'].get_shape().as_list(),\n [None, 5, 5, 2048])\n self.assertListEqual(\n endpoints['transformer_class_feature'].get_shape().as_list(),\n [None, 128, 512])\n self.assertListEqual(\n 
endpoints['transformer_mask_feature'].get_shape().as_list(),\n [None, 128, 512])\n self.assertListEqual(endpoints['feature_panoptic'].get_shape().as_list(),\n [None, 17, 17, 256])\n self.assertListEqual(endpoints['feature_semantic'].get_shape().as_list(),\n [None, 17, 17, 256])\n num_params = np.sum(\n [np.prod(v.get_shape().as_list()) for v in model.trainable_weights])\n self.assertEqual(num_params, 450523232)\n\n def test_global_attention_absolute_positional_encoding_names(self):\n model = axial_resnet_instances.get_model(\n 'max_deeplab_s_backbone',\n block_group_config={'use_global_beyond_stride': 16,\n 'positional_encoding_type': '1D',\n 'axial_layer_config': {\n 'use_query_rpe_similarity': False,\n 'use_key_rpe_similarity': False,\n 'retrieve_value_rpe': False}},\n bn_layer=tf.keras.layers.BatchNormalization,\n conv_kernel_weight_decay=0.0001)\n model(tf.keras.Input(shape=(224, 224, 3)))\n variable_names = [w.name for w in model.trainable_weights]\n test_variable_name1 = 'max_deeplab_s_backbone/stage4/add_absolute_positional_encoding/height_axis_embeddings:0'\n test_variable_name2 = 'max_deeplab_s_backbone/stage4/block2/attention/global/qkv_kernel:0'\n self.assertIn(test_variable_name1, variable_names)\n self.assertIn(test_variable_name2, variable_names)\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "tensorflow.test.main", "tensorflow.keras.Input" ], [ "tensorflow.io.gfile.isdir", "tensorflow.train.latest_checkpoint", "tensorflow.keras.Input", "tensorflow.train.Checkpoint", "tensorflow.io.gfile.GFile", "numpy.zeros" ], [ "numpy.arange", "numpy.max", "tensorflow.cast" ], [ "tensorflow.keras.models.load_model", "tensorflow.lite.TFLiteConverter.from_keras_model", "tensorflow.io.decode_jpeg", "tensorflow.lite.Interpreter", "tensorflow.cast", "numpy.random.random_sample", "tensorflow.expand_dims", "tensorflow.image.resize", "tensorflow.io.read_file" ], [ "tensorflow.train.latest_checkpoint", "tensorflow.keras.Input", "tensorflow.train.Checkpoint", "tensorflow.test.main", "tensorflow.train.load_checkpoint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
kagemeka/atcoder-submissions
[ "91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e", "91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e", "91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e", "91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e", "91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e", "91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e", "91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e", "91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e", "91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e", "91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e", "91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e", "91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e", "91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e", "91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e", "91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e", "91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e", "91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e", "91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e", "91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e", "91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e", "91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e", "91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e", "91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e", "91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e", "91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e", "91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e", "91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e", "91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e", "91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e", "91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e", "91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e", "91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e", "91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e", "91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e", "91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e", "91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e", "91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e", "91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e", "91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e", "91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e", "91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e", "91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e", "91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e", "91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e", "91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e" ]
[ "jp.atcoder/abc012/abc012_4/21865313.py", "jp.atcoder/abc081/arc086_b/17664033.py", "jp.atcoder/abc025/abc025_b/8907335.py", "jp.atcoder/abc214/abc214_f/25543460.py", "jp.atcoder/abc013/abc013_3/8806609.py", "jp.atcoder/abc214/abc214_f/26740844.py", "jp.atcoder/abc009/abc009_4/17183548.py", "jp.atcoder/abc222/abc222_e/26462221.py", "jp.atcoder/abc031/abc031_b/8925627.py", "jp.atcoder/abc106/abc106_d/9341820.py", "jp.atcoder/abc185/abc185_f/26724747.py", "jp.atcoder/abc014/abc014_1/16950154.py", "jp.atcoder/abc185/abc185_f/26092335.py", "jp.atcoder/ddcc2020-qual/ddcc2020_qual_d/26718152.py", "jp.atcoder/abc068/arc079_b/17606994.py", "jp.atcoder/abc129/abc129_d/10071689.py", "jp.atcoder/abc023/abc023_c/8900747.py", "jp.atcoder/abc218/abc218_e/25792925.py", "jp.atcoder/abc074/arc083_b/9121988.py", "jp.atcoder/abc108/arc102_b/17830175.py", "jp.atcoder/abc213/abc213_f/25169168.py", "jp.atcoder/abc225/abc225_c/26907564.py", "jp.atcoder/abc147/abc147_c/12030898.py", "jp.atcoder/arc120/arc120_b/25886003.py", "jp.atcoder/typical90/typical90_af/26200043.py", "jp.atcoder/abc212/abc212_g/24782346.py", "jp.atcoder/abc019/abc019_3/26071240.py", "jp.atcoder/abc068/arc079_b/17607017.py", "jp.atcoder/abc147/abc147_d/8887410.py", "jp.atcoder/agc023/agc023_a/8518916.py", "jp.atcoder/abc212/abc212_g/24791842.py", "jp.atcoder/abc009/abc009_4/16914312.py", "jp.atcoder/abc147/abc147_c/12031076.py", "jp.atcoder/arc044/arc044_b/26740286.py", "jp.atcoder/abc213/abc213_f/25169341.py", "jp.atcoder/abc010/abc010_4/26227328.py", "jp.atcoder/acl1/acl1_b/26710287.py", "jp.atcoder/abc127/abc127_c/11991646.py", "jp.atcoder/abc147/abc147_d/8884240.py", "jp.atcoder/abc213/abc213_b/24854487.py", "jp.atcoder/abc028/abc028_d/17206564.py", "jp.atcoder/abc074/arc083_b/9122105.py", "jp.atcoder/abc213/abc213_c/24860813.py", "jp.atcoder/abc035/abc035_c/11763183.py", "jp.atcoder/abc018/abc018_1/26070117.py" ]
[ "from __future__ import annotations\n\nfrom typing import Generator, NoReturn\n\n\nclass StdReader:\n def __init__(\n self,\n ) -> NoReturn:\n import sys\n\n self.buf = sys.stdin.buffer\n self.lines = self.async_readlines()\n self.chunks: Generator\n\n def async_readlines(\n self,\n ) -> Generator:\n while True:\n gen = self.line_chunks()\n yield gen\n\n def line_chunks(\n self,\n ) -> Generator:\n ln = self.buf.readline()\n for chunk in ln.split():\n yield chunk\n\n def __call__(\n self,\n ) -> bytes:\n try:\n chunk = next(self.chunks)\n except:\n self.chunks = next(\n self.lines,\n )\n chunk = self()\n return chunk\n\n def str(\n self,\n ) -> str:\n b = self()\n return b.decode()\n\n def int(\n self,\n ) -> int:\n return int(self.str())\n\n\nfrom abc import ABC, abstractmethod\n\n\nclass Solver(ABC):\n def __init__(self):\n self.reader = StdReader()\n\n def __call__(\n self,\n ):\n self.prepare()\n self.solve()\n\n @abstractmethod\n def prepare(self):\n ...\n\n @abstractmethod\n def solve(self):\n ...\n\n\nimport numpy as np\nfrom scipy.sparse import csr_matrix\nfrom scipy.sparse.csgraph import floyd_warshall\n\n\nclass Problem(\n Solver,\n):\n def prepare(self):\n reader = self.reader\n n = reader.int()\n m = reader.int()\n a = [reader.int() for _ in range(3 * m)]\n a = np.array(\n a,\n ).reshape(m, 3)\n a, b, t = a.T\n self.n, self.m = n, m\n self.a = a - 1\n self.b = b - 1\n self.t = t\n\n def solve(self):\n self.compute_dist_mat()\n dist = self.dist\n d = dist.max(axis=1).min()\n print(int(d))\n\n def compute_dist_mat(\n self,\n ):\n n = self.n\n a = self.a\n b = self.b\n t = self.t\n g = csr_matrix(\n (t, (a, b)),\n shape=(n, n),\n )\n dist = floyd_warshall(\n csgraph=g,\n directed=False,\n )\n self.dist = dist\n\n\ndef main():\n t = 1\n # t = StdReader().int()\n for _ in range(t):\n Problem()()\n\n\nif __name__ == \"__main__\":\n main()\n", "import itertools\r\nimport math\r\nimport string\r\nimport sys\r\nfrom bisect import bisect_left as bi_l\r\nfrom bisect import bisect_right as bi_r\r\nfrom collections import Counter, defaultdict, deque\r\nfrom functools import lru_cache, reduce\r\nfrom heapq import heapify, heappop, heappush\r\nfrom operator import or_, xor\r\n\r\nsys.setrecursionlimit(10**7)\r\ninf = float(\"inf\")\r\nMOD = 10**9 + 7\r\n# MOD = 998244353\r\n\r\n\r\nusing_numpy = 1\r\nimport networkx as nx\r\nimport numpy as np\r\nfrom numba import i8, njit\r\nfrom scipy import optimize\r\nfrom scipy.ndimage import distance_transform_cdt\r\nfrom scipy.sparse import csr_matrix\r\nfrom scipy.sparse.csgraph import (\r\n connected_components,\r\n csgraph_to_dense,\r\n maximum_flow,\r\n minimum_spanning_tree,\r\n shortest_path,\r\n)\r\nfrom scipy.spatial import ConvexHull\r\nfrom scipy.special import comb\r\n\r\n\r\nclass Algebra:\r\n class Modular(int):\r\n def __init__(self, n, mod=MOD):\r\n self.value = n\r\n self.mod = mod\r\n\r\n def __str__(self):\r\n return f\"{self.value}\"\r\n\r\n def __add__(self, other):\r\n return self.__class__((self.value + other.value) % self.mod)\r\n\r\n def __sub__(self, x):\r\n return self.__class__((self.value - x.value) % self.mod)\r\n\r\n def __mul__(self, x):\r\n return self.__class__((self.value * x.value) % self.mod)\r\n\r\n def __pow__(self, x):\r\n return self.__class__(pow(self.value, x.value, self.mod))\r\n\r\n def __lt__(self, x):\r\n return self.value < x.value\r\n\r\n def __le__(self, x):\r\n return self.value <= x.value\r\n\r\n def __eq__(self, x):\r\n return self.value == x.value\r\n\r\n def __ne__(self, x):\r\n return self.value 
!= x.value\r\n\r\n def __gt__(self, x):\r\n return self.value > x.value\r\n\r\n def __ge__(self, x):\r\n return self.value >= x.value\r\n\r\n class SemiGroup:\r\n pass\r\n\r\n class Monoid:\r\n pass\r\n\r\n class Group:\r\n pass\r\n\r\n class SemiRing:\r\n pass\r\n\r\n class Ring:\r\n pass\r\n\r\n @staticmethod\r\n def identity(n):\r\n if using_numpy:\r\n return np.identity(n, dtype=np.int64)\r\n else:\r\n a = [[0] * n for _ in range(n)]\r\n for i in range(n):\r\n a[i][i] = 1\r\n return a\r\n\r\n @staticmethod\r\n def dot(a, b):\r\n if using_numpy:\r\n return np.dot(a, b)\r\n else:\r\n h, w, l = len(a), len(b[0]), len(b)\r\n assert len(a[0]) == l\r\n c = [[0] * w for _ in range(h)]\r\n for i in range(h):\r\n for j in range(w):\r\n for k in range(l):\r\n c[i][j] += a[i][k] * b[k][j]\r\n return c\r\n\r\n @classmethod\r\n def matrix_pow(cls, a, n, mod=10**9 + 7):\r\n m = len(a)\r\n b = cls.identity(m)\r\n while n:\r\n if n & 1:\r\n b = cls.dot(b, a)\r\n n >>= 1\r\n a = cls.dot(a, a)\r\n if using_numpy:\r\n a %= mod\r\n b %= mod\r\n else:\r\n for i in range(m):\r\n for j in range(m):\r\n a[i][j] %= mod\r\n b[i][j] %= mod\r\n return b\r\n\r\n @staticmethod\r\n def bitwise_dot(a, b):\r\n if using_numpy:\r\n return np.bitwise_xor.reduce(\r\n a[:, None, :] & b.T[None, :, :], axis=-1\r\n )\r\n else:\r\n h, w, l = len(a), len(b[0]), len(b)\r\n assert len(a[0]) == l\r\n c = [[0] * w for _ in range(h)]\r\n for i in range(h):\r\n for j in range(w):\r\n for k in range(l):\r\n c[i][j] ^= a[i][k] & b[k][j]\r\n return c\r\n\r\n @classmethod\r\n def bitwise_mat_pow(cls, a, n):\r\n if n == 0:\r\n return np.eye(len(a), dtype=np.uint32) * ((1 << 32) - 1)\r\n res = cls.bitwise_mat_pow(a, n // 2)\r\n res = cls.bitwise_dot(res, res)\r\n return cls.bitwise_dot(res, a) if n & 1 else res\r\n\r\n @staticmethod\r\n def cumprod(a, mod):\r\n l = len(a)\r\n sql = int(np.sqrt(l) + 1)\r\n a = np.resize(a, sql**2).reshape(sql, sql)\r\n for i in range(sql - 1):\r\n a[:, i + 1] *= a[:, i]\r\n a[:, i + 1] %= mod\r\n for i in range(sql - 1):\r\n a[i + 1] *= a[i, -1]\r\n a[i + 1] %= mod\r\n return np.ravel(a)[:l]\r\n\r\n @classmethod\r\n def generate_fac_ifac(cls, n, p=MOD):\r\n if using_numpy:\r\n fac = np.arange(n + 1)\r\n fac[0] = 1\r\n fac = cls.cumprod(fac, p)\r\n ifac = np.arange(n + 1, 0, -1)\r\n ifac[0] = pow(int(fac[-1]), p - 2, p)\r\n ifac = cls.cumprod(ifac, p)[n::-1]\r\n else:\r\n fac = [None] * (n + 1)\r\n fac[0] = 1\r\n for i in range(n):\r\n fac[i + 1] = fac[i] * (i + 1) % p\r\n ifac = [None] * (n + 1)\r\n ifac[n] = pow(fac[n], p - 2, p)\r\n for i in range(n, 0, -1):\r\n ifac[i - 1] = ifac[i] * i % p\r\n return fac, ifac\r\n\r\n class Kitamasa:\r\n pass\r\n\r\n\r\nmint = Algebra.Modular\r\n\r\n\r\nclass NumberTheory:\r\n class PrimeNumbers: # pn\r\n def __init__(self, n=2 * 10**6):\r\n self.is_prime, self.prime_nums = self.find(n)\r\n\r\n def __call__(self, n):\r\n return self.is_prime[n]\r\n\r\n def __iter__(self):\r\n return iter(self.prime_nums)\r\n\r\n def __getitem__(self, key):\r\n return self.prime_nums[key]\r\n\r\n @staticmethod\r\n def find(n): # Sieve of eratosthenes\r\n if using_numpy:\r\n is_prime = np.ones(n + 1, dtype=np.bool)\r\n is_prime[:2] = 0\r\n for i in range(2, int(n**0.5) + 1):\r\n if is_prime[i]:\r\n is_prime[i * 2 :: i] = 0\r\n prime_nums = np.flatnonzero(is_prime)\r\n else:\r\n is_prime = [True] * (n + 1)\r\n is_prime[0] = is_prime[1] = 0\r\n for i in range(2, int(n**0.5) + 1):\r\n if not is_prime[i]:\r\n continue\r\n for j in range(i * 2, n + 1, i):\r\n is_prime[j] = 0\r\n prime_nums = 
[i for i in range(2, n + 1) if is_prime[i]]\r\n return is_prime, prime_nums\r\n\r\n @lru_cache(maxsize=None)\r\n def factorize(self, n):\r\n res = defaultdict(int)\r\n if n < 2:\r\n return res\r\n for p in self:\r\n if p * p > n:\r\n break\r\n while n % p == 0:\r\n res[p] += 1\r\n n //= p\r\n if n == 1:\r\n return res\r\n res[n] = 1\r\n return res\r\n\r\n def factorize_factorial(self, n):\r\n res = defaultdict(int)\r\n for i in range(2, n + 1):\r\n for p, c in self.factorize(i).items():\r\n res[p] += c\r\n return res\r\n\r\n @classmethod\r\n @lru_cache(maxsize=None)\r\n def gcd(cls, a, b):\r\n return cls.gcd(b, a % b) if b else abs(a)\r\n\r\n @classmethod\r\n def lcm(cls, a, b):\r\n return abs(a // cls.gcd(a, b) * b)\r\n\r\n @staticmethod\r\n def find_divisors(n):\r\n divisors = []\r\n for i in range(1, int(n**0.5) + 1):\r\n if n % i:\r\n continue\r\n divisors.append(i)\r\n j = n // i\r\n if j != i:\r\n divisors.append(j)\r\n return sorted(divisors)\r\n\r\n @staticmethod\r\n def base_convert(n, b):\r\n if not n:\r\n return [0]\r\n res = []\r\n while n:\r\n n, r = divmod(n, b)\r\n if r < 0:\r\n n += 1\r\n r -= b\r\n res.append(r)\r\n return res\r\n\r\n\r\nclass Combinatorics:\r\n @classmethod\r\n @lru_cache(maxsize=None)\r\n def choose(cls, n, r, mod=None):\r\n if r > n or r < 0:\r\n return 0\r\n if r == 0:\r\n return 1\r\n res = cls.choose(n - 1, r, mod) + cls.choose(n - 1, r - 1, mod)\r\n if mod:\r\n res %= mod\r\n return res\r\n\r\n class CombinationsMod:\r\n def __init__(self, n=2 * 10**6, mod=MOD):\r\n self.__mod = mod\r\n self.fac, self.ifac = Algebra.generate_fac_ifac(n, mod)\r\n\r\n def __call__(self, n, r):\r\n return self.__choose(n, r)\r\n\r\n def __choose(self, n, r):\r\n bl = (0 <= r) & (r <= n)\r\n p = self.__mod\r\n return bl * self.fac[n] * self.ifac[r] % p * self.ifac[n - r] % p\r\n\r\n def make_nchoose_table(self, n):\r\n p = self.__mod\r\n r = len(self.__fac) - 1\r\n if using_numpy:\r\n n_choose = np.arange(n + 1, n - r, -1)\r\n n_choose[0] = 1\r\n n_choose = Algebra.cumprod(n_choose, p) * self.ifac % p\r\n else:\r\n n_choose = [None] * (r + 1)\r\n n_choose[0] = 1\r\n for i in range(r):\r\n n_choose[i + 1] = n_choose[i] * (n - i) % p\r\n for i in range(1, r + 1):\r\n n_choose[i] = n_choose[i] * self.ifac[i] % p\r\n return n_choose\r\n\r\n @classmethod\r\n def permutations(cls, a, r=None, i=0):\r\n a = list(a)\r\n n = len(a)\r\n if r is None:\r\n r = n\r\n res = []\r\n if r > n or i > r:\r\n return res\r\n if i == r:\r\n return [tuple(a[:r])]\r\n for j in range(i, n):\r\n a[i], a[j] = a[j], a[i]\r\n res += cls.permutations(a, r, i + 1)\r\n return res\r\n\r\n @staticmethod\r\n def combinations(a, r):\r\n a = tuple(a)\r\n n = len(a)\r\n if r > n:\r\n return\r\n indices = list(range(r))\r\n yield a[:r]\r\n while True:\r\n for i in range(r - 1, -1, -1):\r\n if indices[i] != i + n - r:\r\n break\r\n else:\r\n return\r\n indices[i] += 1\r\n for j in range(i + 1, r):\r\n indices[j] = indices[j - 1] + 1\r\n yield tuple(a[i] for i in indices)\r\n\r\n\r\nclass DP:\r\n @staticmethod\r\n def LIS(a):\r\n res = [inf] * len(a)\r\n for x in a:\r\n res[bi_l(res, x)] = x\r\n return res\r\n\r\n\r\nclass String:\r\n @staticmethod\r\n def z_algorithm(s):\r\n n = len(s)\r\n a = [0] * n\r\n a[0] = n\r\n l = r = -1\r\n for i in range(1, n):\r\n if r >= i:\r\n a[i] = min(a[i - l], r - i)\r\n while i + a[i] < n and s[i + a[i]] == s[a[i]]:\r\n a[i] += 1\r\n if i + a[i] >= r:\r\n l, r = i, i + a[i]\r\n return a\r\n\r\n\r\nclass GeometryTopology:\r\n class Graph:\r\n class __Edge:\r\n def 
__init__(self, weight=1, capacity=1, **args):\r\n self.weight = weight\r\n self.capacity = capacity\r\n\r\n def __str__(self):\r\n return f\"weight: {self.weight}, cap: {self.capacity}\"\r\n\r\n class __Node:\r\n def __init__(self, **args):\r\n pass\r\n\r\n def __init__(self, n=0):\r\n self.__N = n\r\n self.nodes = [None] * n\r\n self.edges = [{} for _ in range(n)]\r\n\r\n def add_node_info(self, v, **args):\r\n self.nodes[v] = self.__Node(**args)\r\n\r\n def add_edge(self, u, v, update=False, **args):\r\n if not update and v in self.edges[u]:\r\n return\r\n self.edges[u][v] = self.__Edge(**args)\r\n\r\n def get_size(self):\r\n return self.__N\r\n\r\n def bfs(self, src=0):\r\n n = self.__N\r\n self.depth = self.lv = lv = [None] * n\r\n lv[src] = 0 # depth in tree, or level in general graph.\r\n self.dist = dist = [inf] * n\r\n dist[src] = 0 # dist for only tree.\r\n self.parent = par = [None] * n\r\n par[src] = src\r\n q = deque([src])\r\n while q:\r\n u = q.popleft()\r\n for v, e in self.edges[u].items():\r\n if e.capacity == 0 or lv[v] is not None:\r\n continue\r\n lv[v], dist[v], par[v] = lv[u] + 1, dist[u] + e.weight, u\r\n q.append(v)\r\n return dist\r\n\r\n def dinic(self, src, sink):\r\n def flow_to_sink(u, flow_in):\r\n if u == sink:\r\n return flow_in\r\n flow = 0\r\n for v, e in self.edges[u].items():\r\n if e.capacity == 0 or self.lv[v] <= self.lv[u]:\r\n continue\r\n f = flow_to_sink(v, min(flow_in, e.capacity))\r\n if not f:\r\n continue\r\n self.edges[u][v].capacity -= f\r\n if u in self.edges[v]:\r\n self.edges[v][u].capacity += f\r\n else:\r\n self.add_edge(v, u, capacity=f)\r\n flow_in -= f\r\n flow += f\r\n return flow\r\n\r\n flow = 0\r\n while True:\r\n self.bfs(src)\r\n if self.lv[sink] is None:\r\n return flow\r\n flow += flow_to_sink(src, inf)\r\n\r\n def ford_fulkerson(self):\r\n pass\r\n\r\n def push_relabel(self):\r\n pass\r\n\r\n def floyd_warshall(self):\r\n n = self.__N\r\n d = [[inf] * n for _ in range(n)]\r\n for u in range(n):\r\n d[u][u] = 0\r\n for v, e in self.edges[u].items():\r\n d[u][v] = e.weight\r\n for w in range(n):\r\n for u in range(n):\r\n for v in range(n):\r\n d[u][v] = min(d[u][v], d[u][w] + d[w][v])\r\n return d\r\n\r\n def dijkstra(self, src, paths_cnt=False, mod=None):\r\n dist = [inf] * self.__N\r\n dist[src] = 0\r\n visited = [False] * self.__N\r\n paths = [0] * self.__N\r\n paths[src] = 1\r\n q = [(0, src)]\r\n while q:\r\n d, u = heappop(q)\r\n if visited[u]:\r\n continue\r\n visited[u] = True\r\n for v, e in self.edges[u].items():\r\n dv = d + e.weight\r\n if dv > dist[v]:\r\n continue\r\n elif dv == dist[v]:\r\n paths[v] += paths[u]\r\n if mod:\r\n paths[v] %= mod\r\n continue\r\n paths[v], dist[v] = paths[u], dv\r\n heappush(q, (dv, v))\r\n if paths_cnt:\r\n return dist, paths\r\n else:\r\n return dist\r\n\r\n def astar(self, src, tgt, heuristic_func):\r\n cost = [inf] * self.__N\r\n q = [(heuristic_func(src, tgt), 0, src)]\r\n while q:\r\n _, c, u = heappop(q)\r\n if u == tgt:\r\n return c\r\n if cost[u] != inf:\r\n continue\r\n cost[u] = c\r\n for v, e in self.edges[u].items():\r\n if cost[v] != inf:\r\n continue\r\n h = heuristic_func(v, tgt)\r\n nc = c + e.weight\r\n heappush(q, (h + nc, nc, v))\r\n return inf\r\n\r\n def bellman_ford(self, src):\r\n n = self.__N\r\n d = [inf] * n\r\n d[src] = 0\r\n for _ in range(n - 1):\r\n for u in range(n):\r\n for v, e in self.edges[u].items():\r\n d[v] = min(d[v], d[u] + e.weight)\r\n for u in range(n):\r\n for v, e in self.edges[u].items():\r\n if d[u] + e.weight < d[v]:\r\n raise 
Exception(\"found negative cycle.\")\r\n return d\r\n\r\n def bfs01(self, src=0):\r\n d = [inf] * self.__N\r\n d[src] = 0\r\n q = deque([src])\r\n while q:\r\n u = q.popleft()\r\n for v, e in self.edges[u].items():\r\n dv = d[u] + e.weight\r\n if d[v] <= dv:\r\n continue\r\n d[v] = dv\r\n if e.weight:\r\n q.append(v)\r\n else:\r\n q.appendleft(v)\r\n return d\r\n\r\n def find_ancestors(self): # tree doubling.\r\n self.__ancestors = ancestors = [self.parent]\r\n for _ in range(max(self.depth).bit_length()):\r\n ancestors.append([ancestors[-1][u] for u in ancestors[-1]])\r\n\r\n def find_dist(self, u, v):\r\n return (\r\n self.dist[u]\r\n + self.dist[v]\r\n - 2 * self.dist[self.__find_lca(u, v)]\r\n )\r\n\r\n def __find_lca(self, u, v):\r\n du, dv = self.depth[u], self.depth[v]\r\n if du > dv:\r\n u, v = v, u\r\n du, dv = dv, du\r\n\r\n d = dv - du\r\n for i in range(d.bit_length()): # up-stream\r\n if d >> i & 1:\r\n v = self.__ancestors[i][v]\r\n if v == u:\r\n return v\r\n\r\n for i in range(\r\n du.bit_length() - 1, -1, -1\r\n ): # find direct child of LCA.\r\n nu, nv = self.__ancestors[i][u], self.__ancestors[i][v]\r\n if nu == nv:\r\n continue\r\n u, v = nu, nv\r\n\r\n return self.__ancestors[0][u]\r\n\r\n def init_dsu(self): # disjoint set union (union-find)\r\n n = self.__N\r\n self.parent = list(range(n))\r\n self.rank = [0] * n\r\n self.size = [1] * n\r\n\r\n def find(self, u):\r\n if self.parent[u] == u:\r\n return u\r\n self.parent[u] = self.find(self.parent[u])\r\n return self.parent[u]\r\n\r\n def unite(self, u, v):\r\n u, v = self.find(u), self.find(v)\r\n if u == v:\r\n return\r\n if self.rank[u] < self.rank[v]:\r\n u, v = v, u\r\n self.parent[v] = u\r\n self.size[u] += self.size[v]\r\n self.rank[u] = max(self.rank[u], self.rank[v] + 1)\r\n\r\n def same(self, u, v):\r\n return self.find(u) == self.find(v)\r\n\r\n def scc(self): # strongly connected components\r\n n = self.__N\r\n visited, q, root, r = [False] * n, [], [None] * n, 0\r\n gg = self.__class__(n)\r\n for u in range(n):\r\n for v in self.edges[u]:\r\n gg.add_edge(v, u)\r\n\r\n def dfs(u):\r\n if visited[u]:\r\n return\r\n visited[u] = True\r\n for v in self.edges[u]:\r\n dfs(v)\r\n q.append(u)\r\n\r\n def rev_dfs(u, r):\r\n if root[u] is not None:\r\n return\r\n root[u] = r\r\n for v in gg.edges[u]:\r\n rev_dfs(v, r)\r\n\r\n for u in range(n):\r\n dfs(u)\r\n for u in q[::-1]:\r\n rev_dfs(u, r)\r\n r += 1\r\n return root\r\n\r\n def kruskal(self): # minimum spanning tree\r\n n = self.__N\r\n uf = self.__class__(n)\r\n uf.init_dsu()\r\n edges = sorted(\r\n [\r\n (u, v, e.weight)\r\n for u in range(n)\r\n for v, e in self.edges[u].items()\r\n ],\r\n key=lambda x: x[2],\r\n )\r\n g = self.__class__(n)\r\n d = 0\r\n for u, v, w in edges:\r\n if uf.same(u, v):\r\n continue\r\n uf.unite(u, v)\r\n g.add_edge(u, v, weight=w)\r\n d += w\r\n return g, d\r\n\r\n def prim(self, src=0, return_parent=False): # minimum spanning tree\r\n n = self.__N\r\n g = self.__class__(n)\r\n parent, visited, dist = [None] * n, [False] * n, 0\r\n q = [(0, (src, src))]\r\n while q:\r\n d, (w, u) = heappop(q)\r\n if visited[u]:\r\n continue\r\n visited[u], parent[u] = True, w\r\n dist += d\r\n g.add_edge(w, u, weight=d)\r\n for v, e in self.edges[u].items():\r\n if not visited[v]:\r\n heappush(q, (e.weight, (u, v)))\r\n if return_parent:\r\n return g, dist, parent\r\n return g, dist\r\n\r\n def boruvka(self): # minimum spanning tree\r\n n = self.__N\r\n uf = self.__class__(n)\r\n uf.init_dsu()\r\n g = self.__class__(n)\r\n d = 0\r\n\r\n def 
dfs(u):\r\n if visited[u]:\r\n return (inf, (None, None))\r\n visited[u] = True\r\n cand = []\r\n for v, e in self.edges[u].items():\r\n if uf.same(u, v):\r\n cand.append(dfs(v))\r\n continue\r\n cand.append((e.weight, (u, v)))\r\n return sorted(cand)[0]\r\n\r\n while len(set(uf.parent)) != 1:\r\n edges, visited = [], [False] * n\r\n for u in range(n):\r\n if visited[u]:\r\n continue\r\n edges.append(dfs(u))\r\n for w, (u, v) in edges:\r\n if uf.same(u, v):\r\n continue\r\n g.add_edge(u, v, weight=w)\r\n uf.unite(u, v)\r\n d += w\r\n for u in range(n):\r\n uf.find(u)\r\n\r\n return g, d\r\n\r\n def tsp(self): # traveling salesperson problem\r\n pass\r\n\r\n @staticmethod\r\n def triangle_area(p0, p1, p2, signed=False):\r\n x1, y1, x2, y2 = (\r\n p1[0] - p0[0],\r\n p1[1] - p0[1],\r\n p2[0] - p0[0],\r\n p2[1] - p0[1],\r\n )\r\n return (\r\n (x1 * y2 - x2 * y1) / 2 if signed else abs(x1 * y2 - x2 * y1) / 2\r\n )\r\n\r\n @classmethod\r\n def intersect(cls, seg1, seg2):\r\n (p1, p2), (p3, p4) = seg1, seg2\r\n t1 = cls.triangle_area(p1, p2, p3, signed=True)\r\n t2 = cls.triangle_area(p1, p2, p4, signed=True)\r\n t3 = cls.triangle_area(p3, p4, p1, signed=True)\r\n t4 = cls.triangle_area(p3, p4, p2, signed=True)\r\n return (t1 * t2 < 0) & (t3 * t4 < 0)\r\n\r\n\r\ndef cumxor(a):\r\n return reduce(xor, a, 0)\r\n\r\n\r\ndef cumor(a):\r\n return reduce(or_, a, 0)\r\n\r\n\r\ndef bit_count(n):\r\n cnt = 0\r\n while n:\r\n cnt += n & 1\r\n n >>= 1\r\n return cnt\r\n\r\n\r\nclass AtCoder:\r\n class ABC001:\r\n @staticmethod\r\n def a():\r\n h1, h2 = map(int, sys.stdin.read().split())\r\n print(h1 - h2)\r\n\r\n @staticmethod\r\n def d():\r\n def to_minuites(x):\r\n q, r = divmod(x, 100)\r\n return 60 * q + r\r\n\r\n def to_hmform(x):\r\n q, r = divmod(x, 60)\r\n return 100 * q + r\r\n\r\n n = int(sys.stdin.readline().rstrip())\r\n term = [0] * 2001\r\n for _ in range(n):\r\n s, e = map(\r\n to_minuites,\r\n map(int, sys.stdin.readline().rstrip().split(\"-\")),\r\n )\r\n s = s // 5 * 5\r\n e = (e + 4) // 5 * 5\r\n term[s] += 1\r\n term[e + 1] -= 1\r\n for i in range(2000):\r\n term[i + 1] += term[i]\r\n\r\n res = []\r\n raining = False\r\n for i in range(2001):\r\n if term[i]:\r\n if not raining:\r\n s = i\r\n raining = True\r\n elif raining:\r\n res.append((s, i - 1))\r\n raining = False\r\n for s, e in res:\r\n print(f\"{to_hmform(s):04}-{to_hmform(e):04}\")\r\n\r\n class ABC002:\r\n @staticmethod\r\n def a():\r\n print(max(map(int, sys.stdin.readline().split())))\r\n\r\n @staticmethod\r\n def b():\r\n vowels = set(\"aeiou\")\r\n print(\r\n \"\".join(\r\n [\r\n c\r\n for c in sys.stdin.readline().rstrip()\r\n if c not in vowels\r\n ]\r\n )\r\n )\r\n\r\n @staticmethod\r\n def c():\r\n print(\r\n GeometryTopology.triangle_area(\r\n *map(int, sys.stdin.readline().split())\r\n )\r\n )\r\n\r\n @staticmethod\r\n def d():\r\n n, m = map(int, sys.stdin.readline().split())\r\n edges = set(\r\n (x - 1, y - 1)\r\n for x, y in zip(*[map(int, sys.stdin.read().split())] * 2)\r\n )\r\n print(\r\n max(\r\n len(s)\r\n for i in range(1, 1 << n)\r\n for s in [[j for j in range(n) if i >> j & 1]]\r\n if all(\r\n (x, y) in edges\r\n for x, y in itertools.combinations(s, 2)\r\n )\r\n )\r\n )\r\n\r\n @staticmethod\r\n def d_2():\r\n n, m = map(int, sys.stdin.readline().split())\r\n relations = [1 << i for i in range(n)]\r\n for x, y in zip(*[map(int, sys.stdin.read().split())] * 2):\r\n relations[x] |= 1 << (y - 1)\r\n relations[y] |= 1 << (x - 1)\r\n res = 0\r\n for i in range(1 << n):\r\n s, cnt = (1 << n) - 1, 0\r\n for j in 
range(n):\r\n if i >> j & 1:\r\n t &= relations[j] | 1 << j\r\n cnt += 1\r\n if s & i == i:\r\n res = max(res, cnt)\r\n print(res)\r\n\r\n class ABC003:\r\n @staticmethod\r\n def a():\r\n print((int(sys.stdin.readline().rstrip()) + 1) * 5000)\r\n\r\n @staticmethod\r\n def b():\r\n atcoder = set(\"atcoder\")\r\n s, t = sys.stdin.read().split()\r\n print(\r\n all(\r\n s[i] == t[i]\r\n or s[i] == \"@\"\r\n and t[i] in atcoder\r\n or t[i] == \"@\"\r\n and s[i] in atcoder\r\n for i in range(len(s))\r\n )\r\n and \"You can win\"\r\n or \"You will lose\"\r\n )\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *r = map(int, sys.stdin.read().split())\r\n print(reduce(lambda x, y: (x + y) / 2, sorted(r)[-k:], 0))\r\n\r\n class ABC004:\r\n @staticmethod\r\n def a():\r\n print(int(sys.stdin.readline().rstrip()) * 2)\r\n\r\n @staticmethod\r\n def b():\r\n for l in [sys.stdin.readline().rstrip() for _ in range(4)][::-1]:\r\n print(l[::-1])\r\n\r\n @staticmethod\r\n def c():\r\n n = int(sys.stdin.readline().rstrip()) % 30\r\n res = list(range(1, 7))\r\n for i in range(n):\r\n i %= 5\r\n res[i], res[i + 1] = res[i + 1], res[i]\r\n print(*res, sep=\"\")\r\n\r\n class ABC005:\r\n @staticmethod\r\n def a():\r\n x, y = map(int, sys.stdin.readline().split())\r\n print(y // x)\r\n\r\n @staticmethod\r\n def b():\r\n n, *t = map(int, sys.stdin.read().split())\r\n print(min(t))\r\n\r\n @staticmethod\r\n def c():\r\n t = int(sys.stdin.readline().rstrip())\r\n n = int(sys.stdin.readline().rstrip())\r\n a = [int(x) for x in sys.stdin.readline().split()]\r\n m = int(sys.stdin.readline().rstrip())\r\n b = [int(x) for x in sys.stdin.readline().split()]\r\n i = 0\r\n for p in b:\r\n if i == n:\r\n print(\"no\")\r\n return\r\n while p - a[i] > t:\r\n i += 1\r\n if i == n:\r\n print(\"no\")\r\n return\r\n if a[i] > p:\r\n print(\"no\")\r\n return\r\n i += 1\r\n print(\"yes\")\r\n\r\n @staticmethod\r\n def d():\r\n n = int(sys.stdin.readline().rstrip())\r\n d = np.array(\r\n [sys.stdin.readline().split() for _ in range(n)], np.int64\r\n )\r\n s = d.cumsum(axis=0).cumsum(axis=1)\r\n s = np.pad(s, 1)\r\n max_del = np.zeros((n + 1, n + 1), dtype=np.int64)\r\n for y in range(1, n + 1):\r\n for x in range(1, n + 1):\r\n max_del[y, x] = np.amax(\r\n s[y : n + 1, x : n + 1]\r\n - s[0 : n - y + 1, x : n + 1]\r\n - s[y : n + 1, 0 : n - x + 1]\r\n + s[0 : n - y + 1, 0 : n - x + 1]\r\n )\r\n res = np.arange(n**2 + 1)[:, None]\r\n i = np.arange(1, n + 1)\r\n res = max_del[i, np.minimum(res // i, n)].max(axis=1)\r\n q = int(sys.stdin.readline().rstrip())\r\n p = np.array(sys.stdin.read().split(), dtype=np.int64)\r\n print(*res[p], sep=\"\\n\")\r\n\r\n class ABC006:\r\n @staticmethod\r\n def a():\r\n n = sys.stdin.readline().rstrip()\r\n if \"3\" in n:\r\n print(\"YES\")\r\n elif int(n) % 3 == 0:\r\n print(\"YES\")\r\n else:\r\n print(\"NO\")\r\n\r\n @staticmethod\r\n def b():\r\n mod = 10007\r\n a = np.eye(N=3, k=-1, dtype=np.int64)\r\n a[0] = 1\r\n n = int(sys.stdin.readline().rstrip())\r\n a = Algebra.matrix_pow(a, n - 1, mod)\r\n print(a[2][0])\r\n\r\n @staticmethod\r\n def c():\r\n n, m = map(int, sys.stdin.readline().split())\r\n cnt = [0, 0, 0]\r\n if m == 1:\r\n cnt = [-1, -1, -1]\r\n else:\r\n if m & 1:\r\n m -= 3\r\n cnt[1] += 1\r\n n -= 1\r\n cnt[2] = m // 2 - n\r\n cnt[0] = n - cnt[2]\r\n if cnt[0] < 0 or cnt[1] < 0 or cnt[2] < 0:\r\n print(-1, -1, -1)\r\n else:\r\n print(*cnt, sep=\" \")\r\n\r\n @staticmethod\r\n def d():\r\n n, *c = map(int, sys.stdin.read().split())\r\n lis = [inf] * n\r\n for x in c:\r\n lis[bi_l(lis, x)] = 
x\r\n print(n - bi_l(lis, inf))\r\n\r\n class ABC007:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n print(n - 1)\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip()\r\n if s == \"a\":\r\n print(-1)\r\n else:\r\n print(\"a\")\r\n\r\n @staticmethod\r\n def c():\r\n r, c = map(int, sys.stdin.readline().split())\r\n sy, sx = map(int, sys.stdin.readline().split())\r\n gy, gx = map(int, sys.stdin.readline().split())\r\n sy -= 1\r\n sx -= 1\r\n gy -= 1\r\n gx -= 1\r\n maze = [sys.stdin.readline().rstrip() for _ in range(r)]\r\n queue = deque([(sy, sx)])\r\n dist = np.full((r, c), np.inf)\r\n dist[sy, sx] = 0\r\n while queue:\r\n y, x = queue.popleft()\r\n for i, j in [(-1, 0), (1, 0), (0, -1), (0, 1)]:\r\n i += y\r\n j += x\r\n if maze[i][j] == \"#\" or dist[i, j] != np.inf:\r\n continue\r\n dist[i, j] = dist[y, x] + 1\r\n queue.append((i, j))\r\n print(int(dist[gy, gx]))\r\n\r\n @staticmethod\r\n def d():\r\n ng = set([4, 9])\r\n\r\n def count(d):\r\n return d if d <= 4 else d - 1\r\n\r\n def f(n):\r\n x = [int(d) for d in str(n)]\r\n flg = True\r\n dp = 0\r\n for d in x:\r\n dp = dp * 8 + flg * count(d)\r\n if d in ng:\r\n flg = False\r\n return n - (dp + flg)\r\n\r\n a, b = map(int, sys.stdin.readline().split())\r\n print(f(b) - f(a - 1))\r\n\r\n class ABC008:\r\n @staticmethod\r\n def a():\r\n s, t = map(int, sys.stdin.readline().split())\r\n print(t - s + 1)\r\n\r\n @staticmethod\r\n def b():\r\n n, *s = sys.stdin.read().split()\r\n res = defaultdict(int)\r\n for name in s:\r\n res[name] += 1\r\n print(sorted(res.items(), key=lambda x: x[1])[-1][0])\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n c = n - np.count_nonzero(a[:, None] % a, axis=1)\r\n print(np.sum((c + 1) // 2 / c))\r\n\r\n @staticmethod\r\n def d():\r\n w, h, n, *xy = map(int, sys.stdin.read().split())\r\n (*xy,) = zip(*([iter(xy)] * 2))\r\n\r\n @lru_cache(maxsize=None)\r\n def count(x1, y1, x2, y2):\r\n res = 0\r\n for x, y in xy:\r\n if not (x1 <= x <= x2 and y1 <= y <= y2):\r\n continue\r\n cnt = (x2 - x1) + (y2 - y1) + 1\r\n cnt += count(x1, y1, x - 1, y - 1)\r\n cnt += count(x1, y + 1, x - 1, y2)\r\n cnt += count(x + 1, y1, x2, y - 1)\r\n cnt += count(x + 1, y + 1, x2, y2)\r\n res = max(res, cnt)\r\n return res\r\n\r\n print(count(1, 1, w, h))\r\n\r\n class ABC009:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n print((n + 1) // 2)\r\n\r\n @staticmethod\r\n def b():\r\n n, *a = map(int, sys.stdin.read().split())\r\n print(sorted(set(a))[-2])\r\n\r\n @staticmethod\r\n def c():\r\n n, k = map(int, sys.stdin.readline().split())\r\n s = list(sys.stdin.readline().rstrip())\r\n cost = [1] * n\r\n r = k\r\n for i in range(n - 1):\r\n q = []\r\n for j in range(i + 1, n):\r\n if s[j] < s[i] and cost[i] + cost[j] <= r:\r\n heappush(q, (s[j], cost[i] + cost[j], -j))\r\n if not q:\r\n continue\r\n _, c, j = heappop(q)\r\n j = -j\r\n s[i], s[j] = s[j], s[i]\r\n r -= c\r\n cost[i] = cost[j] = 0\r\n print(\"\".join(s))\r\n\r\n @staticmethod\r\n def d():\r\n k, m = map(int, sys.stdin.readline().split())\r\n a = np.array([int(x) for x in sys.stdin.readline().split()])\r\n c = np.array([int(x) for x in sys.stdin.readline().split()])\r\n mask = (1 << 32) - 1\r\n d = np.eye(k, k, -1, dtype=np.uint32) * mask\r\n d[0] = c\r\n if m <= k:\r\n print(a[m - 1])\r\n return\r\n # print(Algebra.bitwise_mat_pow(d, m-k))\r\n # print(Algebra.bitwise_dot(Algebra.bitwise_mat_pow(d, m-k), a[::-1].reshape(-1, 
1))[0].item())\r\n print(\r\n Algebra.bitwise_dot(\r\n Algebra.bitwise_mat_pow(d, m - k), a[::-1].reshape(-1, 1)\r\n )[0][0]\r\n )\r\n\r\n class ABC010:\r\n @staticmethod\r\n def a():\r\n print(sys.stdin.readline().rstrip() + \"pp\")\r\n\r\n @staticmethod\r\n def b():\r\n n, *a = map(int, sys.stdin.read().split())\r\n tot = 0\r\n for x in a:\r\n c = 0\r\n while x % 2 == 0 or x % 3 == 2:\r\n x -= 1\r\n c += 1\r\n tot += c\r\n print(tot)\r\n\r\n @staticmethod\r\n def c():\r\n sx, sy, gx, gy, t, v, n, *xy = map(int, sys.stdin.read().split())\r\n x, y = np.array(xy).reshape(-1, 2).T\r\n\r\n def dist(x1, y1, x2, y2):\r\n return np.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)\r\n\r\n ans = (\r\n \"YES\"\r\n if (dist(sx, sy, x, y) + dist(x, y, gx, gy) <= v * t).any()\r\n else \"NO\"\r\n )\r\n print(ans)\r\n\r\n @staticmethod\r\n def d():\r\n n, g, e = map(int, sys.stdin.readline().split())\r\n p = [int(x) for x in sys.stdin.readline().split()]\r\n x, y = [], []\r\n for _ in range(e):\r\n a, b = map(int, sys.stdin.readline().split())\r\n x.append(a)\r\n y.append(b)\r\n x.append(b)\r\n y.append(a)\r\n for a in p:\r\n x.append(a)\r\n y.append(n)\r\n if not x:\r\n print(0)\r\n return\r\n c = [1] * len(x)\r\n min_cut = maximum_flow(\r\n csr_matrix((c, (x, y)), (n + 1, n + 1)), source=0, sink=n\r\n ).flow_value\r\n print(min_cut)\r\n\r\n @staticmethod\r\n def d_2():\r\n n, g, e = map(int, sys.stdin.readline().split())\r\n graph = nx.DiGraph()\r\n graph.add_nodes_from(range(n + 1))\r\n for p in [int(x) for x in sys.stdin.readline().split()]:\r\n graph.add_edge(p, n, capacity=1)\r\n for _ in range(e):\r\n a, b = map(int, sys.stdin.readline().split())\r\n graph.add_edge(a, b, capacity=1)\r\n graph.add_edge(b, a, capacity=1)\r\n print(nx.minimum_cut_value(graph, 0, n))\r\n\r\n @staticmethod\r\n def d_3():\r\n n, q, m = map(int, sys.stdin.readline().split())\r\n g = GeometryTopology.Graph(n + 1)\r\n # for i in range(n+1): g.add_node(i)\r\n for p in [int(x) for x in sys.stdin.readline().split()]:\r\n g.add_edge(p, n, capacity=1)\r\n for a, b in zip(*[map(int, sys.stdin.read().split())] * 2):\r\n g.add_edge(a, b, capacity=1)\r\n g.add_edge(b, a, capacity=1)\r\n print(g.dinic(0, n))\r\n\r\n class ABC011:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n print(n % 12 + 1)\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip()\r\n print(s[0].upper() + s[1:].lower())\r\n\r\n @staticmethod\r\n def c():\r\n n, *ng = map(int, sys.stdin.read().split())\r\n ng = set(ng)\r\n if n in ng:\r\n print(\"NO\")\r\n else:\r\n r = 100\r\n while n > 0:\r\n if r == 0:\r\n print(\"NO\")\r\n return\r\n for i in range(3, 0, -1):\r\n if (n - i) in ng:\r\n continue\r\n n -= i\r\n r -= 1\r\n break\r\n else:\r\n print(\"NO\")\r\n return\r\n print(\"YES\")\r\n\r\n @staticmethod\r\n def d():\r\n n, d, x, y = map(int, sys.stdin.read().split())\r\n x, y = abs(x), abs(y)\r\n if x % d or y % d:\r\n print(0)\r\n return\r\n x, y = x // d, y // d\r\n r = n - (x + y)\r\n if r < 0 or r & 1:\r\n print(0)\r\n return\r\n\r\n res = 0\r\n half_p = pow(1 / 2, n)\r\n for d in range(r // 2 + 1): # 0 <= d <= r//2, south\r\n south, north = d, y + d\r\n west = (r - 2 * d) // 2\r\n res += (\r\n half_p\r\n * comb(n, south, exact=True)\r\n * comb(n - south, north, exact=True)\r\n * comb(n - south - north, west, exact=True)\r\n * half_p\r\n )\r\n print(res)\r\n\r\n class ABC012:\r\n @staticmethod\r\n def a():\r\n a, b = map(int, sys.stdin.readline().split())\r\n print(b, a)\r\n\r\n @staticmethod\r\n def b():\r\n n = 
int(sys.stdin.readline().rstrip())\r\n h, n = divmod(n, 3600)\r\n m, s = divmod(n, 60)\r\n print(f\"{h:02}:{m:02}:{s:02}\")\r\n\r\n @staticmethod\r\n def c():\r\n n = 2025 - int(sys.stdin.readline().rstrip())\r\n res = []\r\n for i in range(1, 10):\r\n if n % i != 0 or n // i > 9:\r\n continue\r\n res.append(f\"{i} x {n//i}\")\r\n print(*sorted(res), sep=\"\\n\")\r\n\r\n @staticmethod\r\n def d():\r\n n, m, *abt = map(int, sys.stdin.read().split())\r\n a, b, t = np.array(abt).reshape(m, 3).T\r\n res = shortest_path(\r\n csr_matrix((t, (a - 1, b - 1)), (n, n)),\r\n method=\"FW\",\r\n directed=False,\r\n )\r\n print(res.max(axis=-1).min().astype(np.int64))\r\n\r\n @staticmethod\r\n def d_2():\r\n n, m, *abt = map(int, sys.stdin.read().split())\r\n g = GeometryTopology.Graph(n)\r\n for a, b, t in zip(*[iter(abt)] * 3):\r\n a -= 1\r\n b -= 1\r\n g.add_edge(a, b, weight=t)\r\n g.add_edge(b, a, weight=t)\r\n\r\n print(min(max(d) for d in g.floyd_warshall()))\r\n\r\n class ABC013:\r\n @staticmethod\r\n def a():\r\n print(ord(sys.stdin.readline().rstrip()) - ord(\"A\") + 1)\r\n\r\n @staticmethod\r\n def b():\r\n a, b = map(int, sys.stdin.read().split())\r\n d = abs(a - b)\r\n print(min(d, 10 - d))\r\n\r\n @staticmethod\r\n def c():\r\n n, h, a, b, c, d, e = map(int, sys.stdin.read().split())\r\n y = np.arange(n + 1)\r\n x = (n * e - h - (d + e) * y) // (b + e) + 1\r\n np.maximum(x, 0, out=x)\r\n np.minimum(x, n - y, out=x)\r\n print(np.amin(a * x + c * y))\r\n\r\n @staticmethod\r\n def d():\r\n n, m, d, *a = map(int, sys.stdin.read().split())\r\n res = list(range(n))\r\n\r\n def swap(i, j):\r\n res[i], res[j] = res[j], res[i]\r\n\r\n for i in a[::-1]:\r\n swap(i - 1, i)\r\n res = np.array(res)\r\n\r\n def binary_method(a, p):\r\n b = np.arange(n)\r\n while p:\r\n if p & 1:\r\n b = a[b]\r\n p >>= 1\r\n a = a[a]\r\n return b\r\n\r\n print(*(binary_method(res, d) + 1), sep=\"\\n\")\r\n\r\n class ABC014:\r\n @staticmethod\r\n def a():\r\n a, b = map(int, sys.stdin.read().split())\r\n print((a + b - 1) // b * b - a)\r\n\r\n @staticmethod\r\n def b():\r\n n, x, *a = map(int, sys.stdin.read().split())\r\n print(sum(a[i] for i in range(n) if x >> i & 1))\r\n\r\n @staticmethod\r\n def c():\r\n n, *ab = map(int, sys.stdin.read().split())\r\n a, b = np.array(ab).reshape(n, 2).T\r\n res = np.zeros(10**6 + 2, dtype=np.int64)\r\n np.add.at(res, a, 1)\r\n np.subtract.at(res, b + 1, 1)\r\n np.cumsum(res, out=res)\r\n print(res.max())\r\n\r\n @staticmethod\r\n def d():\r\n n = int(sys.stdin.readline().rstrip())\r\n g = GeometryTopology.Graph(n)\r\n for _ in range(n - 1):\r\n x, y = map(int, sys.stdin.readline().split())\r\n x -= 1\r\n y -= 1\r\n g.add_edge(x, y, weight=1)\r\n g.add_edge(y, x, weight=1)\r\n\r\n g.bfs(0)\r\n g.find_ancestors()\r\n\r\n q, *ab = map(int, sys.stdin.read().split())\r\n for a, b in zip(*[iter(ab)] * 2):\r\n a -= 1\r\n b -= 1\r\n print(g.find_dist(a, b) + 1)\r\n\r\n class ABC015:\r\n @staticmethod\r\n def a():\r\n a, b = sys.stdin.read().split()\r\n print(a if len(a) > len(b) else b)\r\n\r\n @staticmethod\r\n def b():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n print(\r\n np.ceil(\r\n a[np.nonzero(a)[0]].sum() / np.count_nonzero(a)\r\n ).astype(np.int8)\r\n )\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *t = map(int, sys.stdin.read().split())\r\n t = np.array(t).reshape(n, k)\r\n x = np.zeros((1, 1), dtype=np.int8)\r\n for i in range(n):\r\n x = x.reshape(-1, 1) ^ t[i]\r\n print(\"Found\" if np.count_nonzero(x == 0) > 0 else \"Nothing\")\r\n\r\n 
@staticmethod\r\n def d():\r\n w, n, k, *ab = map(int, sys.stdin.read().split())\r\n dp = np.zeros((k + 1, w + 1), dtype=np.int32)\r\n for a, b in zip(*[iter(ab)] * 2):\r\n np.maximum(dp[1:, a:], dp[:-1, :-a] + b, out=dp[1:, a:])\r\n print(dp[k][w])\r\n\r\n class ABC016:\r\n @staticmethod\r\n def a():\r\n m, d = map(int, sys.stdin.readline().split())\r\n print(\"YES\" if m % d == 0 else \"NO\")\r\n\r\n @staticmethod\r\n def b():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n f1, f2 = a + b == c, a - b == c\r\n if f1 & f2:\r\n print(\"?\")\r\n elif f1 & (~f2):\r\n print(\"+\")\r\n elif (~f1) & f2:\r\n print(\"-\")\r\n else:\r\n print(\"!\")\r\n\r\n @staticmethod\r\n def c():\r\n n, _, *ab = map(int, sys.stdin.read().split())\r\n f = [0] * n\r\n for a, b in zip(*[iter(ab)] * 2):\r\n a -= 1\r\n b -= 1\r\n f[a] |= 1 << b\r\n f[b] |= 1 << a\r\n res = [\r\n bit_count(\r\n cumor(f[j] for j in range(n) if f[i] >> j & 1)\r\n & ~(f[i] | 1 << i)\r\n )\r\n for i in range(n)\r\n ]\r\n print(*res, sep=\"\\n\")\r\n\r\n @staticmethod\r\n def d():\r\n sx, sy, gx, gy = map(int, sys.stdin.readline().split())\r\n seg1 = ((sx, sy), (gx, gy))\r\n n = int(sys.stdin.readline().rstrip())\r\n p1 = (\r\n np.array(sys.stdin.read().split(), dtype=np.int64)\r\n .reshape(n, 2)\r\n .T\r\n )\r\n p2 = np.hstack((p1[:, 1:], p1[:, :1]))\r\n seg2 = (p1, p2)\r\n print(\r\n np.count_nonzero(GeometryTopology.intersect(seg1, seg2)) // 2\r\n + 1\r\n )\r\n\r\n class ABC017:\r\n @staticmethod\r\n def a():\r\n s, e = (\r\n np.array(sys.stdin.read().split(), dtype=np.int16)\r\n .reshape(3, 2)\r\n .T\r\n )\r\n print((s // 10 * e).sum())\r\n\r\n @staticmethod\r\n def b():\r\n choku_tail = set(\"ch, o, k, u\".split(\", \"))\r\n\r\n def is_choku(s):\r\n if s == \"\":\r\n return True\r\n if len(s) >= 1 and (s[-1] in choku_tail) and is_choku(s[:-1]):\r\n return True\r\n if len(s) >= 2 and (s[-2:] in choku_tail) and is_choku(s[:-2]):\r\n return True\r\n return False\r\n\r\n print(\"YES\" if is_choku(sys.stdin.readline().rstrip()) else \"NO\")\r\n\r\n @staticmethod\r\n def c():\r\n n, m, *lrs = map(int, sys.stdin.read().split())\r\n l, r, s = np.array(lrs).reshape(n, 3).T\r\n score = np.zeros((m + 1,), dtype=np.int32)\r\n np.add.at(score, l - 1, s)\r\n np.subtract.at(score, r, s)\r\n np.cumsum(score, out=score)\r\n print(s.sum() - score[:m].min())\r\n\r\n @staticmethod\r\n def d():\r\n n, m, *f = map(int, sys.stdin.read().split())\r\n prev = [0] * (n + 1)\r\n tmp = defaultdict(int)\r\n for i in range(n):\r\n prev[i + 1] = tmp[f[i]]\r\n tmp[f[i]] = i + 1\r\n\r\n dp = [0] * (n + 1)\r\n dp[0] = 1\r\n l, s = 0, dp[0]\r\n for i in range(1, n + 1):\r\n while l < prev[i]:\r\n s = (s - dp[l]) % MOD\r\n l += 1\r\n dp[i] = s\r\n s = (s + dp[i]) % MOD\r\n print(dp[n])\r\n\r\n class ABC018:\r\n @staticmethod\r\n def a():\r\n (*a,) = map(int, sys.stdin.read().split())\r\n a = sorted(enumerate(a), key=lambda x: -x[1])\r\n res = [None] * 3\r\n for i in range(3):\r\n res[a[i][0]] = i + 1\r\n print(*res, sep=\"\\n\")\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip()\r\n n, *lr = map(int, sys.stdin.read().split())\r\n for l, r in zip(*[iter(lr)] * 2):\r\n l -= 1\r\n r -= 1\r\n s = s[:l] + s[l : r + 1][::-1] + s[r + 1 :]\r\n print(s)\r\n\r\n @staticmethod\r\n def c():\r\n r, c, k = map(int, sys.stdin.readline().split())\r\n s = np.array([list(s) for s in sys.stdin.read().split()])\r\n s = np.pad(s, 1, constant_values=\"x\")\r\n\r\n a = np.zeros_like(s, dtype=np.float64)\r\n a[s == \"o\"] = np.inf\r\n for i in range(1, r + 
1):\r\n np.minimum(a[i - 1, :] + 1, a[i, :], out=a[i, :])\r\n for i in range(r, 0, -1):\r\n np.minimum(a[i + 1, :] + 1, a[i, :], out=a[i, :])\r\n for j in range(1, c + 1):\r\n np.minimum(a[:, j - 1] + 1, a[:, j], out=a[:, j])\r\n for j in range(c, 0, -1):\r\n np.minimum(a[:, j + 1] + 1, a[:, j], out=a[:, j])\r\n print(np.count_nonzero(a >= k))\r\n\r\n @staticmethod\r\n def c_2():\r\n r, c, k = map(int, sys.stdin.readline().split())\r\n s = np.array([list(s) for s in sys.stdin.read().split()])\r\n s = np.pad(s, 1, constant_values=\"x\")\r\n a = (s == \"o\").astype(np.int16)\r\n a = distance_transform_cdt(a, metric=\"taxicab\")\r\n print(np.count_nonzero(a >= k))\r\n\r\n @staticmethod\r\n def d():\r\n n, m, p, q, r, *xyz = map(int, sys.stdin.read().split())\r\n x, y, z = np.array(xyz).reshape(r, 3).T\r\n h = np.zeros((n, m), dtype=np.int32)\r\n h[x - 1, y - 1] = z\r\n g = np.array([*itertools.combinations(range(n), p)])\r\n print(np.sort(h[g].sum(axis=1), axis=1)[:, -q:].sum(axis=1).max())\r\n\r\n class ABC019:\r\n @staticmethod\r\n def a():\r\n (*a,) = map(int, sys.stdin.readline().split())\r\n print(sorted(a)[1])\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip() + \"$\"\r\n cnt = 0\r\n prev = \"$\"\r\n t = \"\"\r\n for c in s:\r\n if c == prev:\r\n cnt += 1\r\n continue\r\n t += prev + str(cnt)\r\n prev = c\r\n cnt = 1\r\n print(t[2:])\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n res = set()\r\n for x in a:\r\n while not x & 1:\r\n x >>= 1\r\n res.add(x)\r\n print(len(res))\r\n\r\n @staticmethod\r\n def d():\r\n def inquire(u, v):\r\n print(f\"? {u} {v}\".format(u, v), flush=True)\r\n return int(sys.stdin.readline().rstrip())\r\n\r\n n = int(sys.stdin.readline().rstrip())\r\n u = sorted([(inquire(1, v), v) for v in range(2, n + 1)])[-1][1]\r\n d = max((inquire(u, v)) for v in range(1, n + 1) if u != v)\r\n print(f\"! 
{d}\")\r\n\r\n class ABC020:\r\n @staticmethod\r\n def a():\r\n print(\r\n \"ABC\"\r\n if int(sys.stdin.readline().rstrip()) == 1\r\n else \"chokudai\"\r\n )\r\n\r\n @staticmethod\r\n def b():\r\n a, b = sys.stdin.readline().split()\r\n print(int(a + b) * 2)\r\n\r\n @staticmethod\r\n def c():\r\n h, w, t = map(int, sys.stdin.readline().split())\r\n s = [list(s) for s in sys.stdin.read().split()]\r\n for i in range(h):\r\n for j in range(w):\r\n if s[i][j] == \"S\":\r\n sy, sx = i, j\r\n if s[i][j] == \"G\":\r\n gy, gx = i, j\r\n s[sy][sx] = s[gy][gx] = \".\"\r\n source, target = sy * w + sx, gy * w + gx\r\n\r\n def heuristic_function(u, v=target):\r\n uy, ux = divmod(u, w)\r\n vy, vx = divmod(v, w)\r\n return abs(vy - uy) + abs(ux - vx)\r\n\r\n def min_time(x):\r\n g = GeometryTopology.Graph(h * w)\r\n # g = nx.DiGraph()\r\n\r\n for i in range(h):\r\n for j in range(w):\r\n u = i * w + j\r\n if i > 0:\r\n g.add_edge(\r\n u,\r\n (i - 1) * w + j,\r\n weight=(1 if s[i - 1][j] == \".\" else x),\r\n )\r\n if i < h - 1:\r\n g.add_edge(\r\n u,\r\n (i + 1) * w + j,\r\n weight=(1 if s[i + 1][j] == \".\" else x),\r\n )\r\n if j > 0:\r\n g.add_edge(\r\n u,\r\n i * w + j - 1,\r\n weight=(1 if s[i][j - 1] == \".\" else x),\r\n )\r\n if j < w - 1:\r\n g.add_edge(\r\n u,\r\n i * w + j + 1,\r\n weight=(1 if s[i][j + 1] == \".\" else x),\r\n )\r\n\r\n return g.dijkstra(source)[target]\r\n return g.astar(source, target, heuristic_function)\r\n # return nx.dijkstra_path_length(g, source, target)\r\n # return nx.astar_path_length(g, source, target, heuristic_function)\r\n\r\n def binary_search():\r\n lo, hi = 1, t + 1\r\n while lo + 1 < hi:\r\n x = (lo + hi) // 2\r\n if min_time(x) > t:\r\n hi = x\r\n else:\r\n lo = x\r\n return lo\r\n\r\n print(binary_search())\r\n\r\n @staticmethod\r\n def d():\r\n n, k = map(int, sys.stdin.readline().split())\r\n div = sorted(NumberTheory.find_divisors(k))\r\n l = len(div)\r\n s = [0] * l\r\n for i, d in enumerate(div):\r\n s[i] = (1 + n // d) * (n // d) // 2 * d % MOD\r\n for i in range(l - 1, -1, -1):\r\n for j in range(i + 1, l):\r\n if div[j] % div[i]:\r\n continue\r\n s[i] = (s[i] - s[j]) % MOD\r\n\r\n print(\r\n sum(s[i] * k // div[i] % MOD for i in range(l)) % MOD\r\n ) # ans is LCM.\r\n\r\n class ABC021:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n s = [1 << i for i in range(5) if n >> i & 1]\r\n print(len(s), *s, sep=\"\\n\")\r\n\r\n @staticmethod\r\n def b():\r\n n, a, b, k, *p = map(int, sys.stdin.read().split())\r\n print(\"YES\" if len(set(p) | set([a, b])) == k + 2 else \"NO\")\r\n\r\n @staticmethod\r\n def c():\r\n n, a, b, m, *xy = map(int, sys.stdin.read().split())\r\n x, y = np.array(xy).reshape(m, 2).T - 1\r\n a -= 1\r\n b -= 1\r\n g = csgraph_to_dense(\r\n csr_matrix((np.ones(m), (x, y)), (n, n), dtype=np.int8)\r\n )\r\n g = np.logical_or(g, g.T)\r\n paths = np.zeros(n, dtype=np.int64).reshape(-1, 1)\r\n paths[a, 0] = 1\r\n while not paths[b, 0]:\r\n paths = np.dot(g, paths) % MOD\r\n print(paths[b, 0])\r\n\r\n @staticmethod\r\n def c_2():\r\n n, a, b, m, *xy = map(int, sys.stdin.read().split())\r\n a -= 1\r\n b -= 1\r\n g = GeometryTopology.Graph()\r\n\r\n for x, y in zip(*[iter(xy)] * 2):\r\n x -= 1\r\n y -= 1\r\n g.add_edge(x, y, weight=1)\r\n g.add_edge(y, x, weight=1)\r\n\r\n dist, paths = g.dijkstra(a, paths_cnt=True, mod=MOD)\r\n print(paths[b])\r\n\r\n @staticmethod\r\n def d():\r\n n, k = map(int, sys.stdin.read().split())\r\n cn = Combinatorics.CombinationsMod()\r\n print(cn(n + k - 1, k))\r\n\r\n class 
ABC022:\r\n @staticmethod\r\n def a():\r\n n, s, t, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n np.cumsum(a, out=a)\r\n print(((s <= a) & (a <= t)).sum())\r\n\r\n @staticmethod\r\n def b():\r\n n, *a = map(int, sys.stdin.read().split())\r\n c = Counter(a)\r\n print(sum(c.values()) - len(c))\r\n\r\n @staticmethod\r\n def c():\r\n n, m, *uvl = map(int, sys.stdin.read().split())\r\n u, v, l = np.array(uvl).reshape(m, 3).T\r\n u -= 1\r\n v -= 1\r\n g = csgraph_to_dense(csr_matrix((l, (u, v)), (n, n)))\r\n g += g.T\r\n g[g == 0] = np.inf\r\n dist0 = g[0].copy()\r\n g[0] = 0\r\n g[:, 0] = 0\r\n dist = shortest_path(g, method=\"FW\", directed=False)\r\n u, v = np.array([*itertools.combinations(range(1, n), 2)]).T\r\n res = (dist0[u] + dist[u, v] + dist0[v]).min()\r\n print(-1 if res == np.inf else int(res))\r\n\r\n @staticmethod\r\n def d():\r\n n, *ab = map(int, sys.stdin.read().split())\r\n c = np.array(ab).reshape(2, n, 2)\r\n g = c.mean(axis=1)\r\n d = np.sqrt(((c - g[:, None, :]) ** 2).sum(axis=-1)).sum(axis=1)\r\n print(d[1] / d[0])\r\n\r\n class ABC023:\r\n @staticmethod\r\n def a():\r\n print(sum(divmod(int(sys.stdin.readline().rstrip()), 10)))\r\n\r\n @staticmethod\r\n def b():\r\n n, s = sys.stdin.read().split()\r\n n = int(n)\r\n t = \"b\"\r\n for i in range(n // 2):\r\n if i % 3 == 0:\r\n t = \"a\" + t + \"c\"\r\n elif i % 3 == 1:\r\n t = \"c\" + t + \"a\"\r\n else:\r\n t = \"b\" + t + \"b\"\r\n print(n // 2 if t == s else -1)\r\n\r\n @staticmethod\r\n def b_2():\r\n n, s = sys.stdin.read().split()\r\n n = int(n)\r\n if n & 1 ^ 1:\r\n print(-1)\r\n return\r\n a = list(\"abc\")\r\n i = (1 - n // 2) % 3\r\n for c in s:\r\n if c != a[i]:\r\n print(-1)\r\n return\r\n i = (i + 1) % 3\r\n print(n // 2)\r\n\r\n @staticmethod\r\n def c():\r\n h, w, k, n, *rc = map(int, sys.stdin.read().split())\r\n r, c = np.array(rc).reshape(n, 2).T - 1\r\n rb = np.bincount(r, minlength=h)\r\n cb = np.bincount(c, minlength=w)\r\n rbb = np.bincount(rb, minlength=k + 1)\r\n cbb = np.bincount(cb, minlength=k + 1)\r\n tot = (rbb[: k + 1] * cbb[k::-1]).sum()\r\n real = np.bincount(rb[r] + cb[c] - 1, minlength=k + 1)\r\n print(tot - real[k - 1] + real[k])\r\n\r\n @staticmethod\r\n def d():\r\n n, *hs = map(int, sys.stdin.read().split())\r\n h, s = np.array(hs).reshape(n, 2).T\r\n\r\n t = np.arange(n)\r\n\r\n def is_ok(x):\r\n return np.all(np.sort((x - h) // s) >= t)\r\n\r\n def binary_search():\r\n lo, hi = 0, 10**14\r\n while lo + 1 < hi:\r\n x = (lo + hi) // 2\r\n if is_ok(x):\r\n hi = x\r\n else:\r\n lo = x\r\n return hi\r\n\r\n print(binary_search())\r\n\r\n class ABC024:\r\n @staticmethod\r\n def a():\r\n a, b, c, k, s, t = map(int, sys.stdin.read().split())\r\n print(a * s + b * t - c * (s + t) * (s + t >= k))\r\n\r\n @staticmethod\r\n def b():\r\n n, t, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n print(np.minimum(a[1:] - a[:-1], t).sum() + t)\r\n\r\n @staticmethod\r\n def c():\r\n n, d, k, *lrst = map(int, sys.stdin.read().split())\r\n lrst = np.array(lrst)\r\n lr = lrst[: 2 * d].reshape(d, 2)\r\n s, t = lrst[2 * d :].reshape(k, 2).T\r\n day = np.zeros((k,), dtype=np.int32)\r\n for i in range(d):\r\n l, r = lr[i]\r\n move = (l <= s) & (s <= r) & (s != t)\r\n reach = move & (l <= t) & (t <= r)\r\n s[move & (s < t)] = r\r\n s[move & (s > t)] = l\r\n s[reach] = t[reach]\r\n day[reach] = i + 1\r\n print(*day, sep=\"\\n\")\r\n\r\n @staticmethod\r\n def d():\r\n a, b, c = map(int, sys.stdin.read().split())\r\n p = MOD\r\n denom = pow(a * b % p - b * c % p + c * a % p, p - 
2, p)\r\n w = (b * c - a * b) % p * denom % p\r\n h = (b * c - a * c) % p * denom % p\r\n print(h, w)\r\n\r\n class ABC025:\r\n @staticmethod\r\n def a():\r\n s, n = sys.stdin.read().split()\r\n n = int(n)\r\n i, j = divmod(n - 1, 5)\r\n print(s[i] + s[j])\r\n\r\n @staticmethod\r\n def b():\r\n n, a, b = map(int, sys.stdin.readline().split())\r\n res = defaultdict(int)\r\n for _ in range(n):\r\n s, d = sys.stdin.readline().split()\r\n d = int(d)\r\n res[s] += min(max(d, a), b)\r\n res = res[\"East\"] - res[\"West\"]\r\n if res == 0:\r\n ans = 0\r\n elif res > 0:\r\n ans = f\"East {res}\"\r\n else:\r\n ans = f\"West {-res}\"\r\n print(ans)\r\n\r\n @staticmethod\r\n def c():\r\n b = [0] * 6\r\n for i in range(2):\r\n (*row,) = map(int, sys.stdin.readline().split())\r\n for j in range(3):\r\n b[i * 3 + j] = row[j]\r\n c = [0] * 8\r\n for i in range(3):\r\n (*row,) = map(int, sys.stdin.readline().split())\r\n for j in range(2):\r\n c[i * 3 + j] = row[j]\r\n tot = sum(b) + sum(c)\r\n\r\n @lru_cache(maxsize=None)\r\n def f(s=tuple(0 for _ in range(9))):\r\n if all(s):\r\n res = 0\r\n for i in range(6):\r\n res += (s[i] == s[i + 3]) * b[i]\r\n for i in range(8):\r\n res += (s[i] == s[i + 1]) * c[i]\r\n return res\r\n cand = [i for i in range(9) if not s[i]]\r\n flg = len(cand) & 1\r\n s = list(s)\r\n res = []\r\n for i in cand:\r\n s[i] = (flg ^ 1) + 1\r\n res.append(f(tuple(s)))\r\n s[i] = 0\r\n return sorted(res, reverse=flg)[0]\r\n\r\n a = f()\r\n b = tot - a\r\n print(a)\r\n print(b)\r\n\r\n class ABC026:\r\n @staticmethod\r\n def a():\r\n a = int(sys.stdin.readline().rstrip())\r\n print(a // 2 * (a - a // 2))\r\n\r\n @staticmethod\r\n def b():\r\n n, *r = map(int, sys.stdin.read().split())\r\n s = np.pi * np.array([0] + r) ** 2\r\n s.sort()\r\n res = s[n::-2].sum() - s[n - 1 :: -2].sum()\r\n print(res)\r\n\r\n @staticmethod\r\n def c():\r\n n, *b = map(int, sys.stdin.read().split())\r\n g = GeometryTopology.Graph()\r\n for i in range(1, n):\r\n g.add_edge(b[i - 1] - 1, i, weight=1)\r\n\r\n def f(u=0):\r\n if not g.edges[u]:\r\n return 1\r\n s = [f(v) for v in g.edges[u]]\r\n return max(s) + min(s) + 1\r\n\r\n print(f())\r\n\r\n @staticmethod\r\n def d():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n\r\n def f(t):\r\n return a * t + b * np.sin(c * t * np.pi) - 100\r\n\r\n print(optimize.brenth(f, 0, 200))\r\n\r\n class ABC027:\r\n @staticmethod\r\n def a():\r\n l = [int(l) for l in sys.stdin.readline().split()]\r\n l.sort()\r\n print(l[2] if l[0] == l[1] else l[0])\r\n\r\n @staticmethod\r\n def b():\r\n n, *a = map(int, sys.stdin.read().split())\r\n m, r = divmod(sum(a), n)\r\n if r:\r\n print(-1)\r\n return\r\n population = 0\r\n towns = 0\r\n cnt = 0\r\n for x in a:\r\n population += x\r\n towns += 1\r\n if population / towns != m:\r\n cnt += 1\r\n continue\r\n population, towns = 0, 0\r\n print(cnt)\r\n\r\n @staticmethod\r\n def c():\r\n n = int(sys.stdin.readline().rstrip())\r\n flg = n.bit_length() & 1 ^ 1\r\n t = 0\r\n x = 1\r\n while x <= n:\r\n t += 1\r\n x = 2 * x + 1 if t & 1 ^ flg else 2 * x\r\n print(\"Aoki\" if t & 1 else \"Takahashi\")\r\n\r\n class ABC028:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n print(\r\n \"Bad\"\r\n if n < 60\r\n else \"Good\"\r\n if n < 90\r\n else \"Great\"\r\n if n < 100\r\n else \"Perfect\"\r\n )\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip()\r\n cnt = Counter(s)\r\n print(*[cnt.get(c, 0) for c in \"ABCDEF\"])\r\n\r\n @staticmethod\r\n def c():\r\n a, b, c, d, e = map(int, 
sys.stdin.readline().split())\r\n print(max(b + c + e, a + d + e))\r\n\r\n @staticmethod\r\n def d():\r\n n, k = map(int, sys.stdin.readline().split())\r\n c = 3 * 2 * (n - k) * (k - 1) + 3 * (n - 1) + 1\r\n print(c / n**3)\r\n\r\n class ABC029:\r\n @staticmethod\r\n def a():\r\n print(sys.stdin.readline().rstrip() + \"s\")\r\n\r\n @staticmethod\r\n def b():\r\n print(sum(\"r\" in s for s in sys.stdin.read().split()))\r\n\r\n @staticmethod\r\n def c():\r\n print(\r\n *[\r\n \"\".join(s)\r\n for s in itertools.product(\r\n \"abc\", repeat=int(sys.stdin.readline().rstrip())\r\n )\r\n ],\r\n sep=\"\\n\",\r\n )\r\n\r\n @staticmethod\r\n def d():\r\n n = int(sys.stdin.readline().rstrip())\r\n print(\r\n sum(\r\n n // 10 ** (i + 1) * 10**i\r\n + min(max((n % 10 ** (i + 1) - 10**i + 1), 0), 10**i)\r\n for i in range(9)\r\n )\r\n )\r\n\r\n class ABC030:\r\n @staticmethod\r\n def a():\r\n a, b, c, d = map(int, sys.stdin.readline().split())\r\n e, f = b * c, d * a\r\n print(\"TAKAHASHI\" if e > f else \"AOKI\" if f > e else \"DRAW\")\r\n\r\n @staticmethod\r\n def b():\r\n n, m = map(int, sys.stdin.readline().split())\r\n n = (n % 12 + m / 60) * 30\r\n m *= 6\r\n d = abs(n - m)\r\n print(min(d, 360 - d))\r\n\r\n @staticmethod\r\n def c():\r\n n, m = map(int, sys.stdin.readline().split())\r\n x, y = map(int, sys.stdin.readline().split())\r\n a = [int(x) for x in sys.stdin.readline().split()]\r\n b = [int(x) for x in sys.stdin.readline().split()]\r\n\r\n t = 0\r\n p = 1\r\n cnt = 0\r\n while True:\r\n if p:\r\n i = bi_l(a, t)\r\n if i == n:\r\n break\r\n t = a[i] + x\r\n else:\r\n i = bi_l(b, t)\r\n if i == m:\r\n break\r\n t = b[i] + y\r\n cnt += 1\r\n p ^= 1\r\n print(cnt)\r\n\r\n @staticmethod\r\n def d():\r\n n, a = map(int, sys.stdin.readline().split())\r\n a -= 1\r\n k = sys.stdin.readline().rstrip()\r\n b = [int(x) - 1 for x in sys.stdin.readline().split()]\r\n\r\n c = [None] * n\r\n for i in range(n + 1):\r\n if str(i) == k:\r\n print(a + 1)\r\n return\r\n if c[a] is not None:\r\n l, d = i - c[a], c[a]\r\n break\r\n c[a] = i\r\n a = b[a]\r\n\r\n r = [None] * len(k)\r\n r[0] = 1\r\n for i in range(len(k) - 1):\r\n r[i + 1] = r[i] * 10 % l\r\n k = [int(c) for c in k][::-1]\r\n d = (sum(r[i] * k[i] for i in range(len(k))) - d) % l\r\n for _ in range(d):\r\n a = b[a]\r\n print(a + 1)\r\n\r\n @staticmethod\r\n def d_2():\r\n n, a, k, *b = map(int, sys.stdin.read().split())\r\n a -= 1\r\n b = [x - 1 for x in b]\r\n c = [None] * n\r\n for i in range(n + 1):\r\n if i == k:\r\n print(a + 1)\r\n return\r\n if c[a] is not None:\r\n for _ in range((k - c[a]) % (i - c[a])):\r\n a = b[a]\r\n print(a + 1)\r\n return\r\n c[a] = i\r\n a = b[a]\r\n\r\n class ABC031:\r\n @staticmethod\r\n def a():\r\n a, d = map(int, sys.stdin.readline().split())\r\n if a > d:\r\n a, d = d, a\r\n print((a + 1) * d)\r\n\r\n @staticmethod\r\n def b():\r\n l, h, n, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n res = np.maximum(l - a, 0)\r\n res[a > h] = -1\r\n print(*res, sep=\"\\n\")\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n np.cumsum(a[::2], out=a[::2])\r\n np.cumsum(a[1::2], out=a[1::2])\r\n a = list(a) + [0] * 2\r\n\r\n def score(i, j):\r\n if i > j:\r\n i, j = j, i\r\n if (j - i) & 1:\r\n x, y = a[j - 1] - a[i - 2], a[j] - a[i - 1]\r\n else:\r\n x, y = a[j] - a[i - 2], a[j - 1] - a[i - 1]\r\n return x, y\r\n\r\n res = -inf\r\n for i in range(n):\r\n s = -inf\r\n for j in range(n):\r\n if i == j:\r\n continue\r\n x, y = score(i, j)\r\n if y > 
s:\r\n s, t = y, x\r\n res = max(res, t)\r\n print(res)\r\n\r\n @staticmethod\r\n def d():\r\n k, m = map(int, sys.stdin.readline().split())\r\n (*vw,) = zip(*[iter(sys.stdin.read().split())] * 2)\r\n for l in itertools.product((1, 2, 3), repeat=k):\r\n s = dict()\r\n for v, w in vw:\r\n i = 0\r\n for d in v:\r\n d = int(d) - 1\r\n j = i + l[d]\r\n if j > len(w):\r\n break\r\n t = w[i:j]\r\n if d in s and s[d] != t:\r\n break\r\n s[d] = t\r\n i = j\r\n else:\r\n if i == len(w):\r\n continue\r\n break\r\n else:\r\n for i in range(k):\r\n print(s[i])\r\n return\r\n\r\n class ABC032:\r\n @staticmethod\r\n def a():\r\n a, b, n = map(int, sys.stdin.read().split())\r\n l = NumberTheory.lcm(a, b)\r\n print((n + l - 1) // l * l)\r\n\r\n @staticmethod\r\n def b():\r\n s, k = sys.stdin.read().split()\r\n k = int(k)\r\n res = set()\r\n for i in range(len(s) - k + 1):\r\n res.add(s[i : i + k])\r\n print(len(res))\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *s = map(int, sys.stdin.read().split())\r\n if 0 in s:\r\n print(n)\r\n return\r\n if k == 0:\r\n print(0)\r\n return\r\n res, tmp, l = 0, 1, 0\r\n for r in range(n):\r\n tmp *= s[r]\r\n while tmp > k:\r\n tmp //= s[l]\r\n l += 1\r\n res = max(res, r - l + 1)\r\n\r\n print(res)\r\n\r\n class ABC033:\r\n @staticmethod\r\n def a():\r\n print(\r\n \"SAME\"\r\n if len(set(sys.stdin.readline().rstrip())) == 1\r\n else \"DIFFERENT\"\r\n )\r\n\r\n @staticmethod\r\n def b():\r\n n = int(sys.stdin.readline().rstrip())\r\n res = dict()\r\n for _ in range(n):\r\n s, p = sys.stdin.readline().split()\r\n res[s] = int(p)\r\n tot = sum(res.values())\r\n for s, p in res.items():\r\n if p > tot / 2:\r\n print(s)\r\n return\r\n print(\"atcoder\")\r\n\r\n @staticmethod\r\n def c():\r\n s = sys.stdin.readline().rstrip()\r\n print(sum(not \"0\" in f for f in s.split(\"+\")))\r\n\r\n class ABC034:\r\n @staticmethod\r\n def a():\r\n x, y = map(int, sys.stdin.readline().split())\r\n print(\"Better\" if y > x else \"Worse\")\r\n\r\n @staticmethod\r\n def b():\r\n n = int(sys.stdin.readline().rstrip())\r\n print(n + 1 if n & 1 else n - 1)\r\n\r\n @staticmethod\r\n def c():\r\n h, w = map(int, sys.stdin.read().split())\r\n choose = Combinatorics.CombinationsMod()\r\n print(choose(h + w - 2, h - 1))\r\n\r\n @staticmethod\r\n def d():\r\n n, k, *wp = map(int, sys.stdin.read().split())\r\n w, p = np.array(wp).reshape(-1, 2).T\r\n\r\n def f(x):\r\n return np.sort(w * (p - x))[-k:].sum()\r\n\r\n print(optimize.bisect(f, 0, 100))\r\n\r\n class ABC035:\r\n @staticmethod\r\n def a():\r\n w, h = map(int, sys.stdin.readline().split())\r\n print(\"4:3\" if 4 * h == 3 * w else \"16:9\")\r\n\r\n @staticmethod\r\n def b():\r\n s, t = sys.stdin.read().split()\r\n y = x = z = 0\r\n for c in s:\r\n if c == \"?\":\r\n z += 1\r\n elif c == \"L\":\r\n x -= 1\r\n elif c == \"R\":\r\n x += 1\r\n elif c == \"D\":\r\n y -= 1\r\n elif c == \"U\":\r\n y += 1\r\n d = abs(y) + abs(x)\r\n print(d + z if t == \"1\" else max(d - z, (d - z) & 1))\r\n\r\n @staticmethod\r\n def c():\r\n n, q, *lr = map(int, sys.stdin.read().split())\r\n l, r = np.array(lr).reshape(q, 2).T\r\n res = np.zeros(n + 1, dtype=int)\r\n np.add.at(res, l - 1, 1)\r\n np.subtract.at(res, r, 1)\r\n np.cumsum(res, out=res)\r\n res = res & 1\r\n print(\"\".join(map(str, res[:-1])))\r\n\r\n @staticmethod\r\n def d():\r\n n, m, t = map(int, sys.stdin.readline().split())\r\n point = np.array(sys.stdin.readline().split(), dtype=int)\r\n a, b, c = (\r\n np.array(sys.stdin.read().split(), dtype=np.int64)\r\n .reshape(m, 3)\r\n .T\r\n )\r\n a -= 
1\r\n b -= 1\r\n d_1 = shortest_path(\r\n csr_matrix((c, (a, b)), (n, n)),\r\n method=\"D\",\r\n directed=True,\r\n indices=0,\r\n )\r\n d_2 = shortest_path(\r\n csr_matrix((c, (b, a)), (n, n)),\r\n method=\"D\",\r\n directed=True,\r\n indices=0,\r\n )\r\n print(int(np.amax((t - (d_1 + d_2)) * point)))\r\n\r\n class ABC036:\r\n @staticmethod\r\n def a():\r\n a, b = map(int, sys.stdin.readline().split())\r\n print((b + a - 1) // a)\r\n\r\n @staticmethod\r\n def b():\r\n n, *s = sys.stdin.read().split()\r\n n = int(n)\r\n for j in range(n):\r\n row = \"\"\r\n for i in range(n - 1, -1, -1):\r\n row += s[i][j]\r\n print(row)\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n b = [None] * n\r\n prev = None\r\n j = -1\r\n for i, x in sorted(enumerate(a), key=lambda x: x[1]):\r\n if x != prev:\r\n j += 1\r\n b[i] = j\r\n prev = x\r\n print(*b, sep=\"\\n\")\r\n\r\n @staticmethod\r\n def d():\r\n n, *ab = map(int, sys.stdin.read().split())\r\n edges = [[] for _ in range(n)]\r\n for a, b in zip(*[iter(ab)] * 2):\r\n a -= 1\r\n b -= 1\r\n edges[a].append(b)\r\n edges[b].append(a)\r\n parent = [None] * n\r\n\r\n def count(u):\r\n black, white = 1, 1\r\n for v in edges[u]:\r\n if v == parent[u]:\r\n continue\r\n parent[v] = u\r\n b, w = count(v)\r\n black *= w\r\n black %= MOD\r\n white *= (b + w) % MOD\r\n white %= MOD\r\n return black, white\r\n\r\n print(sum(count(0)) % MOD)\r\n\r\n class ABC037:\r\n @staticmethod\r\n def a():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n print(c // min(a, b))\r\n\r\n @staticmethod\r\n def b():\r\n n, q, *lrt = map(int, sys.stdin.read().split())\r\n a = np.zeros(n, dtype=int)\r\n for l, r, t in zip(*[iter(lrt)] * 3):\r\n a[l - 1 : r] = t\r\n print(*a, sep=\"\\n\")\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *a = map(int, sys.stdin.read().split())\r\n a = np.array([0] + a)\r\n np.cumsum(a, out=a)\r\n s = (a[k:] - a[:-k]).sum()\r\n print(s)\r\n\r\n @staticmethod\r\n def d():\r\n h, w, *a = map(int, sys.stdin.read().split())\r\n p = [None] * (h * w)\r\n\r\n def paths(k):\r\n if p[k]:\r\n return p[k]\r\n p[k] = 1\r\n i, j = divmod(k, w)\r\n if j > 0 and a[k] > a[k - 1]:\r\n p[k] += paths(k - 1)\r\n if j < w - 1 and a[k] > a[k + 1]:\r\n p[k] += paths(k + 1)\r\n if i > 0 and a[k] > a[k - w]:\r\n p[k] += paths(k - w)\r\n if i < h - 1 and a[k] > a[k + w]:\r\n p[k] += paths(k + w)\r\n p[k] %= MOD\r\n return p[k]\r\n\r\n print(sum(paths(i) for i in range(h * w)) % MOD)\r\n\r\n class ABC038:\r\n @staticmethod\r\n def a():\r\n s = sys.stdin.readline().rstrip()\r\n print(\"YES\" if s[-1] == \"T\" else \"NO\")\r\n\r\n @staticmethod\r\n def b():\r\n a, b, c, d = map(int, sys.stdin.read().split())\r\n print(\"YES\" if a == c or b == c or a == d or b == d else \"NO\")\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a += [-1]\r\n cnt = n\r\n tmp = 1\r\n for i in range(n):\r\n if a[i + 1] > a[i]:\r\n tmp += 1\r\n else:\r\n cnt += tmp * (tmp - 1) // 2\r\n tmp = 1\r\n print(cnt)\r\n\r\n @staticmethod\r\n def d():\r\n n, *wh = map(int, sys.stdin.read().split())\r\n a = [\r\n x[1]\r\n for x in sorted(\r\n zip(*[iter(wh)] * 2), key=lambda x: (x[0], -x[1])\r\n )\r\n ]\r\n print(bi_l(DP.LIS(a), inf))\r\n\r\n class ABC039:\r\n @staticmethod\r\n def a():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n print((a * b + b * c + c * a) * 2)\r\n\r\n @staticmethod\r\n def b():\r\n x = int(sys.stdin.readline().rstrip())\r\n for n in range(1, int(x**0.5) + 1):\r\n if pow(n, 4) == x:\r\n print(n)\r\n 
return\r\n\r\n @staticmethod\r\n def c():\r\n board = \"WBWBWWBWBWBW\" * 3\r\n convert = \"Do, *, Re, *, Mi, Fa, *, So, *, La, *, Si\".split(\", \")\r\n s = sys.stdin.readline().rstrip()\r\n print(convert[board.index(s)])\r\n\r\n @staticmethod\r\n def d():\r\n h, w = map(int, sys.stdin.readline().split())\r\n s = \"\".join(sys.stdin.read().split())\r\n white = set()\r\n for i in range(h * w):\r\n if s[i] == \"#\":\r\n continue\r\n l = 0 if i % w == 0 else -1\r\n r = 0 if (i + 1) % w == 0 else 1\r\n white |= {\r\n i + dy + dx\r\n for dy in range(-w, w + 1, w)\r\n for dx in range(l, r + 1)\r\n }\r\n black_before = set(range(h * w)) - white\r\n black_after = set()\r\n for i in black_before:\r\n l = 0 if i % w == 0 else -1\r\n r = 0 if (i + 1) % w == 0 else 1\r\n black_after |= {\r\n i + dy + dx\r\n for dy in range(-w, w + 1, w)\r\n for dx in range(l, r + 1)\r\n }\r\n black_after &= set(range(h * w))\r\n for i in range(h * w):\r\n if s[i] == \"#\" and not i in black_after:\r\n print(\"impossible\")\r\n return\r\n print(\"possible\")\r\n for i in range(h):\r\n print(\r\n \"\".join(\r\n [\r\n \"#\" if i * w + j in black_before else \".\"\r\n for j in range(w)\r\n ]\r\n )\r\n )\r\n\r\n class ABC040:\r\n @staticmethod\r\n def a():\r\n n, x = map(int, sys.stdin.readline().split())\r\n print(min(x - 1, n - x))\r\n\r\n @staticmethod\r\n def b():\r\n n = int(sys.stdin.readline().rstrip())\r\n res = inf\r\n for i in range(1, int(n**0.5) + 1):\r\n res = min(res, n // i - i + n % i)\r\n print(res)\r\n\r\n @staticmethod\r\n def c():\r\n n, *h = map(int, sys.stdin.read().split())\r\n h = [h[0]] + h\r\n cost = [None] * (n + 1)\r\n cost[0] = cost[1] = 0\r\n for i in range(2, n + 1):\r\n cost[i] = min(\r\n cost[i - 2] + abs(h[i] - h[i - 2]),\r\n cost[i - 1] + abs(h[i] - h[i - 1]),\r\n )\r\n print(cost[n])\r\n\r\n @staticmethod\r\n def d():\r\n n, m = map(int, sys.stdin.readline().split())\r\n uf = GeometryTopology.Graph(n)\r\n uf.init_dsu()\r\n queue = []\r\n for _ in range(m):\r\n a, b, y = map(int, sys.stdin.readline().split())\r\n heappush(queue, (-(2 * y), a - 1, b - 1))\r\n q = int(sys.stdin.readline().rstrip())\r\n for i in range(q):\r\n v, y = map(int, sys.stdin.readline().split())\r\n heappush(queue, (-(2 * y + 1), v - 1, i))\r\n res = [None] * q\r\n while queue:\r\n y, i, j = heappop(queue)\r\n if y & 1:\r\n res[j] = uf.size[uf.find(i)]\r\n else:\r\n uf.unite(i, j)\r\n print(*res, sep=\"\\n\")\r\n\r\n class ABC041:\r\n @staticmethod\r\n def a():\r\n s, i = sys.stdin.read().split()\r\n i = int(i)\r\n print(s[i - 1])\r\n\r\n @staticmethod\r\n def b():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n ans = a * b % MOD * c % MOD\r\n print(ans)\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n for i, h in sorted(enumerate(a), key=lambda x: -x[1]):\r\n print(i + 1)\r\n\r\n @staticmethod\r\n def d():\r\n n, _, *xy = map(int, sys.stdin.read().split())\r\n g = [0] * n\r\n for x, y in zip(*[iter(xy)] * 2):\r\n g[x - 1] |= 1 << (y - 1)\r\n res = [0] * (1 << n)\r\n res[0] = 1\r\n for i in range(1 << n):\r\n for j in range(n):\r\n if i >> j & 1 ^ 1:\r\n continue\r\n if not (g[j] & i):\r\n res[i] += res[i & ~(1 << j)]\r\n print(res[-1])\r\n\r\n class ABC042:\r\n @staticmethod\r\n def a():\r\n a = [int(x) for x in sys.stdin.readline().split()]\r\n c = Counter(a)\r\n print(\"YES\" if c[5] == 2 and c[7] == 1 else \"NO\")\r\n\r\n @staticmethod\r\n def b():\r\n n, l, *s = sys.stdin.read().split()\r\n print(\"\".join(sorted(s)))\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *d 
= sys.stdin.read().split()\r\n l = len(n)\r\n ok = sorted(set(string.digits) - set(d))\r\n cand = [\r\n int(\"\".join(p)) for p in itertools.product(ok, repeat=l)\r\n ] + [int(min(x for x in ok if x > \"0\") + min(ok) * l)]\r\n print(cand[bi_l(cand, int(n))])\r\n\r\n @staticmethod\r\n def d():\r\n h, w, a, b = map(int, sys.stdin.read().split())\r\n combinations = Combinatorics.CombinationsMod(\r\n n=2 * 10**5, mod=MOD\r\n )\r\n i = np.arange(h - a, h)\r\n ng = np.sum(\r\n combinations(i + b - 1, i)\r\n * combinations(h - i + w - b - 2, h - 1 - i)\r\n % MOD\r\n )\r\n print((combinations(h + w - 2, h - 1) - ng) % MOD)\r\n\r\n class ABC043:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n print((1 + n) * n // 2)\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip()\r\n t = \"\"\r\n for c in s:\r\n if c == \"B\":\r\n t = t[:-1]\r\n else:\r\n t += c\r\n print(t)\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n x = np.around(a.sum() / n).astype(int)\r\n print(np.sum((a - x) ** 2))\r\n\r\n @staticmethod\r\n def d():\r\n s = sys.stdin.readline().rstrip()\r\n n = len(s)\r\n for i in range(n - 1):\r\n if s[i] == s[i + 1]:\r\n print(i + 1, i + 2)\r\n return\r\n for i in range(n - 2):\r\n if s[i] == s[i + 2]:\r\n print(i + 1, i + 3)\r\n return\r\n print(-1, -1)\r\n\r\n class ABC044:\r\n @staticmethod\r\n def a():\r\n n, k, x, y = map(int, sys.stdin.read().split())\r\n print(min(n, k) * x + max(0, n - k) * y)\r\n\r\n @staticmethod\r\n def b():\r\n res = set(\r\n c & 1 for c in Counter(sys.stdin.readline().rstrip()).values()\r\n )\r\n print(\"Yes\" if len(res) == 1 and res.pop() == 0 else \"No\")\r\n\r\n @staticmethod\r\n def c():\r\n n, a, *x = map(int, sys.stdin.read().split())\r\n dp = np.zeros((n + 1, 2501), dtype=np.int64)\r\n dp[0, 0] = 1\r\n for v in x:\r\n dp[1:, v:] += dp[:-1, :-v]\r\n i = np.arange(1, n + 1)\r\n print(dp[i, i * a].sum())\r\n\r\n @staticmethod\r\n def c_2():\r\n n, a, *x = map(int, sys.stdin.read().split())\r\n for i in range(n):\r\n x[i] -= a\r\n\r\n s = defaultdict(int)\r\n s[0] = 1\r\n for i in range(n):\r\n ns = s.copy()\r\n for k, v in s.items():\r\n ns[k + x[i]] += v\r\n s = ns\r\n print(s[0] - 1)\r\n\r\n @staticmethod\r\n def d():\r\n pass\r\n\r\n class ABC045:\r\n @staticmethod\r\n def a():\r\n a, b, h = map(int, sys.stdin.read().split())\r\n print((a + b) * h // 2)\r\n\r\n @staticmethod\r\n def b():\r\n a, b, c = sys.stdin.read().split()\r\n d = {\"a\": a[::-1], \"b\": b[::-1], \"c\": c[::-1]}\r\n nx = \"a\"\r\n while 1:\r\n if not d[nx]:\r\n print(nx.upper())\r\n return\r\n d[nx], nx = d[nx][:-1], d[nx][-1]\r\n\r\n @staticmethod\r\n def c():\r\n def c(l):\r\n return pow(2, max(0, l - 1))\r\n\r\n s = sys.stdin.readline().rstrip()\r\n n = len(s)\r\n print(\r\n sum(\r\n int(s[i : j + 1]) * c(i) * c(n - 1 - j)\r\n for i in range(n)\r\n for j in range(i, n)\r\n )\r\n )\r\n\r\n @staticmethod\r\n def d():\r\n h, w, n, *ab = map(int, sys.stdin.read().split())\r\n c = defaultdict(int)\r\n for y, x in zip(*[iter(ab)] * 2):\r\n y -= 1\r\n x -= 1\r\n for dy, dx in itertools.product(range(-1, 2), repeat=2):\r\n i, j = y + dy, x + dx\r\n if not (0 < i < h - 1 and 0 < j < w - 1):\r\n continue\r\n c[(i, j)] += 1\r\n c = Counter(c.values())\r\n c[0] = (h - 2) * (w - 2) - sum(c.values())\r\n for i in range(10):\r\n print(c[i])\r\n\r\n class ABC046:\r\n @staticmethod\r\n def a():\r\n print(len(set(sys.stdin.readline().split())))\r\n\r\n @staticmethod\r\n def b():\r\n n, k = map(int, 
sys.stdin.readline().split())\r\n print(k * pow(k - 1, n - 1))\r\n\r\n @staticmethod\r\n def c():\r\n n, *xy = map(int, sys.stdin.read().split())\r\n a, b = 1, 1\r\n for x, y in zip(*[iter(xy)] * 2):\r\n n = max((a + x - 1) // x, (b + y - 1) // y)\r\n a, b = n * x, n * y\r\n print(a + b)\r\n\r\n @staticmethod\r\n def d():\r\n c = Counter(sys.stdin.readline().rstrip())\r\n print((c[\"g\"] - c[\"p\"]) // 2)\r\n\r\n class ABC047:\r\n @staticmethod\r\n def a():\r\n c = sorted(map(int, sys.stdin.readline().split()))\r\n print(\"Yes\" if c[0] + c[1] == c[2] else \"No\")\r\n\r\n @staticmethod\r\n def b():\r\n w, h, n, *xyf = map(int, sys.stdin.read().split())\r\n l, r, d, u = 0, w, 0, h\r\n for x, y, f in zip(*[iter(xyf)] * 3):\r\n if f == 1:\r\n l = max(l, x)\r\n if f == 2:\r\n r = min(r, x)\r\n if f == 3:\r\n d = max(d, y)\r\n if f == 4:\r\n u = min(u, y)\r\n print(max(0, r - l) * max(0, u - d))\r\n\r\n @staticmethod\r\n def c():\r\n s = sys.stdin.readline().rstrip()\r\n print(sum(s[i] != s[i + 1] for i in range(len(s) - 1)))\r\n\r\n @staticmethod\r\n def d():\r\n mn, mx, c = inf, -1, 0\r\n n, t, *a = map(int, sys.stdin.read().split())\r\n for p in a:\r\n if p - mn == mx:\r\n c += 1\r\n elif p - mn > mx:\r\n mx, c = p - mn, 1\r\n mn = min(mn, p)\r\n print(c)\r\n\r\n class ABC048:\r\n @staticmethod\r\n def a():\r\n def initial(s):\r\n return s[0].upper()\r\n\r\n print(\"\".join(map(initial, sys.stdin.readline().split())))\r\n\r\n @staticmethod\r\n def b():\r\n a, b, x = map(int, sys.stdin.readline().split())\r\n print(\r\n b // x - (a - 1) // x\r\n ) # if a=0, (a-1)/x is rounded down to -1.\r\n\r\n @staticmethod\r\n def c():\r\n n, x, *a = map(int, sys.stdin.read().split())\r\n cnt = prev = 0\r\n for i in range(n):\r\n d = prev + a[i] - x\r\n prev = a[i]\r\n if d <= 0:\r\n continue\r\n cnt += d\r\n prev -= d\r\n print(cnt)\r\n\r\n @staticmethod\r\n def d():\r\n s = sys.stdin.readline().rstrip()\r\n print(\"First\" if len(s) & 1 ^ (s[0] == s[-1]) else \"Second\")\r\n\r\n class ABC049:\r\n @staticmethod\r\n def a():\r\n vowels = set(\"aeiou\")\r\n print(\r\n \"vowel\"\r\n if sys.stdin.readline().rstrip() in vowels\r\n else \"consonant\"\r\n )\r\n\r\n @staticmethod\r\n def b():\r\n h, w, *s = sys.stdin.read().split()\r\n for l in s:\r\n for _ in range(2):\r\n print(l)\r\n\r\n @staticmethod\r\n def c():\r\n t = set(\"dream, dreamer, erase, eraser\".split(\", \"))\r\n\r\n def obtainable(s):\r\n while True:\r\n for i in range(5, 8):\r\n if s[-i:] in t:\r\n s = s[:-i]\r\n if not s:\r\n return True\r\n break\r\n else:\r\n return False\r\n\r\n s = sys.stdin.readline().rstrip()\r\n print(\"YES\" if obtainable(s) else \"NO\")\r\n\r\n @staticmethod\r\n def d():\r\n n, k, l = map(int, sys.stdin.readline().split())\r\n uf1 = GeometryTopology.Graph(n)\r\n uf1.init_dsu()\r\n uf2 = GeometryTopology.Graph(n)\r\n uf2.init_dsu()\r\n\r\n def add_edges(uf, m):\r\n for _ in range(m):\r\n x, y = map(int, sys.stdin.readline().split())\r\n x -= 1\r\n y -= 1\r\n uf.unite(x, y)\r\n\r\n add_edges(uf1, k)\r\n add_edges(uf2, l)\r\n\r\n g = defaultdict(list)\r\n for i in range(n):\r\n g[(uf1.find(i), uf2.find(i))].append(i)\r\n\r\n res = [None] * n\r\n for a in g:\r\n for i in g[a]:\r\n res[i] = len(g[a])\r\n\r\n print(*res, sep=\" \")\r\n\r\n class ABC050:\r\n @staticmethod\r\n def a():\r\n print(eval(sys.stdin.readline().rstrip()))\r\n\r\n @staticmethod\r\n def b():\r\n n = int(sys.stdin.readline().rstrip())\r\n t = np.array(sys.stdin.readline().split(), dtype=np.int64)\r\n m, *px = map(int, sys.stdin.read().split())\r\n p, 
x = np.array(px).reshape(m, 2).T\r\n p -= 1\r\n print(*(t.sum() + x - t[p]), sep=\"\\n\")\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = Counter(a)\r\n if n & 1 and not (\r\n a[0] == 1 and all(a[i] == 2 for i in range(2, n, 2))\r\n ):\r\n print(0)\r\n return\r\n if ~n & 1 and any(a[i] != 2 for i in range(1, n, 2)):\r\n print(0)\r\n return\r\n print(pow(2, n // 2, MOD))\r\n\r\n @staticmethod\r\n def d():\r\n pass\r\n\r\n class ABC051:\r\n @staticmethod\r\n def a():\r\n print(\" \".join(sys.stdin.readline().rstrip().split(\",\")))\r\n\r\n @staticmethod\r\n def b():\r\n k, s = map(int, sys.stdin.readline().split())\r\n tot = 0\r\n for x in range(k + 1):\r\n if s - x < 0:\r\n break\r\n if s - x > 2 * k:\r\n continue\r\n tot += s - x + 1 if s - x <= k else 2 * k - (s - x) + 1\r\n print(tot)\r\n\r\n @staticmethod\r\n def c():\r\n x1, y1, x2, y2 = map(int, sys.stdin.readline().split())\r\n dx, dy = x2 - x1, y2 - y1\r\n print(\r\n \"U\" * dy\r\n + \"R\" * (dx + 1)\r\n + \"D\" * (dy + 1)\r\n + \"L\" * (dx + 1)\r\n + \"U\"\r\n + \"L\"\r\n + \"U\" * (dy + 1)\r\n + \"R\" * (dx + 1)\r\n + \"D\" * (dy + 1)\r\n + \"L\" * dx\r\n )\r\n\r\n @staticmethod\r\n def d():\r\n n, m, *abc = map(int, sys.stdin.read().split())\r\n x = np.arange(n)\r\n a, b, c = np.array(abc).reshape(m, 3).T\r\n a -= 1\r\n b -= 1\r\n d = shortest_path(\r\n csr_matrix((c, (a, b)), shape=(n, n)),\r\n method=\"FW\",\r\n directed=False,\r\n ).astype(np.int64)\r\n print(\r\n m\r\n - np.any(\r\n d[x, a[:, None]] + c[:, None] == d[x, b[:, None]], axis=1\r\n ).sum()\r\n )\r\n\r\n class ABC052:\r\n @staticmethod\r\n def a():\r\n a, b, c, d = map(int, sys.stdin.readline().split())\r\n print(max(a * b, c * d))\r\n\r\n @staticmethod\r\n def b():\r\n n, s = sys.stdin.read().split()\r\n n = int(n)\r\n a = [0] * (n + 1)\r\n for i in range(n):\r\n a[i + 1] = a[i] + (1 if s[i] == \"I\" else -1)\r\n print(max(a))\r\n\r\n @staticmethod\r\n def c():\r\n n = int(sys.stdin.readline().rstrip())\r\n pn = NumberTheory.PrimeNumbers(n)\r\n s = 1\r\n for c in pn.factorize_factorial(n).values():\r\n s = s * (c + 1) % MOD\r\n print(s)\r\n\r\n @staticmethod\r\n def d():\r\n n, a, b, *x = map(int, sys.stdin.read().split())\r\n x = np.array(x)\r\n print(np.minimum((x[1:] - x[:-1]) * a, b).sum())\r\n\r\n class ABC053:\r\n @staticmethod\r\n def a():\r\n print(\r\n \"ABC\" if int(sys.stdin.readline().rstrip()) < 1200 else \"ARC\"\r\n )\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip()\r\n print(len(s) - s.find(\"A\") - s[::-1].find(\"Z\"))\r\n\r\n @staticmethod\r\n def c():\r\n x = int(sys.stdin.readline().rstrip())\r\n q, r = divmod(x, 11)\r\n print(2 * q + (r + 5) // 6)\r\n\r\n @staticmethod\r\n def d():\r\n n, *a = map(int, sys.stdin.read().split())\r\n print(n - ((n - len(set(a)) + 1) // 2 * 2))\r\n\r\n class ABC054:\r\n @staticmethod\r\n def a():\r\n def f(x):\r\n return (x + 11) % 13\r\n\r\n a, b = map(int, sys.stdin.readline().split())\r\n print(\"Alice\" if f(a) > f(b) else \"Bob\" if f(a) < f(b) else \"Draw\")\r\n\r\n @staticmethod\r\n def b():\r\n n, m = map(int, sys.stdin.readline().split())\r\n a = [sys.stdin.readline().rstrip() for _ in range(n)]\r\n b = [sys.stdin.readline().rstrip() for _ in range(m)]\r\n\r\n for i in range(n - m + 1):\r\n for j in range(n - m + 1):\r\n for y in range(m):\r\n for x in range(m):\r\n if a[i + y][j + x] == b[y][x]:\r\n continue\r\n break\r\n else:\r\n continue\r\n break\r\n else:\r\n print(\"Yes\")\r\n return\r\n print(\"No\")\r\n\r\n @staticmethod\r\n def 
c():\r\n n, m, *ab = map(int, sys.stdin.read().split())\r\n g = GeometryTopology.Graph(n)\r\n for a, b in zip(*[iter(ab)] * 2):\r\n a -= 1\r\n b -= 1\r\n g.add_edge(a, b)\r\n g.add_edge(b, a)\r\n\r\n cnt = 0\r\n stack = [(0, 1)]\r\n while stack:\r\n u, s = stack.pop()\r\n if s == (1 << n) - 1:\r\n cnt += 1\r\n continue\r\n for v in g.edges[u]:\r\n if s >> v & 1:\r\n continue\r\n stack.append((v, s | 1 << v))\r\n print(cnt)\r\n\r\n @staticmethod\r\n def d():\r\n n, ma, mb, *abc = map(int, sys.stdin.read().split())\r\n dp = np.full((401, 401), np.inf)\r\n dp[0, 0] = 0\r\n for a, b, c in zip(*[iter(abc)] * 3):\r\n np.minimum(dp[a:, b:], dp[:-a, :-b] + c, out=dp[a:, b:])\r\n i = np.arange(1, 400 // max(ma, mb) + 1)\r\n res = dp[i * ma, i * mb].min()\r\n print(int(res) if res != np.inf else -1)\r\n\r\n class ABC055:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n print(800 * n - 200 * (n // 15))\r\n\r\n @staticmethod\r\n def b():\r\n n = int(sys.stdin.readline().rstrip())\r\n fac, _ = Algebra.generate_fac_ifac(n, MOD)\r\n print(fac[-1])\r\n\r\n @staticmethod\r\n def c():\r\n n, m = map(int, sys.stdin.readline().split())\r\n print(m // 2 if m <= 2 * n else n + (m - 2 * n) // 4)\r\n\r\n @staticmethod\r\n def d():\r\n n, s = sys.stdin.read().split()\r\n n = int(n)\r\n s = [1 if c == \"o\" else 0 for c in s]\r\n\r\n def possible(t):\r\n for i in range(1, n - 1):\r\n t[i + 1] = t[i - 1] ^ t[i] ^ s[i]\r\n return (\r\n (t[0] ^ s[0] ^ t[1] ^ t[-1])\r\n | (t[-1] ^ s[-1] ^ t[-2] ^ t[0])\r\n ) ^ 1\r\n\r\n for fst in [(1, 0), (0, 1), (1, 1), (0, 0)]:\r\n t = [None] * n\r\n t[0], t[1] = fst[0], fst[1]\r\n if possible(t):\r\n print(\"\".join(\"S\" if x == 1 else \"W\" for x in t))\r\n return\r\n print(-1)\r\n\r\n class ABC056:\r\n @staticmethod\r\n def a():\r\n def to_i(c):\r\n return 1 if c == \"H\" else 0\r\n\r\n a, b = map(to_i, sys.stdin.readline().split())\r\n print(\"D\" if a ^ b else \"H\")\r\n\r\n @staticmethod\r\n def b():\r\n w, a, b = map(int, sys.stdin.readline().split())\r\n if a > b:\r\n a, b = b, a\r\n print(max(b - (a + w), 0))\r\n\r\n @staticmethod\r\n def c():\r\n x = int(sys.stdin.readline().rstrip())\r\n print(int(math.ceil(math.sqrt(2 * x + 1 / 4) - 0.5)))\r\n\r\n @staticmethod\r\n def d():\r\n n, k, *a = map(int, sys.stdin.read().split())\r\n a = sorted(min(x, k) for x in a)\r\n\r\n def necessary(i):\r\n dp = np.zeros(k, dtype=np.bool)\r\n dp[0] = True\r\n for j in range(n):\r\n if j == i:\r\n continue\r\n dp[a[j] :] += dp[: -a[j]]\r\n return np.any(dp[k - a[i] :])\r\n\r\n def binary_search():\r\n lo, hi = -1, n\r\n while hi - lo > 1:\r\n i = (lo + hi) // 2\r\n if necessary(i):\r\n hi = i\r\n else:\r\n lo = i\r\n return hi\r\n\r\n print(binary_search())\r\n\r\n class ABC057:\r\n @staticmethod\r\n def a():\r\n a, b = map(int, sys.stdin.readline().split())\r\n print((a + b) % 24)\r\n\r\n @staticmethod\r\n def b():\r\n n, m, *I = map(int, sys.stdin.read().split())\r\n I = np.array(I).reshape(-1, 2)\r\n ab, cd = I[:n], I[n:]\r\n print(\r\n *(\r\n np.argmin(\r\n np.absolute(ab[:, None] - cd).sum(axis=-1), axis=-1\r\n )\r\n + 1\r\n ),\r\n sep=\"\\n\",\r\n )\r\n\r\n @staticmethod\r\n def c():\r\n n = int(sys.stdin.readline().rstrip())\r\n divs = NumberTheory.find_divisors(n)\r\n print(len(str(divs[bi_l(divs, math.sqrt(n))])))\r\n\r\n @staticmethod\r\n def d():\r\n c = Combinatorics.choose\r\n n, a, b, *v = map(int, sys.stdin.read().split())\r\n v.sort()\r\n print(sum(v[-a:]) / a)\r\n l, r = bi_l(v, v[-a]), bi_r(v, v[-a])\r\n print(\r\n sum(\r\n c(r - l, i)\r\n for 
i in range(r - n + a, r - max(l, n - b) + 1)\r\n )\r\n if r == n\r\n else c(r - l, r - n + a)\r\n )\r\n\r\n class ABC058:\r\n @staticmethod\r\n def a():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n print(\"YES\" if c - b == b - a else \"NO\")\r\n\r\n @staticmethod\r\n def b():\r\n s, t = sys.stdin.read().split()\r\n a = \"\"\r\n for i in range(len(t)):\r\n a += s[i] + t[i]\r\n if len(s) > len(t):\r\n a += s[-1]\r\n print(a)\r\n\r\n @staticmethod\r\n def c():\r\n n, *s = sys.stdin.read().split()\r\n res = {c: 100 for c in string.ascii_lowercase}\r\n for counter in map(Counter, s):\r\n for (\r\n c,\r\n x,\r\n ) in res.items():\r\n res[c] = min(x, counter[c])\r\n t = \"\"\r\n for c, x in sorted(res.items()):\r\n t += c * x\r\n print(t)\r\n\r\n @staticmethod\r\n def d():\r\n n, m, *xy = map(int, sys.stdin.read().split())\r\n x, y = np.array(xy[:n]), np.array(xy[n:])\r\n print(\r\n (x * (np.arange(n) + 1) - np.cumsum(x)).sum()\r\n % MOD\r\n * ((y * (np.arange(m) + 1) - np.cumsum(y)).sum() % MOD)\r\n % MOD\r\n )\r\n\r\n class ABC059:\r\n @staticmethod\r\n def a():\r\n def initial(s):\r\n return s[0].upper()\r\n\r\n print(\"\".join(map(initial, sys.stdin.readline().split())))\r\n\r\n @staticmethod\r\n def b():\r\n a, b = sys.stdin.read().split()\r\n la, lb = len(a), len(b)\r\n print(\r\n \"GREATER\"\r\n if la > lb\r\n else \"LESS\"\r\n if la < lb\r\n else \"GREATER\"\r\n if a > b\r\n else \"LESS\"\r\n if a < b\r\n else \"EQUAL\"\r\n )\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n c = s = 0\r\n for i in range(n):\r\n s += a[i]\r\n if i & 1 and s >= 0:\r\n c += s + 1\r\n s = -1\r\n elif i & 1 ^ 1 and s <= 0:\r\n c += 1 - s\r\n s = 1\r\n c1 = c\r\n c = s = 0\r\n for i in range(n):\r\n s += a[i]\r\n if i & 1 and s <= 0:\r\n c += 1 - s\r\n s = 1\r\n elif i & 1 ^ 1 and s >= 0:\r\n c += s + 1\r\n s = -1\r\n c2 = c\r\n print(min(c1, c2))\r\n\r\n @staticmethod\r\n def d():\r\n x, y = map(int, sys.stdin.readline().split())\r\n print(\"Brown\" if abs(x - y) <= 1 else \"Alice\")\r\n\r\n class ABC060:\r\n @staticmethod\r\n def a():\r\n a, b, c = sys.stdin.readline().split()\r\n print(\"YES\" if a[-1] == b[0] and b[-1] == c[0] else \"NO\")\r\n\r\n @staticmethod\r\n def b():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n print(\"NO\" if c % NumberTheory.gcd(a, b) else \"YES\")\r\n\r\n @staticmethod\r\n def c():\r\n n, t, *a = map(int, sys.stdin.read().split())\r\n print(sum(min(a[i + 1] - a[i], t) for i in range(n - 1)) + t)\r\n\r\n @staticmethod\r\n def d():\r\n pass\r\n\r\n class ABC061:\r\n @staticmethod\r\n def a():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n print(\"Yes\" if a <= c <= b else \"No\")\r\n\r\n @staticmethod\r\n def b():\r\n n, m, *ab = map(int, sys.stdin.read().split())\r\n ab = np.array(ab) - 1\r\n g = np.zeros(n, dtype=np.int32)\r\n np.add.at(g, ab, 1)\r\n print(*g, sep=\"\\n\")\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *ab = map(int, sys.stdin.read().split())\r\n ab = np.transpose(np.array(ab).reshape(n, 2))\r\n a, b = ab[:, np.argsort(ab[0])]\r\n print(a[np.cumsum(b) >= k][0])\r\n\r\n @staticmethod\r\n def d():\r\n n, m, *abc = map(int, sys.stdin.read().split())\r\n a, b, c = np.array(abc).reshape(m, 3).T\r\n a -= 1\r\n b -= 1\r\n c *= -1\r\n g = csr_matrix(\r\n ([1] * (m + 1), (np.append(a, n - 1), np.append(b, 0))), (n, n)\r\n )\r\n _, labels = connected_components(g, connection=\"strong\")\r\n bl = (labels[a] == labels[0]) & (labels[b] == labels[0])\r\n g = csr_matrix((c[bl], (a[bl], b[bl])), (n, n))\r\n 
try:\r\n print(\r\n -shortest_path(g, method=\"BF\", directed=True, indices=0)[\r\n -1\r\n ].astype(int)\r\n )\r\n except:\r\n print(\"inf\")\r\n\r\n @staticmethod\r\n def d_2():\r\n n, m, *abc = map(int, sys.stdin.read().split())\r\n a, b, c = np.array(abc).reshape(m, 3).T\r\n a -= 1\r\n b -= 1\r\n c *= -1\r\n d = np.full(n, np.inf)\r\n d[0] = 0\r\n for _ in range(n - 1):\r\n np.minimum.at(d, b, d[a] + c)\r\n neg_cycle = np.zeros(n, dtype=np.bool)\r\n for _ in range(n):\r\n np.logical_or.at(neg_cycle, b, d[a] + c < d[b])\r\n np.minimum.at(d, b, d[a] + c)\r\n print(inf if neg_cycle[-1] else -d[-1].astype(int))\r\n\r\n class ABC062:\r\n @staticmethod\r\n def a():\r\n g = [0, 2, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0]\r\n x, y = map(int, sys.stdin.readline().split())\r\n print(\"Yes\" if g[x - 1] == g[y - 1] else \"No\")\r\n\r\n @staticmethod\r\n def b():\r\n h, w = map(int, sys.stdin.readline().split())\r\n a = np.array(\r\n [list(s) for s in sys.stdin.read().split()], dtype=\"U1\"\r\n )\r\n a = np.pad(a, pad_width=1, constant_values=\"#\")\r\n for s in a:\r\n print(\"\".join(s))\r\n\r\n @staticmethod\r\n def c():\r\n h, w = map(int, sys.stdin.readline().split())\r\n if h * w % 3 == 0:\r\n print(0)\r\n return\r\n\r\n def minimize(h, w):\r\n return min(\r\n h,\r\n *(\r\n s[-1] - s[0]\r\n for x in range(w // 3, w // 3 + 2)\r\n for s in (\r\n sorted(\r\n [\r\n h * x,\r\n h // 2 * (w - x),\r\n (h + 1) // 2 * (w - x),\r\n ]\r\n ),\r\n )\r\n ),\r\n )\r\n\r\n print(min(minimize(h, w), minimize(w, h)))\r\n\r\n @staticmethod\r\n def d():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n\r\n def optimize(a):\r\n a = list(a)\r\n l, r = a[:n], a[n:]\r\n heapify(l)\r\n s = [None] * (n + 1)\r\n s[0] = sum(l)\r\n for i in range(n):\r\n x = heappop(l)\r\n heappush(l, max(x, r[i]))\r\n s[i + 1] = s[i] + max(0, r[i] - x)\r\n return np.array(s)\r\n\r\n print(\r\n (\r\n optimize(a[: 2 * n]) + optimize(-a[-1 : n - 1 : -1])[::-1]\r\n ).max()\r\n )\r\n\r\n class ABC063:\r\n @staticmethod\r\n def a():\r\n a = sum(map(int, sys.stdin.readline().split()))\r\n print(\"error\" if a >= 10 else a)\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip()\r\n print(\"yes\" if len(set(s)) == len(s) else \"no\")\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n s = a.sum()\r\n if s % 10:\r\n print(s)\r\n elif not np.count_nonzero(a % 10):\r\n print(0)\r\n else:\r\n print(s - a[a % 10 != 0].min())\r\n\r\n @staticmethod\r\n def d():\r\n n, a, b, *h = map(int, sys.stdin.read().split())\r\n h = np.array(h)\r\n d = a - b\r\n\r\n def possible(c):\r\n hh = h.copy()\r\n np.maximum(hh - b * c, 0, out=hh)\r\n return ((hh + d - 1) // d).sum() <= c\r\n\r\n def binary_search():\r\n lo, hi = 0, 10**9\r\n while hi - lo > 1:\r\n c = (lo + hi) // 2\r\n if possible(c):\r\n hi = c\r\n else:\r\n lo = c\r\n return hi\r\n\r\n print(binary_search())\r\n\r\n class ABC064:\r\n @staticmethod\r\n def a():\r\n r, g, b = map(int, sys.stdin.readline().split())\r\n print(\"NO\" if (10 * g + b) % 4 else \"YES\")\r\n\r\n @staticmethod\r\n def b():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a.sort()\r\n print(a[-1] - a[0])\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = np.bincount(np.minimum(np.array(a) // 400, 8), minlength=9)\r\n mx = np.count_nonzero(a[:-1]) + a[-1]\r\n mn = max(mx - a[-1], 1)\r\n print(mn, mx)\r\n\r\n @staticmethod\r\n def d():\r\n n, s = sys.stdin.read().split()\r\n l = r = 0\r\n for c in s:\r\n if c == 
\"(\":\r\n r += 1\r\n else:\r\n if r == 0:\r\n l += 1\r\n else:\r\n r -= 1\r\n print(\"(\" * l + s + \")\" * r)\r\n\r\n class ABC065:\r\n @staticmethod\r\n def a():\r\n x, a, b = map(int, sys.stdin.readline().split())\r\n y = -a + b\r\n print(\"delicious\" if y <= 0 else \"safe\" if y <= x else \"dangerous\")\r\n\r\n @staticmethod\r\n def b():\r\n n, *a = [int(x) - 1 for x in sys.stdin.read().split()]\r\n i = 0\r\n for c in range(n):\r\n i = a[i]\r\n if i == 1:\r\n print(c + 1)\r\n return\r\n print(-1)\r\n\r\n @staticmethod\r\n def c():\r\n n, m = map(int, sys.stdin.readline().split())\r\n d = abs(n - m)\r\n if d >= 2:\r\n print(0)\r\n return\r\n fac, _ = Algebra.generate_fac_ifac(10**5)\r\n print(fac[n] * fac[m] * (1 if d else 2) % MOD)\r\n\r\n @staticmethod\r\n def d():\r\n n, *xy = map(int, sys.stdin.read().split())\r\n x, y = np.array(xy).reshape(n, 2).T\r\n i = np.argsort(x)\r\n ax, bx, cx = (\r\n i[:-1],\r\n i[1:],\r\n x[\r\n i[1:],\r\n ]\r\n - x[i[:-1]],\r\n )\r\n i = np.argsort(y)\r\n ay, by, cy = (\r\n i[:-1],\r\n i[1:],\r\n y[\r\n i[1:],\r\n ]\r\n - y[i[:-1]],\r\n )\r\n e = np.vstack(\r\n [np.hstack([ax, ay]), np.hstack([bx, by]), np.hstack([cx, cy])]\r\n )\r\n e = e[:, np.argsort(e[-1])]\r\n _, i = np.unique(e[:-1], return_index=True, axis=1)\r\n a, b, c = e[:, i]\r\n print(\r\n minimum_spanning_tree(csr_matrix((c, (a, b)), (n, n)))\r\n .astype(np.int64)\r\n .sum()\r\n )\r\n\r\n @staticmethod\r\n def d_2():\r\n n, *xy = map(int, sys.stdin.read().split())\r\n x, y = xy[::2], xy[1::2]\r\n g = GeometryTopology.Graph(n)\r\n\r\n def make(a):\r\n b = sorted(enumerate(a), key=lambda x: x[1])\r\n for i in range(n - 1):\r\n u, v, w = b[i][0], b[i + 1][0], b[i + 1][1] - b[i][1]\r\n for u, v in [(v, u), (u, v)]:\r\n if not v in g.edges[u]:\r\n g.add_edge(u, v, weight=w)\r\n else:\r\n g.edges[u][v].weight = min(g.edges[u][v].weight, w)\r\n\r\n make(x)\r\n make(y)\r\n _, d = g.kruskal()\r\n # _, d = g.prim()\r\n # _, d = g.boruvka()\r\n print(d)\r\n\r\n class ABC066:\r\n @staticmethod\r\n def a():\r\n print(sum(sorted(map(int, sys.stdin.readline().split()))[:-1]))\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip()\r\n\r\n def f(s):\r\n n = len(s) // 2\r\n return s[:n] == s[n:]\r\n\r\n for i in range(len(s) - 2, 0, -2):\r\n if f(s[:i]):\r\n print(i)\r\n return\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n b = deque()\r\n for i in range(n):\r\n if i & 1:\r\n b.appendleft(a[i])\r\n else:\r\n b.append(a[i])\r\n if n & 1:\r\n b.reverse()\r\n print(*b)\r\n\r\n @staticmethod\r\n def d():\r\n n, *a = map(int, sys.stdin.read().split())\r\n tmp = [None] * (n + 1)\r\n for i in range(n + 1):\r\n if tmp[a[i]] is not None:\r\n d = tmp[a[i]] + n - i\r\n break\r\n tmp[a[i]] = i\r\n k = np.arange(1, n + 2)\r\n c = Combinatorics.CombinationsMod(n + 1, MOD)\r\n print(*((c(n + 1, k) - c(d, k - 1)) % MOD), sep=\"\\n\")\r\n\r\n class ABC067:\r\n @staticmethod\r\n def a():\r\n pass\r\n\r\n @staticmethod\r\n def b():\r\n pass\r\n\r\n @staticmethod\r\n def c():\r\n pass\r\n\r\n @staticmethod\r\n def d():\r\n n, *ab = map(int, sys.stdin.read().split())\r\n g = GeometryTopology.Graph(n)\r\n for a, b in zip(*[iter(ab)] * 2):\r\n a -= 1\r\n b -= 1\r\n g.add_edge(a, b)\r\n g.add_edge(b, a)\r\n d1, d2 = g.bfs(0), g.bfs(n - 1)\r\n print(\r\n \"Fennec\"\r\n if sum(d1[i] <= d2[i] for i in range(n)) > n // 2\r\n else \"Snuke\"\r\n )\r\n\r\n class ABC068:\r\n @staticmethod\r\n def a():\r\n pass\r\n\r\n @staticmethod\r\n def b():\r\n pass\r\n\r\n @staticmethod\r\n 
def c():\r\n pass\r\n\r\n @staticmethod\r\n def d():\r\n k = int(sys.stdin.readline().rstrip())\r\n n = 50\r\n print(n)\r\n q, r = divmod(k, n)\r\n a = np.arange(n - 1, -1, -1) + q\r\n a[:r] += 1\r\n print(*a)\r\n\r\n class ABC069:\r\n @staticmethod\r\n def a():\r\n pass\r\n\r\n @staticmethod\r\n def b():\r\n pass\r\n\r\n @staticmethod\r\n def c():\r\n pass\r\n\r\n @staticmethod\r\n def d():\r\n h, w, n, *a = map(int, sys.stdin.read().split())\r\n c = [i + 1 for i in range(n) for j in range(a[i])]\r\n for i in range(h):\r\n row = c[i * w : (i + 1) * w]\r\n if i & 1:\r\n row = row[::-1]\r\n print(*row)\r\n\r\n class ABC070:\r\n @staticmethod\r\n def d():\r\n n = int(sys.stdin.readline().rstrip())\r\n g = GeometryTopology.Graph(n)\r\n for _ in range(n - 1):\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n a -= 1\r\n b -= 1\r\n g.add_edge(a, b, weight=c)\r\n g.add_edge(b, a, weight=c)\r\n q, k = map(int, sys.stdin.readline().split())\r\n d = g.bfs(k - 1)\r\n for _ in range(q):\r\n x, y = map(int, sys.stdin.readline().split())\r\n x -= 1\r\n y -= 1\r\n print(d[x] + d[y])\r\n\r\n class ABC071:\r\n @staticmethod\r\n def d():\r\n n, *s = sys.stdin.read().split()\r\n n = int(n)\r\n s = list(zip(*s))\r\n dp = [0] * n\r\n dp[0] = 3 if s[0][0] == s[0][1] else 6\r\n for i in range(1, n):\r\n dp[i] = dp[i - 1]\r\n if s[i][0] == s[i - 1][0]:\r\n continue\r\n dp[i] *= (\r\n 2\r\n if s[i - 1][0] == s[i - 1][1]\r\n else 3\r\n if s[i][0] != s[i][1]\r\n else 1\r\n )\r\n dp[i] %= MOD\r\n print(dp[-1])\r\n\r\n class ABC072:\r\n @staticmethod\r\n def d():\r\n n, *p = map(int, sys.stdin.read().split())\r\n p += [-1]\r\n cnt, i = 0, 0\r\n while i < n:\r\n if p[i] == i + 1:\r\n cnt += p[i] == i + 1\r\n if p[i + 1] == i + 2:\r\n i += 1\r\n i += 1\r\n print(cnt)\r\n\r\n class ABC073:\r\n @staticmethod\r\n def a():\r\n pass\r\n\r\n @staticmethod\r\n def b():\r\n pass\r\n\r\n @staticmethod\r\n def c():\r\n pass\r\n\r\n @staticmethod\r\n def d():\r\n n, m, r, *I = map(int, sys.stdin.read().split())\r\n I = np.array(I)\r\n a, b, c = I[r:].reshape(m, 3).T\r\n d = shortest_path(\r\n csr_matrix((c, (a - 1, b - 1)), (n, n)),\r\n method=\"FW\",\r\n directed=False,\r\n ).astype(np.int32)\r\n r = np.array([*itertools.permutations(I[:r] - 1)])\r\n print((d[r[:, :-1], r[:, 1:]].sum(axis=1)).min())\r\n\r\n class ABC074:\r\n @staticmethod\r\n def a():\r\n pass\r\n\r\n @staticmethod\r\n def b():\r\n pass\r\n\r\n @staticmethod\r\n def c():\r\n pass\r\n\r\n @staticmethod\r\n def d():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a, dtype=np.int32).reshape(n, n)\r\n b = shortest_path(a, method=\"FW\").astype(np.int32)\r\n if (b < a).any():\r\n print(-1)\r\n return\r\n np.fill_diagonal(b, 10**9)\r\n a[np.any(b[:, None] + b <= a[:, :, None], axis=2)] = 0\r\n print(a.sum() // 2)\r\n\r\n class ABC075:\r\n @staticmethod\r\n def a():\r\n pass\r\n\r\n @staticmethod\r\n def b():\r\n pass\r\n\r\n @staticmethod\r\n def c():\r\n pass\r\n\r\n @staticmethod\r\n def d():\r\n n, k, *xy = map(int, sys.stdin.read().split())\r\n xy = np.array(xy).reshape(n, 2)\r\n x_y = xy.copy()[np.argsort(xy[:, 0])]\r\n y_x = xy.copy()[np.argsort(xy[:, 1])]\r\n comb = np.array([*itertools.combinations(range(n), 2)])\r\n i1, i2 = comb.T\r\n j1, j2 = comb[None, :].T\r\n s = (y_x[:, 1][i2] - y_x[:, 1][i1]) * (\r\n x_y[:, 0][j2] - x_y[:, 0][j1]\r\n )\r\n c = np.zeros((n + 1, n + 1), dtype=np.int64)\r\n for i in range(n):\r\n c[i + 1, 1:] += c[i, 1:] + (y_x[i, 0] <= x_y[:, 0])\r\n a = c[i2 + 1, j2 + 1] - c[i2 + 1, j1] - c[i1, j2 + 1] + c[i1, 
j1]\r\n print(s[a >= k].min())\r\n\r\n class ABC076:\r\n @staticmethod\r\n def d():\r\n n, *tv = map(int, sys.stdin.read().split())\r\n t, v = np.array(tv).reshape(2, n)\r\n t = np.pad(t, pad_width=[2, 1], constant_values=0)\r\n np.cumsum(t, out=t)\r\n l, r = t[:-1], t[1:]\r\n v = np.pad(v, pad_width=[1, 1], constant_values=0)\r\n x = np.arange(0, r[-1] + 0.1, 0.5, dtype=np.float32)[:, None]\r\n # y = np.stack([v-(x-l), np.zeros(r[-1]*2+1, dtype=np.float32)[:,None]+v, v+(x-r)]).max(axis=0).min(axis=1)\r\n mx = v - (x - l)\r\n np.maximum(mx, v, out=mx)\r\n np.maximum(mx, v + (x - r), out=mx)\r\n y = mx.min(axis=1)\r\n print(((y[:-1] + y[1:]) / 4).sum())\r\n\r\n class ABC077:\r\n @staticmethod\r\n def d():\r\n k = int(sys.stdin.readline().rstrip())\r\n g = GeometryTopology.Graph(k)\r\n for i in range(k):\r\n g.add_edge(i, i * 10 % k, weight=0)\r\n g.add_edge(i, (i + 1) % k, update=False, weight=1)\r\n print(1 + g.bfs01(1)[0])\r\n\r\n class ABC078:\r\n @staticmethod\r\n def d():\r\n n, z, w, *a = map(int, sys.stdin.read().split())\r\n print(\r\n abs(a[0] - w)\r\n if n == 1\r\n else max(abs(a[-1] - w), abs(a[-1] - a[-2]))\r\n )\r\n\r\n class ABC079:\r\n @staticmethod\r\n def d():\r\n h, w, *I = map(int, sys.stdin.read().split())\r\n I = np.array(I)\r\n c = I[:100].reshape(10, 10)\r\n a = I[100:].reshape(h, w)\r\n c = shortest_path(c.T, method=\"D\", indices=1).astype(np.int32)\r\n print(c[a[a != -1]].sum())\r\n\r\n class ABC080:\r\n @staticmethod\r\n def d():\r\n n, c, *stc = map(int, sys.stdin.read().split())\r\n using = np.zeros((c, 10**5 + 2), dtype=np.int8)\r\n s, t, c = np.array(stc).reshape(n, 3).T\r\n np.add.at(using, (c - 1, s), 1)\r\n np.subtract.at(using, (c - 1, t + 1), 1)\r\n np.cumsum(using, axis=1, out=using)\r\n print(np.count_nonzero(using, axis=0).max())\r\n\r\n class ABC081:\r\n @staticmethod\r\n def d():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n i = np.argmax(np.absolute(a))\r\n # a +=\r\n print(2 * n)\r\n for j in range(n):\r\n print(i + 1, j + 1)\r\n if a[i] >= 0:\r\n for j in range(n - 1):\r\n print(j + 1, j + 2)\r\n else:\r\n for j in range(n - 1, 0, -1):\r\n print(j + 1, j)\r\n\r\n class ABC082:\r\n pass\r\n\r\n class ABC083:\r\n pass\r\n\r\n class ABC084:\r\n pass\r\n\r\n class ABC085:\r\n pass\r\n\r\n class ABC086:\r\n pass\r\n\r\n class ABC087:\r\n pass\r\n\r\n class ABC088:\r\n pass\r\n\r\n class ABC089:\r\n pass\r\n\r\n class ABC090:\r\n pass\r\n\r\n class ABC091:\r\n pass\r\n\r\n class ABC092:\r\n pass\r\n\r\n class ABC093:\r\n pass\r\n\r\n class ABC094:\r\n pass\r\n\r\n class ABC095:\r\n pass\r\n\r\n class ABC096:\r\n pass\r\n\r\n class ABC097:\r\n pass\r\n\r\n class ABC098:\r\n pass\r\n\r\n class ABC099:\r\n pass\r\n\r\n class ABC100:\r\n pass\r\n\r\n class ABC101:\r\n pass\r\n\r\n class ABC102:\r\n pass\r\n\r\n class ABC103:\r\n pass\r\n\r\n class ABC104:\r\n pass\r\n\r\n class ABC105:\r\n pass\r\n\r\n class ABC106:\r\n pass\r\n\r\n class ABC107:\r\n pass\r\n\r\n class ABC108:\r\n pass\r\n\r\n class ABC109:\r\n pass\r\n\r\n class ABC110:\r\n pass\r\n\r\n class ABC111:\r\n pass\r\n\r\n class ABC112:\r\n pass\r\n\r\n class ABC113:\r\n pass\r\n\r\n class ABC114:\r\n pass\r\n\r\n class ABC115:\r\n pass\r\n\r\n class ABC116:\r\n pass\r\n\r\n class ABC117:\r\n pass\r\n\r\n class ABC118:\r\n pass\r\n\r\n class ABC119:\r\n pass\r\n\r\n class ABC120:\r\n pass\r\n\r\n class ABC121:\r\n pass\r\n\r\n class ABC122:\r\n pass\r\n\r\n class ABC123:\r\n pass\r\n\r\n class ABC124:\r\n pass\r\n\r\n class ABC125:\r\n pass\r\n\r\n class ABC126:\r\n 
pass\r\n\r\n class ABC127:\r\n pass\r\n\r\n class ABC128:\r\n pass\r\n\r\n class ABC129:\r\n pass\r\n\r\n class ABC130:\r\n pass\r\n\r\n class ABC131:\r\n pass\r\n\r\n class ABC132:\r\n pass\r\n\r\n class ABC133:\r\n pass\r\n\r\n class ABC134:\r\n pass\r\n\r\n class ABC135:\r\n pass\r\n\r\n class ABC136:\r\n pass\r\n\r\n class ABC137:\r\n pass\r\n\r\n class ABC138:\r\n pass\r\n\r\n class ABC139:\r\n pass\r\n\r\n class ABC140:\r\n pass\r\n\r\n class ABC141:\r\n pass\r\n\r\n class ABC142:\r\n pass\r\n\r\n class ABC143:\r\n pass\r\n\r\n class ABC144:\r\n pass\r\n\r\n class ABC145:\r\n pass\r\n\r\n class ABC146:\r\n pass\r\n\r\n class ABC147:\r\n pass\r\n\r\n class ABC148:\r\n pass\r\n\r\n class ABC149:\r\n pass\r\n\r\n class ABC150:\r\n pass\r\n\r\n class ABC151:\r\n pass\r\n\r\n class ABC152:\r\n pass\r\n\r\n class ABC153:\r\n pass\r\n\r\n class ABC154:\r\n pass\r\n\r\n class ABC155:\r\n pass\r\n\r\n class ABC156:\r\n pass\r\n\r\n class ABC157:\r\n pass\r\n\r\n class ABC158:\r\n pass\r\n\r\n class ABC159:\r\n pass\r\n\r\n class ABC160:\r\n pass\r\n\r\n class ABC161:\r\n pass\r\n\r\n class ABC162:\r\n pass\r\n\r\n class ABC163:\r\n pass\r\n\r\n class ABC164:\r\n pass\r\n\r\n class ABC165:\r\n pass\r\n\r\n class ABC166:\r\n pass\r\n\r\n class ABC167:\r\n pass\r\n\r\n class ABC168:\r\n pass\r\n\r\n class ABC169:\r\n pass\r\n\r\n class ABC170:\r\n @staticmethod\r\n def a():\r\n x = [int(x) for x in sys.stdin.readline().split()]\r\n for i in range(5):\r\n if x[i] != i + 1:\r\n print(i + 1)\r\n break\r\n\r\n @staticmethod\r\n def b():\r\n x, y = map(int, sys.stdin.readline().split())\r\n print(\"Yes\" if 2 * x <= y <= 4 * x and y % 2 == 0 else \"No\")\r\n\r\n @staticmethod\r\n def c():\r\n x, n, *p = map(int, sys.stdin.read().split())\r\n a = list(set(range(102)) - set(p))\r\n a = [(abs(y - x), y) for y in a]\r\n print(sorted(a)[0][1])\r\n\r\n @staticmethod\r\n def d():\r\n n, *a = map(int, sys.stdin.read().split())\r\n cand = set(a)\r\n cnt = 0\r\n for x, c in sorted(Counter(a).items()):\r\n cnt += c == 1 and x in cand\r\n cand -= set(range(x * 2, 10**6 + 1, x))\r\n print(cnt)\r\n\r\n @staticmethod\r\n def e():\r\n n, q = map(int, sys.stdin.readline().split())\r\n queue = []\r\n m = 2 * 10**5\r\n infants = [[] for _ in range(m)]\r\n highest_rate = [None] * m\r\n where = [None] * n\r\n rate = [None] * n\r\n\r\n def entry(i, k):\r\n where[i] = k\r\n while infants[k]:\r\n r, j = heappop(infants[k])\r\n if where[j] != k or j == i:\r\n continue\r\n if rate[i] >= -r:\r\n highest_rate[k] = rate[i]\r\n heappush(queue, (rate[i], k, i))\r\n heappush(infants[k], (r, j))\r\n break\r\n else:\r\n highest_rate[k] = rate[i]\r\n heappush(queue, (rate[i], k, i))\r\n heappush(infants[k], (-rate[i], i))\r\n\r\n def transfer(i, k):\r\n now = where[i]\r\n while infants[now]:\r\n r, j = heappop(infants[now])\r\n if where[j] != now or j == i:\r\n continue\r\n if highest_rate[now] != -r:\r\n highest_rate[now] = -r\r\n heappush(queue, (-r, now, j))\r\n heappush(infants[now], (r, j))\r\n break\r\n else:\r\n highest_rate[now] = None\r\n entry(i, k)\r\n\r\n def inquire():\r\n while True:\r\n r, k, i = heappop(queue)\r\n if where[i] != k or r != highest_rate[k]:\r\n continue\r\n heappush(queue, (r, k, i))\r\n return r\r\n\r\n for i in range(n):\r\n a, b = map(int, sys.stdin.readline().split())\r\n rate[i] = a\r\n entry(i, b - 1)\r\n for _ in range(q):\r\n c, d = map(int, sys.stdin.readline().split())\r\n transfer(c - 1, d - 1)\r\n print(inquire())\r\n\r\n class ABC171:\r\n @staticmethod\r\n def a():\r\n c = 
sys.stdin.readline().rstrip()\r\n print(\"A\" if c < \"a\" else \"a\")\r\n\r\n @staticmethod\r\n def b():\r\n n, k, *p = map(int, sys.stdin.read().split())\r\n print(sum(sorted(p)[:k]))\r\n\r\n @staticmethod\r\n def c():\r\n n = int(sys.stdin.readline().rstrip())\r\n n -= 1\r\n l = 1\r\n while True:\r\n if n < pow(26, l):\r\n break\r\n n -= pow(26, l)\r\n l += 1\r\n res = \"\".join(\r\n [chr(ord(\"a\") + d) for d in NumberTheory.base_convert(n, 26)][\r\n ::-1\r\n ]\r\n )\r\n res = \"a\" * (l - len(res)) + res\r\n print(res)\r\n\r\n @staticmethod\r\n def d():\r\n n = int(sys.stdin.readline().rstrip())\r\n a = [int(x) for x in sys.stdin.readline().split()]\r\n s = sum(a)\r\n cnt = Counter(a)\r\n q = int(sys.stdin.readline().rstrip())\r\n for _ in range(q):\r\n b, c = map(int, sys.stdin.readline().split())\r\n s += (c - b) * cnt[b]\r\n print(s)\r\n cnt[c] += cnt[b]\r\n cnt[b] = 0\r\n\r\n @staticmethod\r\n def e():\r\n n, *a = map(int, sys.stdin.read().split())\r\n s = 0\r\n for x in a:\r\n s ^= x\r\n b = map(lambda x: x ^ s, a)\r\n print(*b, sep=\" \")\r\n\r\n class ABC172:\r\n @staticmethod\r\n def a():\r\n a = int(sys.stdin.readline().rstrip())\r\n print(a * (1 + a + a**2))\r\n\r\n @staticmethod\r\n def b():\r\n s, t = sys.stdin.read().split()\r\n print(sum(s[i] != t[i] for i in range(len(s))))\r\n\r\n @staticmethod\r\n def c():\r\n n, m, k = map(int, sys.stdin.readline().split())\r\n a = [0] + [int(x) for x in sys.stdin.readline().split()]\r\n b = [int(x) for x in sys.stdin.readline().split()]\r\n (*sa,) = itertools.accumulate(a)\r\n (*sb,) = itertools.accumulate(b)\r\n res = 0\r\n for i in range(n + 1):\r\n r = k - sa[i]\r\n if r < 0:\r\n break\r\n res = max(res, i + bi_r(sb, r))\r\n print(res)\r\n\r\n @staticmethod\r\n def d():\r\n n = int(sys.stdin.readline().rstrip())\r\n f = np.zeros(n + 1, dtype=np.int64)\r\n for i in range(1, n + 1):\r\n f[i::i] += 1\r\n print((np.arange(1, n + 1) * f[1:]).sum())\r\n\r\n class ABC173:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n charge = (n + 999) // 1000 * 1000 - n\r\n print(charge)\r\n\r\n @staticmethod\r\n def b():\r\n n, *s = sys.stdin.read().split()\r\n c = Counter(s)\r\n for v in \"AC, WA, TLE, RE\".split(\", \"):\r\n print(f\"{v} x {c[v]}\")\r\n\r\n @staticmethod\r\n def c():\r\n h, w, k = map(int, sys.stdin.readline().split())\r\n c = [sys.stdin.readline().rstrip() for _ in range(h)]\r\n tot = 0\r\n for i in range(1 << h):\r\n for j in range(1 << w):\r\n cnt = 0\r\n for y in range(h):\r\n for x in range(w):\r\n if i >> y & 1 or j >> x & 1:\r\n continue\r\n cnt += c[y][x] == \"#\"\r\n tot += cnt == k\r\n print(tot)\r\n\r\n @staticmethod\r\n def d():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a.sort(reverse=True)\r\n res = (\r\n a[0]\r\n + sum(a[1 : 1 + (n - 2) // 2]) * 2\r\n + a[1 + (n - 2) // 2] * (n & 1)\r\n )\r\n print(res)\r\n\r\n @staticmethod\r\n def e():\r\n MOD = 10**9 + 7\r\n n, k, *a = map(int, sys.stdin.read().split())\r\n minus = [x for x in a if x < 0]\r\n plus = [x for x in a if x > 0]\r\n if len(plus) + len(minus) // 2 * 2 >= k: # plus\r\n (*minus,) = map(abs, minus)\r\n minus.sort(reverse=True)\r\n plus.sort(reverse=True)\r\n cand = []\r\n if len(minus) & 1:\r\n minus = minus[:-1]\r\n for i in range(0, len(minus) - 1, 2):\r\n cand.append(minus[i] * minus[i + 1] % MOD)\r\n if k & 1:\r\n res = plus[0]\r\n plus = plus[1:]\r\n else:\r\n res = 1\r\n if len(plus) & 1:\r\n plus = plus[:-1]\r\n for i in range(0, len(plus) - 1, 2):\r\n cand.append(plus[i] * plus[i + 1] % MOD)\r\n 
cand.sort(reverse=True)\r\n for x in cand[: k // 2]:\r\n res *= x\r\n res %= MOD\r\n print(res)\r\n elif 0 in a:\r\n print(0)\r\n else:\r\n cand = sorted(map(abs, a))\r\n res = 1\r\n for i in range(k):\r\n res *= cand[i]\r\n res %= MOD\r\n res = MOD - res\r\n print(res)\r\n pass\r\n\r\n class ABC174:\r\n @staticmethod\r\n def a():\r\n print(\"Yes\" if int(sys.stdin.readline().rstrip()) >= 30 else \"No\")\r\n\r\n class ABC178:\r\n @staticmethod\r\n def a():\r\n pass\r\n\r\n @staticmethod\r\n def b():\r\n pass\r\n\r\n @staticmethod\r\n def c():\r\n pass\r\n\r\n @staticmethod\r\n def d():\r\n s = int(sys.stdin.readline().rstrip())\r\n if s == 0:\r\n print(1)\r\n return\r\n elif s == 1:\r\n print(0)\r\n return\r\n c = np.eye(3, k=-1, dtype=np.int64)\r\n c[0, 0] = c[0, 2] = 1\r\n a = np.array([0, 0, 1])\r\n print(Algebra.dot(Algebra.matrix_pow(c, s - 2), a)[0])\r\n\r\n class ABC179:\r\n @staticmethod\r\n def a():\r\n s = sys.stdin.readline().rstrip()\r\n print(s + \"s\" if s[-1] != \"s\" else s + \"es\")\r\n\r\n @staticmethod\r\n def b():\r\n n, *d = map(int, sys.stdin.read().split())\r\n d = np.array(d).reshape(n, 2).T\r\n d = np.equal(d[0], d[1]).astype(int)\r\n dd = d.copy()\r\n dd[1:] += d[:-1]\r\n dd[:-1] += d[1:]\r\n print(\"Yes\" if (dd >= 3).any() else \"No\")\r\n\r\n @staticmethod\r\n def c():\r\n n = int(sys.stdin.readline().rstrip())\r\n res = (n // np.arange(1, n + 1)).sum() - len(\r\n NumberTheory.find_divisors(n)\r\n )\r\n print(res)\r\n\r\n @staticmethod\r\n def d():\r\n mod = 998244353\r\n n, k, *lr = map(int, sys.stdin.read().split())\r\n l, r = np.array(lr).reshape(k, -1).T\r\n\r\n @njit((i8, i8[:], i8[:]), cache=True)\r\n def solve(n, l, r):\r\n res = np.zeros(n * 2, dtype=np.int64)\r\n res[0], res[1] = 1, -1\r\n for i in range(n - 1):\r\n res[i + 1] = (res[i + 1] + res[i]) % mod\r\n res[i + l] = (res[i + l] + res[i]) % mod\r\n res[i + r + 1] = (res[i + r + 1] - res[i]) % mod\r\n print(res[n - 1])\r\n\r\n solve(n, l, r)\r\n\r\n @staticmethod\r\n def e():\r\n n, x, m = map(int, sys.stdin.readline().split())\r\n res = [-1 for _ in range(m)]\r\n s = 0\r\n loop = np.zeros(m, dtype=np.int64)\r\n for i in range(m + 1):\r\n if i == n:\r\n print(s)\r\n return\r\n if res[x] != -1:\r\n l, loop = i - res[x], loop[res[x] : i]\r\n q, r = divmod(n - i, l)\r\n print(s + q * loop.sum() + loop[:r].sum())\r\n return\r\n res[x], loop[i] = i, x\r\n s += x\r\n x = x**2 % m\r\n\r\n class ABC180:\r\n @staticmethod\r\n def a():\r\n n, a, b = map(int, sys.stdin.readline().split())\r\n print(n - a + b)\r\n\r\n @staticmethod\r\n def b():\r\n n, *x = map(int, sys.stdin.read().split())\r\n x = np.absolute(np.array(x))\r\n print(x.sum())\r\n print(np.sqrt((x**2).sum()))\r\n print(x.max())\r\n\r\n @staticmethod\r\n def c():\r\n n = int(sys.stdin.readline().rstrip())\r\n div = NumberTheory.find_divisors(n)\r\n print(*div, sep=\"\\n\")\r\n\r\n @staticmethod\r\n def d():\r\n x, y, a, b = map(int, sys.stdin.readline().split())\r\n cnt = 0\r\n while x * a <= x + b:\r\n x *= a\r\n if x >= y:\r\n print(cnt)\r\n return\r\n cnt += 1\r\n cnt += (y - x - 1) // b\r\n print(cnt)\r\n\r\n @staticmethod\r\n def e():\r\n n, *xyz = map(int, sys.stdin.read().split())\r\n\r\n xyz = list(zip(*[iter(xyz)] * 3))\r\n dist = [[0] * n for _ in range(n)]\r\n for i in range(n):\r\n a, b, c = xyz[i]\r\n for j in range(n):\r\n p, q, r = xyz[j]\r\n dist[i][j] = abs(p - a) + abs(q - b) + max(0, r - c)\r\n\r\n dp = [[inf] * n for _ in range(1 << n)]\r\n dp[0][0] = 0\r\n for s in range(1 << n):\r\n for i in range(n):\r\n t = s | (1 << 
i)\r\n for j in range(n):\r\n dp[t][i] = min(dp[t][i], dp[s][j] + dist[j][i])\r\n print(dp[-1][0])\r\n\r\n @staticmethod\r\n def f(): # rewrite with jit compiling later.\r\n n, m, l = map(int, sys.stdin.readline().split())\r\n c = Combinatorics.CombinationsMod(n, MOD)\r\n path = np.zeros(n + 1, dtype=np.int64)\r\n path[1] = path[2] = 1\r\n for i in range(3, n + 1):\r\n path[i] = path[i - 1] * i % MOD\r\n cycle = np.zeros(n + 1, dtype=np.int64)\r\n cycle[1:] = path[:-1]\r\n dp = np.zeros((n + 1, m + 1), dtype=np.int64)\r\n\r\n def f(l):\r\n dp[:, :] = 0\r\n dp[0, 0] = 1\r\n for i in range(n):\r\n for j in range(m + 1):\r\n k = np.arange(1, min(l, n - i, m - j + 1) + 1)\r\n dp[i + k, j + k - 1] += (\r\n dp[i, j]\r\n * c(n - i - 1, k - 1)\r\n % MOD\r\n * path[k]\r\n % MOD\r\n )\r\n dp[i + k, j + k - 1] %= MOD\r\n k = np.arange(2, min(l, n - i, m - j) + 1)\r\n dp[i + k, j + k] += (\r\n dp[i, j]\r\n * c(n - i - 1, k - 1)\r\n % MOD\r\n * cycle[k]\r\n % MOD\r\n )\r\n dp[i + k, j + k] %= MOD\r\n return dp[n, m]\r\n\r\n print((f(l) - f(l - 1)) % MOD)\r\n\r\n @staticmethod\r\n def f_2(): # PyPy\r\n n, m, l = map(int, sys.stdin.readline().split())\r\n c = Combinatorics.CombinationsMod(n, MOD)\r\n path = [0] * (n + 1)\r\n path[1] = path[2] = 1\r\n for i in range(3, n + 1):\r\n path[i] = path[i - 1] * i % MOD\r\n cycle = [0] + path[:-1]\r\n\r\n def f(l):\r\n dp = [[0] * (m + 1) for _ in range(n + 1)]\r\n dp[0][0] = 1\r\n for i in range(n):\r\n for j in range(m + 1):\r\n for k in range(1, min(l, n - i, m - j + 1) + 1):\r\n dp[i + k][j + k - 1] += (\r\n dp[i][j]\r\n * c(n - i - 1, k - 1)\r\n % MOD\r\n * path[k]\r\n % MOD\r\n )\r\n dp[i + k][j + k - 1] %= MOD\r\n for k in range(1, min(l, n - i, m - j) + 1):\r\n dp[i + k][j + k] += (\r\n dp[i][j]\r\n * c(n - i - 1, k - 1)\r\n % MOD\r\n * cycle[k]\r\n % MOD\r\n )\r\n dp[i + k][j + k] %= MOD\r\n\r\n return dp[n][m]\r\n\r\n print((f(l) - f(l - 1)) % MOD)\r\n\r\n class ARC106:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n a = 1\r\n while pow(3, a) <= n:\r\n m = n - pow(3, a)\r\n b = 1\r\n while pow(5, b) <= m:\r\n if pow(5, b) == m:\r\n print(a, b)\r\n return\r\n b += 1\r\n a += 1\r\n print(-1)\r\n\r\n @staticmethod\r\n def b():\r\n n, m = map(int, sys.stdin.readline().split())\r\n a = [int(x) for x in sys.stdin.readline().split()]\r\n b = [int(x) for x in sys.stdin.readline().split()]\r\n uf = GeometryTopology.Graph(n)\r\n uf.init_dsu()\r\n for _ in range(m):\r\n c, d = map(int, sys.stdin.readline().split())\r\n c -= 1\r\n d -= 1\r\n uf.unite(c, d)\r\n\r\n visited = [False] * n\r\n ga = [[] for _ in range(n)]\r\n gb = [[] for _ in range(n)]\r\n for i in range(n):\r\n r = uf.find(i)\r\n ga[r].append(a[i])\r\n gb[r].append(b[i])\r\n print(\r\n \"Yes\"\r\n if all(sum(ga[i]) == sum(gb[i]) for i in range(n))\r\n else \"No\"\r\n )\r\n\r\n @staticmethod\r\n def c():\r\n n, m = map(int, sys.stdin.readline().split())\r\n if m < 0:\r\n print(-1)\r\n return\r\n if n == 1:\r\n if m != 0:\r\n print(-1)\r\n return\r\n print(1, 2)\r\n return\r\n\r\n if m >= n - 1:\r\n print(-1)\r\n return\r\n l, r = 1, 10**9\r\n print(l, r)\r\n for _ in range(n - 2 - m):\r\n l += 1\r\n r -= 1\r\n print(l, r)\r\n r = l\r\n for _ in range(m + 1):\r\n l, r = r + 1, r + 2\r\n print(l, r)\r\n\r\n @staticmethod\r\n def d():\r\n mod = 998244353\r\n n, k, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n b = np.zeros((k + 1, n), dtype=np.int64)\r\n b[0] = 1\r\n for i in range(k):\r\n b[i + 1] = b[i] * a % mod\r\n s = b.sum(axis=1) % mod\r\n inv_2 = 
pow(2, mod - 2, mod)\r\n c = Combinatorics.CombinationsMod(mod=mod)\r\n for x in range(1, k + 1):\r\n l = np.arange(x + 1)\r\n print(\r\n (\r\n (c(x, l) * s[l] % mod * s[l][::-1] % mod).sum() % mod\r\n - pow(2, x, mod) * s[x]\r\n )\r\n % mod\r\n * inv_2\r\n % mod\r\n )\r\n\r\n @staticmethod\r\n def e():\r\n pass\r\n\r\n @staticmethod\r\n def f():\r\n pass\r\n\r\n class ACL001:\r\n @staticmethod\r\n def a():\r\n n, *xy = map(int, sys.stdin.read().split())\r\n (*xy,) = zip(*[iter(xy)] * 2)\r\n print(xy)\r\n pass\r\n\r\n class TDPC:\r\n @staticmethod\r\n def t():\r\n pass\r\n\r\n class MSolutions2020:\r\n @staticmethod\r\n def a():\r\n x = int(sys.stdin.readline().rstrip())\r\n x -= 400\r\n print(8 - x // 200)\r\n\r\n @staticmethod\r\n def b():\r\n r, g, b, k = map(int, sys.stdin.read().split())\r\n while k and g <= r:\r\n g *= 2\r\n k -= 1\r\n while k and b <= g:\r\n b *= 2\r\n k -= 1\r\n print(\"Yes\" if r < g < b else \"No\")\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *a = map(int, sys.stdin.read().split())\r\n for i in range(k, n):\r\n print(\"Yes\" if a[i] > a[i - k] else \"No\")\r\n\r\n @staticmethod\r\n def d():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a += [-1]\r\n m = 1000\r\n s = 0\r\n for i in range(n):\r\n if a[i + 1] == a[i]:\r\n continue\r\n elif a[i + 1] > a[i]:\r\n cnt = m // a[i]\r\n m -= a[i] * cnt\r\n s += cnt\r\n else:\r\n m += a[i] * s\r\n s = 0\r\n print(m)\r\n\r\n\r\nclass Codeforces:\r\n class CR676div2:\r\n @staticmethod\r\n def a():\r\n t = int(sys.stdin.readline().rstrip())\r\n for _ in range(t):\r\n a, b = map(int, sys.stdin.readline().split())\r\n print(a ^ b)\r\n\r\n @staticmethod\r\n def b():\r\n t = int(sys.stdin.readline().rstrip())\r\n for _ in range(t):\r\n n = int(sys.stdin.readline().rstrip())\r\n s = [list(sys.stdin.readline().rstrip()) for _ in range(n)]\r\n s[0][0] = s[-1][-1] = \"0\"\r\n for i in range(n):\r\n for j in range(n):\r\n s[i][j] = int(s[i][j])\r\n\r\n def can_goal(g, c=0):\r\n visited = [0] * n\r\n stack = [(0, 0)]\r\n visited[0] |= 1 << 0\r\n while stack:\r\n y, x = stack.pop()\r\n for dy, dx in [(-1, 0), (0, -1), (1, 0), (0, 1)]:\r\n i, j = y + dy, x + dx\r\n if i < 0 or i >= n or j < 0 or j >= n:\r\n continue\r\n if i == j == n - 1:\r\n return True\r\n if visited[i] >> j & 1:\r\n continue\r\n visited[i] |= 1 << j\r\n if g[i][j] != c:\r\n continue\r\n stack.append((i, j))\r\n return False\r\n\r\n if not (can_goal(s, 0) or can_goal(s, 1)):\r\n print(0)\r\n continue\r\n\r\n flg = 0\r\n for i in range(n):\r\n for j in range(n):\r\n if i == j == 0 or i == j == n - 1:\r\n continue\r\n s[i][j] ^= 1\r\n if not (can_goal(s, 0) or can_goal(s, 1)):\r\n print(1)\r\n print(i + 1, j + 1)\r\n flg = 1\r\n break\r\n s[i][j] ^= 1\r\n if flg:\r\n break\r\n if flg:\r\n continue\r\n\r\n print(2)\r\n if s[0][1] == s[1][0]:\r\n print(n, n - 1)\r\n print(n - 1, n)\r\n continue\r\n\r\n if s[0][1] == s[-1][-2]:\r\n print(1, 2)\r\n print(n - 1, n)\r\n else:\r\n print(1, 2)\r\n print(n, n - 1)\r\n\r\n @staticmethod\r\n def c():\r\n pass\r\n\r\n\r\nclass ProjectEuler:\r\n @staticmethod\r\n def p1():\r\n def f(n, x):\r\n return (x + n // x * x) * (n // x) // 2\r\n\r\n n = 1000\r\n ans = f(n - 1, 3) + f(n - 1, 5) - f(n - 1, 15)\r\n print(ans)\r\n\r\n @staticmethod\r\n def p2():\r\n fib = [1, 2]\r\n while fib[-1] < 4 * 10**6:\r\n fib.append(fib[-1] + fib[-2])\r\n print(sum(fib[1:-1:3]))\r\n\r\n @staticmethod\r\n def p3():\r\n pn = NumberTheory.PrimeNumbers()\r\n res = pn.factorize(600851475143)\r\n print(max(res.keys()))\r\n\r\n @staticmethod\r\n def 
p4():\r\n def is_palindrome(n):\r\n n = str(n)\r\n return n == n[::-1]\r\n\r\n cand = []\r\n for a in range(100, 1000):\r\n for b in range(a, 1000):\r\n n = a * b\r\n if is_palindrome(n):\r\n cand.append(n)\r\n print(max(cand))\r\n\r\n @staticmethod\r\n def p5():\r\n pn = NumberTheory.PrimeNumbers()\r\n res = defaultdict(int)\r\n for i in range(1, 21):\r\n for p, c in pn.factorize(i).items():\r\n res[p] = max(res[p], c)\r\n ans = 1\r\n for p, c in res.items():\r\n ans *= pow(p, c)\r\n print(ans)\r\n\r\n @staticmethod\r\n def p6():\r\n a = np.arange(101)\r\n b = np.cumsum(a**2)\r\n a = a.cumsum()\r\n print(a[100] ** 2 - b[100])\r\n\r\n @staticmethod\r\n def p7():\r\n nt = NumberTheory.PrimeNumbers()\r\n print(sorted(nt)[10000])\r\n\r\n @staticmethod\r\n def p8():\r\n n = \"7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450\"\r\n n = [int(d) for d in list(n)]\r\n res = 0\r\n for i in range(988):\r\n x = 1\r\n for j in range(13):\r\n x *= n[i + j]\r\n res = max(res, x)\r\n print(res)\r\n\r\n @staticmethod\r\n def p9():\r\n for a in range(1, 997):\r\n for b in range(a, 998 - a):\r\n c = 1000 - a - b\r\n if a**2 + b**2 == c**2:\r\n print(a * b * c)\r\n return\r\n\r\n @staticmethod\r\n def p10():\r\n pn = NumberTheory.PrimeNumbers(2 * 10**6 + 1)\r\n print(sum(pn))\r\n\r\n @staticmethod\r\n def p11():\r\n grid = \"08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08 49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00 81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65 52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91 22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80 24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50 32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70 67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21 24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72 21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95 78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92 16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57 86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58 19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40 04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66 88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69 04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36 20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16 20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54 01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48\"\r\n print(grid)\r\n\r\n pass\r\n\r\n\r\nclass Yukicoder:\r\n def 
__init__(self):\r\n pass\r\n\r\n def __call__(self):\r\n print(1)\r\n\r\n\r\nclass AOJ:\r\n @staticmethod\r\n def ALDS1_12_A():\r\n n, *a = map(int, sys.stdin.read().split())\r\n g = GeometryTopology.Graph(n)\r\n for i in range(n - 1):\r\n for j in range(i + 1, n):\r\n if a[i * n + j] == -1:\r\n continue\r\n g.add_edge(i, j, weight=a[i * n + j])\r\n g.add_edge(j, i, weight=a[i * n + j])\r\n _, d = g.kruskal()\r\n # _, d = g.prim()\r\n # _, d = g.boruvka()\r\n print(d)\r\n\r\n @staticmethod\r\n def GRL_3_C(): # strongly connected components\r\n n, m = map(int, sys.stdin.readline().split())\r\n g = GeometryTopology.Graph(n)\r\n for _ in range(m):\r\n g.add_edge(*map(int, sys.stdin.readline().split()))\r\n r = g.scc()\r\n q, *uv = map(int, sys.stdin.read().split())\r\n for u, v in zip(*[iter(uv)] * 2):\r\n print(int(r[u] == r[v]))\r\n\r\n\r\nclass YosupoJudge:\r\n @staticmethod\r\n def Directed_MST():\r\n n, m, s, *abc = map(int, sys.stdin.read().split())\r\n g = GeometryTopology.Graph(n)\r\n for a, b, c in zip(*[iter(abc)] * 3):\r\n g.add_edge(a, b, weight=c)\r\n _, d, p = g.prim(src=s, return_parent=True)\r\n print(d)\r\n print(*p)\r\n\r\n @staticmethod\r\n def Manhattan_MST():\r\n n, *xy = map(int, sys.stdin.read().split())\r\n g = GeometryTopology.Graph(n)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n # AtCoder.ABC179.f()\r\n # AtCoder.ABC060.d()\r\n AtCoder.ABC081.d()\r\n # AtCoder.ARC106.d()\r\n # YosupoJudge.Directed_MST()\r\n pass\r\n", "import sys\r\n\r\nimport numpy as np\r\n\r\nn, a, b = map(int, sys.stdin.readline().split())\r\ns, d = np.array(sys.stdin.read().split()).reshape(-1, 2).T\r\nd = d.astype(np.int64)\r\n\r\n\r\ndef main():\r\n d[d < a] = a\r\n d[d > b] = b\r\n w = s == \"West\"\r\n d[w] = np.negative(d[w])\r\n res = np.sum(d)\r\n\r\n if res == 0:\r\n return 0\r\n elif res > 0:\r\n return \"East {0}\".format(res)\r\n else:\r\n return \"West {0}\".format(abs(res))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n ans = main()\r\n print(ans)\r\n", "import sys\r\nimport typing\r\n\r\nimport numba as nb\r\nimport numpy as np\r\n\r\n\r\[email protected](\r\n (nb.i8[:], ),\r\n cache=True,\r\n)\r\ndef solve(\r\n s: np.ndarray,\r\n) -> typing.NoReturn:\r\n mod = 10 ** 9 + 7\r\n n = s.size\r\n dp = np.zeros(n + 2, np.int64)\r\n dp[0] = 1\r\n for i in range(n):\r\n j = i - 1\r\n while j >= 0:\r\n if s[j] == s[i]: break\r\n dp[i + 2] += dp[j + 1]\r\n j -= 1\r\n else:\r\n dp[i + 2] += 1\r\n dp[i + 2] %= mod\r\n print(dp[2:].sum())\r\n\r\n\r\n\r\ndef main() -> typing.NoReturn:\r\n s = np.frombuffer(\r\n sys.stdin.buffer.readline().rstrip(),\r\n dtype='b',\r\n ).astype(np.int64) - ord('a')\r\n solve(s)\r\n\r\n\r\nmain()\n", "import sys\r\n\r\nimport numpy as np\r\n\r\nn, h, a, b, c, d, e = map(int, sys.stdin.read().split())\r\n\r\n\r\ndef cost(x, y):\r\n return a * x + c * y\r\n\r\n\r\ndef main():\r\n x = np.arange(n + 1, dtype=np.int64)\r\n y = (n * e - (b + e) * x - h + (d + e)) // (d + e)\r\n y = np.maximum(y, 0) # マイナス日食べることはできない\r\n y = np.minimum(\r\n y, n - x\r\n ) # xを固定した時にx+y(x+y >= n+1)日以上食べ続けないと体調を崩す場合でも、n日までのことだけを考えれば良い。\r\n\r\n costs = cost(x, y)\r\n return np.amin(costs)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n ans = main()\r\n print(ans)\r\n", "import sys\r\nimport typing\r\n\r\nimport numba as nb\r\nimport numpy as np\r\n\r\n\r\[email protected]((nb.i8[:], ), cache=True)\r\ndef solve(a: np.ndarray) -> typing.NoReturn:\r\n n = len(a)\r\n prev = np.empty(n, np.int64)\r\n last = np.zeros(26, np.int64)\r\n for i in range(n):\r\n prev[i] = last[a[i]]\r\n last[a[i]] = i + 1\r\n 
mod = 10 ** 9 + 7\r\n\r\n dp = np.zeros(n + 3, np.int64)\r\n for i in range(2, n + 2):\r\n j = prev[i - 2]\r\n dp[i] = dp[i - 2] - dp[j - 1] + (j == 0)\r\n dp[i] = (dp[i] + dp[i - 1]) % mod\r\n print(dp[n + 1])\r\n\r\ndef main() -> typing.NoReturn:\r\n a = np.array([ord(x) - 97 for x in input()])\r\n solve(a)\r\n\r\n\r\nmain()\r\n", "import itertools\r\nimport math\r\nimport string\r\nimport sys\r\nfrom bisect import bisect_left as bi_l\r\nfrom bisect import bisect_right as bi_r\r\nfrom collections import Counter, defaultdict, deque\r\nfrom heapq import heappop, heappush\r\nfrom operator import or_, xor\r\n\r\ninf = float(\"inf\")\r\nfrom functools import lru_cache, reduce\r\n\r\nsys.setrecursionlimit(10**6)\r\nMOD = 10**9 + 7\r\n# MOD = 998244353\r\n\r\nglobal using_numpy\r\nusing_numpy = False\r\nimport networkx as nx\r\nimport numpy as np\r\nfrom numba import jit\r\nfrom scipy import optimize\r\nfrom scipy.ndimage import distance_transform_cdt\r\nfrom scipy.sparse import csr_matrix\r\nfrom scipy.sparse.csgraph import (\r\n csgraph_to_dense,\r\n maximum_flow,\r\n minimum_spanning_tree,\r\n shortest_path,\r\n)\r\nfrom scipy.spatial import ConvexHull\r\nfrom scipy.special import comb\r\n\r\n\r\nclass Algebra:\r\n class Mint(int):\r\n def __init__(self, n, mod=MOD):\r\n self.value = n\r\n self.mod = mod\r\n\r\n def __str__(self):\r\n return f\"{self.value}\"\r\n\r\n def __add__(self, x):\r\n return self.__class__((self.value + x.value) % self.mod)\r\n\r\n def __sub__(self, x):\r\n return self.__class__((self.value - x.value) % self.mod)\r\n\r\n def __mul__(self, x):\r\n return self.__class__((self.value * x.value) % self.mod)\r\n\r\n def __pow__(self, x):\r\n return self.__class__(pow(self.value, x.value, self.mod))\r\n\r\n def __lt__(self, x):\r\n return self.value < x.value\r\n\r\n def __le__(self, x):\r\n return self.value <= x.value\r\n\r\n def __eq__(self, x):\r\n return self.value == x.value\r\n\r\n def __ne__(self, x):\r\n return self.value != x.value\r\n\r\n def __gt__(self, x):\r\n return self.value > x.value\r\n\r\n def __ge__(self, x):\r\n return self.value >= x.value\r\n\r\n class SemiGroup:\r\n pass\r\n\r\n class Monoid:\r\n pass\r\n\r\n class Group:\r\n pass\r\n\r\n class SemiRing:\r\n pass\r\n\r\n class Ring:\r\n pass\r\n\r\n @staticmethod\r\n def identity(n):\r\n if using_numpy:\r\n return np.identity(n, dtype=np.int64)\r\n else:\r\n a = [[0] * n for _ in range(n)]\r\n for i in range(n):\r\n a[i][i] = 1\r\n return a\r\n\r\n @staticmethod\r\n def dot(a, b):\r\n if using_numpy:\r\n return np.dot(a, b)\r\n else:\r\n assert len(a[0]) == len(b)\r\n c = [[0] * len(b[0]) for _ in range(len(a))]\r\n for i in range(len(a)):\r\n for j in range(len(b[0])):\r\n for k in range(len(b)):\r\n c[i][j] += a[i][k] * b[k][j]\r\n return c\r\n\r\n @classmethod\r\n def matrix_pow(cls, a, n, mod=10**9 + 7):\r\n m = len(a)\r\n b = cls.identity(m)\r\n while n:\r\n if n & 1:\r\n b = cls.dot(b, a)\r\n n >>= 1\r\n a = cls.dot(a, a)\r\n if using_numpy:\r\n a %= mod\r\n b %= mod\r\n else:\r\n for i in range(m):\r\n for j in range(m):\r\n a[i][j] %= mod\r\n b[i][j] %= mod\r\n return b\r\n\r\n @staticmethod\r\n def bitwise_dot(a, b):\r\n if using_numpy:\r\n return np.bitwise_xor.reduce(\r\n a[:, None, :] & b.T[None, :, :], axis=-1\r\n )\r\n else:\r\n assert len(a[0]) == len(b)\r\n c = [[0] * len(b[0]) for _ in range(len(a))]\r\n for i in range(len(a)):\r\n for j in range(len(b[0])):\r\n for k in range(len(b)):\r\n c[i][j] ^= a[i][k] & b[k][j]\r\n return c\r\n\r\n @classmethod\r\n def bitwise_mat_pow(cls, a, 
n):\r\n if n == 0:\r\n return np.eye(len(a), dtype=np.uint32) * ((1 << 32) - 1)\r\n res = cls.bitwise_mat_pow(a, n // 2)\r\n res = cls.bitwise_dot(res, res)\r\n return cls.bitwise_dot(res, a) if n & 1 else res\r\n\r\n\r\nclass NumberTheory:\r\n def __init__(self, n=2 * 10**6):\r\n self.n = n\r\n self.is_prime_number, self.prime_numbers = self.sieve_of_eratosthenes(\r\n n\r\n )\r\n\r\n def sieve_of_eratosthenes(self, n):\r\n if using_numpy:\r\n sieve = np.ones(n + 1, dtype=np.int32)\r\n sieve[:2] = 0\r\n for i in range(2, int(n**0.5) + 1):\r\n if sieve[i]:\r\n sieve[i * 2 :: i] = 0\r\n prime_numbers = np.flatnonzero(sieve)\r\n else:\r\n sieve = [1] * (n + 1)\r\n sieve[0] = sieve[1] = 0\r\n for i in range(2, int(n**0.5) + 1):\r\n if not sieve[i]:\r\n continue\r\n for j in range(i * 2, n + 1, i):\r\n sieve[j] = 0\r\n prime_numbers = [i for i in range(2, n + 1) if sieve[i]]\r\n return sieve, prime_numbers\r\n\r\n def prime_factorize(self, n):\r\n res = dict()\r\n if n < 2:\r\n return res\r\n border = int(n**0.5)\r\n for p in self.prime_numbers:\r\n if p > border:\r\n break\r\n while n % p == 0:\r\n res[p] = res.get(p, 0) + 1\r\n n //= p\r\n if n == 1:\r\n return res\r\n res[n] = 1\r\n return res\r\n\r\n def prime_factorize_factorial(self, n):\r\n res = dict()\r\n for i in range(2, n + 1):\r\n for p, c in self.prime_factorize(i).items():\r\n res[p] = res.get(p, 0) + c\r\n return res\r\n\r\n @classmethod\r\n @lru_cache(maxsize=None)\r\n def gcd(cls, a, b):\r\n return cls.gcd(b, a % b) if b else abs(a)\r\n\r\n @classmethod\r\n def lcm(cls, a, b):\r\n return abs(a // cls.gcd(a, b) * b)\r\n\r\n @staticmethod\r\n def find_divisors(n):\r\n divisors = []\r\n for i in range(1, int(n**0.5) + 1):\r\n if n % i:\r\n continue\r\n divisors.append(i)\r\n j = n // i\r\n if j != i:\r\n divisors.append(j)\r\n return sorted(divisors)\r\n\r\n @staticmethod\r\n def base_convert(n, b):\r\n if not n:\r\n return [0]\r\n res = []\r\n while n:\r\n n, r = divmod(n, b)\r\n if r < 0:\r\n n += 1\r\n r -= b\r\n res.append(r)\r\n return res\r\n\r\n\r\nmint = Algebra.Mint\r\n\r\n\r\nclass Combinatorics:\r\n def __init__(self, N=10**9, n=10**6, mod=10**9 + 7):\r\n self.mod = mod\r\n self.make_mod_tables(N, n)\r\n\r\n @classmethod\r\n @lru_cache(maxsize=None)\r\n def choose(cls, n, r, mod=None): # no mod, or mod ≠ prime\r\n if r > n or r < 0:\r\n return 0\r\n if r == 0:\r\n return 1\r\n res = cls.choose(n - 1, r, mod) + cls.choose(n - 1, r - 1, mod)\r\n if mod:\r\n res %= mod\r\n return res\r\n\r\n def cumprod(self, a):\r\n p = self.mod\r\n l = len(a)\r\n sql = int(np.sqrt(l) + 1)\r\n a = np.resize(a, sql**2).reshape(sql, sql)\r\n for i in range(sql - 1):\r\n a[:, i + 1] *= a[:, i]\r\n a[:, i + 1] %= p\r\n for i in range(sql - 1):\r\n a[i + 1] *= a[i, -1]\r\n a[i + 1] %= p\r\n return np.ravel(a)[:l]\r\n\r\n def make_mod_tables(self, N, n):\r\n p = self.mod\r\n if using_numpy:\r\n fac = np.arange(n + 1)\r\n fac[0] = 1\r\n fac = self.cumprod(fac)\r\n ifac = np.arange(n + 1, 0, -1)\r\n ifac[0] = pow(int(fac[-1]), p - 2, p)\r\n ifac = self.cumprod(ifac)[n::-1]\r\n n_choose = np.arange(N + 1, N - n, -1)\r\n n_choose[0] = 1\r\n n_choose[1:] = self.cumprod(n_choose[1:]) * ifac[1 : n + 1] % p\r\n else:\r\n fac = [None] * (n + 1)\r\n fac[0] = 1\r\n for i in range(n):\r\n fac[i + 1] = fac[i] * (i + 1) % p\r\n ifac = [None] * (n + 1)\r\n ifac[n] = pow(fac[n], p - 2, p)\r\n for i in range(n, 0, -1):\r\n ifac[i - 1] = ifac[i] * i % p\r\n n_choose = [None] * (n + 1)\r\n n_choose[0] = 1\r\n for i in range(n):\r\n n_choose[i + 1] = n_choose[i] 
* (N - i) % p\r\n for i in range(n + 1):\r\n n_choose[i] = n_choose[i] * ifac[i] % p\r\n self.fac, self.ifac, self.mod_n_choose = fac, ifac, n_choose\r\n\r\n def mod_choose(self, n, r):\r\n p = self.mod\r\n return self.fac[n] * self.ifac[r] % p * self.ifac[n - r] % p\r\n\r\n @classmethod\r\n def permutations(cls, a, r=None, i=0):\r\n a = list(a)\r\n n = len(a)\r\n if r is None:\r\n r = n\r\n res = []\r\n if r > n or i > r:\r\n return res\r\n if i == r:\r\n return [tuple(a[:r])]\r\n for j in range(i, n):\r\n a[i], a[j] = a[j], a[i]\r\n res += cls.permutations(a, r, i + 1)\r\n return res\r\n\r\n @staticmethod\r\n def combinations(a, r):\r\n a = tuple(a)\r\n n = len(a)\r\n if r > n:\r\n return\r\n indices = list(range(r))\r\n yield a[:r]\r\n while True:\r\n for i in range(r - 1, -1, -1):\r\n if indices[i] != i + n - r:\r\n break\r\n else:\r\n return\r\n indices[i] += 1\r\n for j in range(i + 1, r):\r\n indices[j] = indices[j - 1] + 1\r\n yield tuple(a[i] for i in indices)\r\n\r\n\r\nclass String:\r\n @staticmethod\r\n def z_algorithm(s):\r\n n = len(s)\r\n a = [0] * n\r\n a[0] = n\r\n l = r = -1\r\n for i in range(1, n):\r\n if r >= i:\r\n a[i] = min(a[i - l], r - i)\r\n while i + a[i] < n and s[i + a[i]] == s[a[i]]:\r\n a[i] += 1\r\n if i + a[i] >= r:\r\n l, r = i, i + a[i]\r\n return a\r\n\r\n\r\nclass GeometryTopology:\r\n class Graph:\r\n def __init__(self, nodes={}, edges={}):\r\n self.nodes = nodes\r\n self.edges = edges\r\n\r\n def add_node(self, v, **info):\r\n if not v in self.edges:\r\n self.edges[v] = {}\r\n if v in self.nodes:\r\n return\r\n self.nodes[v] = info\r\n\r\n def add_edge(self, u, v, **info):\r\n self.add_node(u)\r\n self.add_node(v)\r\n self.edges[u][v] = info\r\n\r\n def get_size(self):\r\n return len(self.nodes)\r\n\r\n def dinic(self, src, sink):\r\n def bfs():\r\n lv = {src: 0}\r\n q = deque([src])\r\n while q:\r\n u = q.popleft()\r\n for v, e in self.edges[u].items():\r\n if e[\"capacity\"] == 0 or v in lv:\r\n continue\r\n lv[v] = lv[u] + 1\r\n q.append(v)\r\n return lv\r\n\r\n def flow_to_sink(u, flow_in):\r\n if u == sink:\r\n return flow_in\r\n flow = 0\r\n for v, e in self.edges[u].items():\r\n cap = e[\"capacity\"]\r\n if cap == 0 or lv[v] <= lv[u]:\r\n continue\r\n f = flow_to_sink(v, min(flow_in, cap))\r\n if not f:\r\n continue\r\n self.edges[u][v][\"capacity\"] -= f\r\n if v in self.edges and u in self.edges[v]:\r\n self.edges[v][u][\"capacity\"] += f\r\n else:\r\n self.add_edge(v, u, capacity=f)\r\n flow_in -= f\r\n flow += f\r\n return flow\r\n\r\n flow = 0\r\n while True:\r\n lv = bfs()\r\n if not sink in lv:\r\n return flow\r\n flow += flow_to_sink(src, inf)\r\n\r\n def ford_fulkerson(self):\r\n pass\r\n\r\n def push_relabel(self):\r\n pass\r\n\r\n def floyd_warshall(self):\r\n d = {u: {v: inf for v in self.nodes} for u in self.nodes}\r\n for v in self.nodes:\r\n d[v][v] = 0\r\n for u in self.edges:\r\n for v in self.edges[u]:\r\n d[u][v] = self.edges[u][v][\"weight\"]\r\n for w in self.nodes:\r\n for u in self.nodes:\r\n for v in self.nodes:\r\n d[u][v] = min(d[u][v], d[u][w] + d[w][v])\r\n return d\r\n\r\n def dijkstra(self, src, paths_cnt=False, mod=None):\r\n dist = {v: inf for v in self.nodes}\r\n dist[src] = 0\r\n visited = set()\r\n paths = {v: 0 for v in self.nodes}\r\n paths[src] = 1\r\n q = [(0, src)]\r\n while q:\r\n d, u = heappop(q)\r\n if u in visited:\r\n continue\r\n visited.add(u)\r\n for v, e in self.edges[u].items():\r\n dv = d + e[\"weight\"]\r\n if dv > dist[v]:\r\n continue\r\n elif dv == dist[v]:\r\n paths[v] += paths[u]\r\n if 
mod:\r\n paths[v] %= mod\r\n continue\r\n paths[v] = paths[u]\r\n dist[v] = dv\r\n heappush(q, (dv, v))\r\n if paths_cnt:\r\n return dist, paths\r\n else:\r\n return dist\r\n\r\n def astar(self, src, tgt, heuristic_func):\r\n cost = {v: inf for v in self.nodes}\r\n q = [(heuristic_func(src, tgt), 0, src)]\r\n while q:\r\n s, c, u = heappop(q)\r\n if u == tgt:\r\n return c\r\n if cost[u] != inf:\r\n continue\r\n cost[u] = c\r\n for v, e in self.edges[u].items():\r\n if cost[v] != inf:\r\n continue\r\n h = heuristic_func(v, tgt)\r\n nc = c + e[\"weight\"]\r\n heappush(q, (h + nc, nc, v))\r\n return inf\r\n\r\n def init_tree(self, root=0):\r\n self.depth = {root: 0}\r\n self.dist = {root: 0}\r\n self.ancestors = [{root: root}]\r\n stack = [root]\r\n while stack:\r\n u = stack.pop()\r\n for v, e in self.edges[u].items():\r\n if v == self.ancestors[0][u]:\r\n continue\r\n self.dist[v] = self.dist[u] + e[\"weight\"]\r\n self.depth[v] = self.depth[u] + 1\r\n self.ancestors[0][v] = u\r\n stack.append(v)\r\n\r\n # tree doubling\r\n for _ in range(max(self.depth).bit_length()):\r\n ancestor = self.ancestors[-1]\r\n nxt_ancestor = {v: ancestor[ancestor[v]] for v in self.nodes}\r\n self.ancestors.append(nxt_ancestor)\r\n\r\n def find_dist(self, u, v):\r\n return (\r\n self.dist[u]\r\n + self.dist[v]\r\n - 2 * self.dist[self.find_lca(u, v)]\r\n )\r\n\r\n def find_lca(self, u, v):\r\n du, dv = self.depth[u], self.depth[v]\r\n if du > dv:\r\n u, v = v, u\r\n du, dv = dv, du\r\n\r\n d = dv - du\r\n for i in range((d).bit_length()): # up-stream\r\n if d >> i & 1:\r\n v = self.ancestors[i][v]\r\n if v == u:\r\n return v\r\n\r\n for i in range(\r\n du.bit_length() - 1, -1, -1\r\n ): # find direct child of LCA.\r\n nu, nv = self.ancestors[i][u], self.ancestors[i][v]\r\n if nu == nv:\r\n continue\r\n u, v = nu, nv\r\n\r\n return self.ancestors[0][u]\r\n\r\n @staticmethod\r\n def triangle_area(p0, p1, p2, signed=False):\r\n x1, y1, x2, y2 = (\r\n p1[0] - p0[0],\r\n p1[1] - p0[1],\r\n p2[0] - p0[0],\r\n p2[1] - p0[1],\r\n )\r\n return (\r\n (x1 * y2 - x2 * y1) / 2 if signed else abs(x1 * y2 - x2 * y1) / 2\r\n )\r\n\r\n @classmethod\r\n def intersect(cls, seg1, seg2):\r\n (p1, p2), (p3, p4) = seg1, seg2\r\n t1 = cls.triangle_area(p1, p2, p3, signed=True)\r\n t2 = cls.triangle_area(p1, p2, p4, signed=True)\r\n t3 = cls.triangle_area(p3, p4, p1, signed=True)\r\n t4 = cls.triangle_area(p3, p4, p2, signed=True)\r\n return (t1 * t2 < 0) & (t3 * t4 < 0)\r\n\r\n class UnionFind:\r\n def __init__(self, n=10**6):\r\n self.root = list(range(n))\r\n self.height = [0] * n\r\n self.size = [1] * n\r\n\r\n def find_root(self, u):\r\n if self.root[u] == u:\r\n return u\r\n self.root[u] = self.find_root(self.root[u])\r\n return self.root[u]\r\n\r\n def unite(self, u, v):\r\n ru = self.find_root(u)\r\n rv = self.find_root(v)\r\n if ru == rv:\r\n return\r\n hu = self.height[ru]\r\n hv = self.height[rv]\r\n if hu >= hv:\r\n self.root[rv] = ru\r\n self.size[ru] += self.size[rv]\r\n self.height[ru] = max(hu, hv + 1)\r\n else:\r\n self.root[ru] = rv\r\n self.size[rv] += self.size[ru]\r\n\r\n\r\ndef cumxor(a):\r\n return reduce(xor, a, 0)\r\n\r\n\r\ndef cumor(a):\r\n return reduce(or_, a, 0)\r\n\r\n\r\ndef bit_count(n):\r\n cnt = 0\r\n while n:\r\n cnt += n & 1\r\n n >>= 1\r\n return cnt\r\n\r\n\r\nclass AtCoder:\r\n class ABC001:\r\n @staticmethod\r\n def a():\r\n h1, h2 = map(int, sys.stdin.read().split())\r\n print(h1 - h2)\r\n\r\n @staticmethod\r\n def d():\r\n def to_minuites(x):\r\n q, r = divmod(x, 100)\r\n return 60 * q + 
r\r\n\r\n def to_hmform(x):\r\n q, r = divmod(x, 60)\r\n return 100 * q + r\r\n\r\n n = int(sys.stdin.readline().rstrip())\r\n term = [0] * 2001\r\n for _ in range(n):\r\n s, e = map(\r\n to_minuites,\r\n map(int, sys.stdin.readline().rstrip().split(\"-\")),\r\n )\r\n s = s // 5 * 5\r\n e = (e + 4) // 5 * 5\r\n term[s] += 1\r\n term[e + 1] -= 1\r\n for i in range(2000):\r\n term[i + 1] += term[i]\r\n\r\n res = []\r\n raining = False\r\n for i in range(2001):\r\n if term[i]:\r\n if not raining:\r\n s = i\r\n raining = True\r\n elif raining:\r\n res.append((s, i - 1))\r\n raining = False\r\n for s, e in res:\r\n print(f\"{to_hmform(s):04}-{to_hmform(e):04}\")\r\n\r\n class ABC002:\r\n @staticmethod\r\n def a():\r\n print(max(map(int, sys.stdin.readline().split())))\r\n\r\n @staticmethod\r\n def b():\r\n vowels = set(\"aeiou\")\r\n print(\r\n \"\".join(\r\n [\r\n c\r\n for c in sys.stdin.readline().rstrip()\r\n if c not in vowels\r\n ]\r\n )\r\n )\r\n\r\n @staticmethod\r\n def c():\r\n print(\r\n GeometryTopology.triangle_area(\r\n *map(int, sys.stdin.readline().split())\r\n )\r\n )\r\n\r\n @staticmethod\r\n def d():\r\n n, m = map(int, sys.stdin.readline().split())\r\n edges = set(\r\n (x - 1, y - 1)\r\n for x, y in zip(*[map(int, sys.stdin.read().split())] * 2)\r\n )\r\n print(\r\n max(\r\n len(s)\r\n for i in range(1, 1 << n)\r\n for s in [[j for j in range(n) if i >> j & 1]]\r\n if all(\r\n (x, y) in edges\r\n for x, y in itertools.combinations(s, 2)\r\n )\r\n )\r\n )\r\n\r\n @staticmethod\r\n def d_2():\r\n n, m = map(int, sys.stdin.readline().split())\r\n relations = [1 << i for i in range(n)]\r\n for x, y in zip(*[map(int, sys.stdin.read().split())] * 2):\r\n x -= 1\r\n y -= 1\r\n relations[x] |= 1 << y\r\n relations[y] |= 1 << x\r\n res = 0\r\n for i in range(1 << n):\r\n cnt = 0\r\n s = 0\r\n t = (1 << n) - 1\r\n for j in range(n):\r\n if i >> j & 1:\r\n s |= 1 << j\r\n t &= relations[j]\r\n cnt += 1\r\n if t & s == s:\r\n res = max(res, cnt)\r\n print(res)\r\n\r\n class ABC003:\r\n @staticmethod\r\n def a():\r\n print((int(sys.stdin.readline().rstrip()) + 1) * 5000)\r\n\r\n @staticmethod\r\n def b():\r\n atcoder = set(\"atcoder\")\r\n s, t = sys.stdin.read().split()\r\n print(\r\n all(\r\n s[i] == t[i]\r\n or s[i] == \"@\"\r\n and t[i] in atcoder\r\n or t[i] == \"@\"\r\n and s[i] in atcoder\r\n for i in range(len(s))\r\n )\r\n and \"You can win\"\r\n or \"You will lose\"\r\n )\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *r = map(int, sys.stdin.read().split())\r\n print(reduce(lambda x, y: (x + y) / 2, sorted(r)[-k:], 0))\r\n\r\n class ABC004:\r\n @staticmethod\r\n def a():\r\n print(int(sys.stdin.readline().rstrip()) * 2)\r\n\r\n @staticmethod\r\n def b():\r\n for l in [sys.stdin.readline().rstrip() for _ in range(4)][::-1]:\r\n print(l[::-1])\r\n\r\n @staticmethod\r\n def c():\r\n n = int(sys.stdin.readline().rstrip()) % 30\r\n res = list(range(1, 7))\r\n for i in range(n):\r\n i %= 5\r\n res[i], res[i + 1] = res[i + 1], res[i]\r\n print(*res, sep=\"\")\r\n\r\n class ABC005:\r\n @staticmethod\r\n def a():\r\n x, y = map(int, sys.stdin.readline().split())\r\n print(y // x)\r\n\r\n @staticmethod\r\n def b():\r\n n, *t = map(int, sys.stdin.read().split())\r\n print(min(t))\r\n\r\n @staticmethod\r\n def c():\r\n t = int(sys.stdin.readline().rstrip())\r\n n = int(sys.stdin.readline().rstrip())\r\n a = [int(x) for x in sys.stdin.readline().split()]\r\n m = int(sys.stdin.readline().rstrip())\r\n b = [int(x) for x in sys.stdin.readline().split()]\r\n i = 0\r\n for p in b:\r\n if i == 
n:\r\n print(\"no\")\r\n return\r\n while p - a[i] > t:\r\n i += 1\r\n if i == n:\r\n print(\"no\")\r\n return\r\n if a[i] > p:\r\n print(\"no\")\r\n return\r\n i += 1\r\n print(\"yes\")\r\n\r\n @staticmethod\r\n def d():\r\n n = int(sys.stdin.readline().rstrip())\r\n d = np.array(\r\n [sys.stdin.readline().split() for _ in range(n)], np.int64\r\n )\r\n s = d.cumsum(axis=0).cumsum(axis=1)\r\n s = np.pad(s, 1)\r\n max_del = np.zeros((n + 1, n + 1), dtype=np.int64)\r\n for y in range(1, n + 1):\r\n for x in range(1, n + 1):\r\n max_del[y, x] = np.amax(\r\n s[y : n + 1, x : n + 1]\r\n - s[0 : n - y + 1, x : n + 1]\r\n - s[y : n + 1, 0 : n - x + 1]\r\n + s[0 : n - y + 1, 0 : n - x + 1]\r\n )\r\n res = np.arange(n**2 + 1)[:, None]\r\n i = np.arange(1, n + 1)\r\n res = max_del[i, np.minimum(res // i, n)].max(axis=1)\r\n q = int(sys.stdin.readline().rstrip())\r\n p = np.array(sys.stdin.read().split(), dtype=np.int64)\r\n print(*res[p], sep=\"\\n\")\r\n\r\n class ABC006:\r\n @staticmethod\r\n def a():\r\n n = sys.stdin.readline().rstrip()\r\n if \"3\" in n:\r\n print(\"YES\")\r\n elif int(n) % 3 == 0:\r\n print(\"YES\")\r\n else:\r\n print(\"NO\")\r\n\r\n @staticmethod\r\n def b():\r\n mod = 10007\r\n a = np.eye(N=3, k=-1, dtype=np.int64)\r\n a[0] = 1\r\n n = int(sys.stdin.readline().rstrip())\r\n a = Algebra.matrix_pow(a, n - 1, mod)\r\n print(a[2][0])\r\n\r\n @staticmethod\r\n def c():\r\n n, m = map(int, sys.stdin.readline().split())\r\n cnt = [0, 0, 0]\r\n if m == 1:\r\n cnt = [-1, -1, -1]\r\n else:\r\n if m & 1:\r\n m -= 3\r\n cnt[1] += 1\r\n n -= 1\r\n cnt[2] = m // 2 - n\r\n cnt[0] = n - cnt[2]\r\n if cnt[0] < 0 or cnt[1] < 0 or cnt[2] < 0:\r\n print(-1, -1, -1)\r\n else:\r\n print(*cnt, sep=\" \")\r\n\r\n @staticmethod\r\n def d():\r\n n, *c = map(int, sys.stdin.read().split())\r\n lis = [inf] * n\r\n for x in c:\r\n lis[bi_l(lis, x)] = x\r\n print(n - bi_l(lis, inf))\r\n\r\n class ABC007:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n print(n - 1)\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip()\r\n if s == \"a\":\r\n print(-1)\r\n else:\r\n print(\"a\")\r\n\r\n @staticmethod\r\n def c():\r\n r, c = map(int, sys.stdin.readline().split())\r\n sy, sx = map(int, sys.stdin.readline().split())\r\n gy, gx = map(int, sys.stdin.readline().split())\r\n sy -= 1\r\n sx -= 1\r\n gy -= 1\r\n gx -= 1\r\n maze = [sys.stdin.readline().rstrip() for _ in range(r)]\r\n queue = deque([(sy, sx)])\r\n dist = np.full((r, c), np.inf)\r\n dist[sy, sx] = 0\r\n while queue:\r\n y, x = queue.popleft()\r\n for i, j in [(-1, 0), (1, 0), (0, -1), (0, 1)]:\r\n i += y\r\n j += x\r\n if maze[i][j] == \"#\" or dist[i, j] != np.inf:\r\n continue\r\n dist[i, j] = dist[y, x] + 1\r\n queue.append((i, j))\r\n print(int(dist[gy, gx]))\r\n\r\n @staticmethod\r\n def d():\r\n ng = set([4, 9])\r\n\r\n def count(d):\r\n return d if d <= 4 else d - 1\r\n\r\n def f(n):\r\n x = [int(d) for d in str(n)]\r\n flg = True\r\n dp = 0\r\n for d in x:\r\n dp = dp * 8 + flg * count(d)\r\n if d in ng:\r\n flg = False\r\n return n - (dp + flg)\r\n\r\n a, b = map(int, sys.stdin.readline().split())\r\n print(f(b) - f(a - 1))\r\n\r\n class ABC008:\r\n @staticmethod\r\n def a():\r\n s, t = map(int, sys.stdin.readline().split())\r\n print(t - s + 1)\r\n\r\n @staticmethod\r\n def b():\r\n n, *s = sys.stdin.read().split()\r\n res = defaultdict(int)\r\n for name in s:\r\n res[name] += 1\r\n print(sorted(res.items(), key=lambda x: x[1])[-1][0])\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, 
sys.stdin.read().split())\r\n a = np.array(a)\r\n c = n - np.count_nonzero(a[:, None] % a, axis=1)\r\n print(np.sum((c + 1) // 2 / c))\r\n\r\n @staticmethod\r\n def d():\r\n w, h, n, *xy = map(int, sys.stdin.read().split())\r\n (*xy,) = zip(*([iter(xy)] * 2))\r\n\r\n @lru_cache(maxsize=None)\r\n def count(x1, y1, x2, y2):\r\n res = 0\r\n for x, y in xy:\r\n if not (x1 <= x <= x2 and y1 <= y <= y2):\r\n continue\r\n cnt = (x2 - x1) + (y2 - y1) + 1\r\n cnt += count(x1, y1, x - 1, y - 1)\r\n cnt += count(x1, y + 1, x - 1, y2)\r\n cnt += count(x + 1, y1, x2, y - 1)\r\n cnt += count(x + 1, y + 1, x2, y2)\r\n res = max(res, cnt)\r\n return res\r\n\r\n print(count(1, 1, w, h))\r\n\r\n class ABC009:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n print((n + 1) // 2)\r\n\r\n @staticmethod\r\n def b():\r\n n, *a = map(int, sys.stdin.read().split())\r\n print(sorted(set(a))[-2])\r\n\r\n @staticmethod\r\n def c():\r\n n, k = map(int, sys.stdin.readline().split())\r\n s = list(sys.stdin.readline().rstrip())\r\n cost = [1] * n\r\n r = k\r\n for i in range(n - 1):\r\n q = []\r\n for j in range(i + 1, n):\r\n if s[j] < s[i] and cost[i] + cost[j] <= r:\r\n heappush(q, (s[j], cost[i] + cost[j], -j))\r\n if not q:\r\n continue\r\n _, c, j = heappop(q)\r\n j = -j\r\n s[i], s[j] = s[j], s[i]\r\n r -= c\r\n cost[i] = cost[j] = 0\r\n print(\"\".join(s))\r\n\r\n @staticmethod\r\n def d():\r\n k, m = map(int, sys.stdin.readline().split())\r\n a = np.array([int(x) for x in sys.stdin.readline().split()])\r\n c = np.array([int(x) for x in sys.stdin.readline().split()])\r\n mask = (1 << 32) - 1\r\n d = np.eye(k, k, -1, dtype=np.uint32) * mask\r\n d[0] = c\r\n if m <= k:\r\n print(a[m - 1])\r\n return\r\n # print(Algebra.bitwise_mat_pow(d, m-k))\r\n # print(Algebra.bitwise_dot(Algebra.bitwise_mat_pow(d, m-k), a[::-1].reshape(-1, 1))[0].item())\r\n print(\r\n Algebra.bitwise_dot(\r\n Algebra.bitwise_mat_pow(d, m - k), a[::-1].reshape(-1, 1)\r\n )[0][0]\r\n )\r\n\r\n class ABC010:\r\n @staticmethod\r\n def a():\r\n print(sys.stdin.readline().rstrip() + \"pp\")\r\n\r\n @staticmethod\r\n def b():\r\n n, *a = map(int, sys.stdin.read().split())\r\n tot = 0\r\n for x in a:\r\n c = 0\r\n while x % 2 == 0 or x % 3 == 2:\r\n x -= 1\r\n c += 1\r\n tot += c\r\n print(tot)\r\n\r\n @staticmethod\r\n def c():\r\n sx, sy, gx, gy, t, v, n, *xy = map(int, sys.stdin.read().split())\r\n x, y = np.array(xy).reshape(-1, 2).T\r\n\r\n def dist(x1, y1, x2, y2):\r\n return np.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)\r\n\r\n ans = (\r\n \"YES\"\r\n if (dist(sx, sy, x, y) + dist(x, y, gx, gy) <= v * t).any()\r\n else \"NO\"\r\n )\r\n print(ans)\r\n\r\n @staticmethod\r\n def d():\r\n n, g, e = map(int, sys.stdin.readline().split())\r\n p = [int(x) for x in sys.stdin.readline().split()]\r\n x, y = [], []\r\n for _ in range(e):\r\n a, b = map(int, sys.stdin.readline().split())\r\n x.append(a)\r\n y.append(b)\r\n x.append(b)\r\n y.append(a)\r\n for a in p:\r\n x.append(a)\r\n y.append(n)\r\n if not x:\r\n print(0)\r\n return\r\n c = [1] * len(x)\r\n min_cut = maximum_flow(\r\n csr_matrix((c, (x, y)), (n + 1, n + 1)), source=0, sink=n\r\n ).flow_value\r\n print(min_cut)\r\n\r\n @staticmethod\r\n def d_2():\r\n n, g, e = map(int, sys.stdin.readline().split())\r\n graph = nx.DiGraph()\r\n graph.add_nodes_from(range(n + 1))\r\n for p in [int(x) for x in sys.stdin.readline().split()]:\r\n graph.add_edge(p, n, capacity=1)\r\n for _ in range(e):\r\n a, b = map(int, sys.stdin.readline().split())\r\n graph.add_edge(a, b, 
capacity=1)\r\n graph.add_edge(b, a, capacity=1)\r\n print(nx.minimum_cut_value(graph, 0, n))\r\n\r\n @staticmethod\r\n def d_3():\r\n n, g, e = map(int, sys.stdin.readline().split())\r\n graph = GeometryTopology.Graph()\r\n for i in range(n + 1):\r\n graph.add_node(i)\r\n for p in [int(x) for x in sys.stdin.readline().split()]:\r\n graph.add_edge(p, n, capacity=1)\r\n for a, b in zip(*[map(int, sys.stdin.read().split())] * 2):\r\n graph.add_edge(a, b, capacity=1)\r\n graph.add_edge(b, a, capacity=1)\r\n print(graph.dinic(0, n))\r\n\r\n class ABC011:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n print(n % 12 + 1)\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip()\r\n print(s[0].upper() + s[1:].lower())\r\n\r\n @staticmethod\r\n def c():\r\n n, *ng = map(int, sys.stdin.read().split())\r\n ng = set(ng)\r\n if n in ng:\r\n print(\"NO\")\r\n else:\r\n r = 100\r\n while n > 0:\r\n if r == 0:\r\n print(\"NO\")\r\n return\r\n for i in range(3, 0, -1):\r\n if (n - i) in ng:\r\n continue\r\n n -= i\r\n r -= 1\r\n break\r\n else:\r\n print(\"NO\")\r\n return\r\n print(\"YES\")\r\n\r\n @staticmethod\r\n def d():\r\n n, d, x, y = map(int, sys.stdin.read().split())\r\n x, y = abs(x), abs(y)\r\n if x % d or y % d:\r\n print(0)\r\n return\r\n x, y = x // d, y // d\r\n r = n - (x + y)\r\n if r < 0 or r & 1:\r\n print(0)\r\n return\r\n\r\n res = 0\r\n half_p = pow(1 / 2, n)\r\n for d in range(r // 2 + 1): # 0 <= d <= r//2, south\r\n south, north = d, y + d\r\n west = (r - 2 * d) // 2\r\n res += (\r\n half_p\r\n * comb(n, south, exact=True)\r\n * comb(n - south, north, exact=True)\r\n * comb(n - south - north, west, exact=True)\r\n * half_p\r\n )\r\n print(res)\r\n\r\n class ABC012:\r\n @staticmethod\r\n def a():\r\n a, b = map(int, sys.stdin.readline().split())\r\n print(b, a)\r\n\r\n @staticmethod\r\n def b():\r\n n = int(sys.stdin.readline().rstrip())\r\n h, n = divmod(n, 3600)\r\n m, s = divmod(n, 60)\r\n print(f\"{h:02}:{m:02}:{s:02}\")\r\n\r\n @staticmethod\r\n def c():\r\n n = 2025 - int(sys.stdin.readline().rstrip())\r\n res = []\r\n for i in range(1, 10):\r\n if n % i != 0 or n // i > 9:\r\n continue\r\n res.append(f\"{i} x {n//i}\")\r\n print(*sorted(res), sep=\"\\n\")\r\n\r\n @staticmethod\r\n def d():\r\n n, m, *abt = map(int, sys.stdin.read().split())\r\n a, b, t = np.array(abt).reshape(m, 3).T\r\n res = shortest_path(\r\n csr_matrix((t, (a - 1, b - 1)), (n, n)),\r\n method=\"FW\",\r\n directed=False,\r\n )\r\n print(res.max(axis=-1).min().astype(np.int64))\r\n\r\n @staticmethod\r\n def d_2():\r\n n, m, *abt = map(int, sys.stdin.read().split())\r\n graph = GeometryTopology.Graph()\r\n for a, b, t in zip(*[iter(abt)] * 3):\r\n a -= 1\r\n b -= 1\r\n graph.add_edge(a, b, weight=t)\r\n graph.add_edge(b, a, weight=t)\r\n\r\n dist = graph.floyd_warshall()\r\n res = min([max(tmp.values()) for tmp in dist.values()])\r\n print(res)\r\n\r\n class ABC013:\r\n @staticmethod\r\n def a():\r\n print(ord(sys.stdin.readline().rstrip()) - ord(\"A\") + 1)\r\n\r\n @staticmethod\r\n def b():\r\n a, b = map(int, sys.stdin.read().split())\r\n d = abs(a - b)\r\n print(min(d, 10 - d))\r\n\r\n @staticmethod\r\n def c():\r\n n, h, a, b, c, d, e = map(int, sys.stdin.read().split())\r\n y = np.arange(n + 1)\r\n x = (n * e - h - (d + e) * y) // (b + e) + 1\r\n np.maximum(x, 0, out=x)\r\n np.minimum(x, n - y, out=x)\r\n print(np.amin(a * x + c * y))\r\n\r\n @staticmethod\r\n def d():\r\n n, m, d, *a = map(int, sys.stdin.read().split())\r\n res = list(range(n))\r\n\r\n def 
swap(i, j):\r\n res[i], res[j] = res[j], res[i]\r\n\r\n for i in a[::-1]:\r\n swap(i - 1, i)\r\n\r\n group = [None] * n\r\n root = [None] * n\r\n index_in_group = [None] * n\r\n for i in range(n):\r\n if root[i] is not None:\r\n continue\r\n group[i] = []\r\n j = i\r\n for cnt in range(1, n + 1):\r\n index_in_group[j] = cnt - 1\r\n group[i].append(j)\r\n j = res[j]\r\n root[j] = i\r\n if j == i:\r\n break\r\n\r\n for i in range(n):\r\n g = group[root[i]]\r\n print(g[(index_in_group[i] + d) % len(g)] + 1)\r\n\r\n class ABC014:\r\n @staticmethod\r\n def a():\r\n a, b = map(int, sys.stdin.read().split())\r\n print((a + b - 1) // b * b - a)\r\n\r\n @staticmethod\r\n def b():\r\n n, x, *a = map(int, sys.stdin.read().split())\r\n print(sum(a[i] for i in range(n) if x >> i & 1))\r\n\r\n @staticmethod\r\n def c():\r\n n, *ab = map(int, sys.stdin.read().split())\r\n a, b = np.array(ab).reshape(n, 2).T\r\n res = np.zeros(10**6 + 2, dtype=np.int64)\r\n np.add.at(res, a, 1)\r\n np.subtract.at(res, b + 1, 1)\r\n np.cumsum(res, out=res)\r\n print(res.max())\r\n\r\n @staticmethod\r\n def d():\r\n n = int(sys.stdin.readline().rstrip())\r\n # edges = [[] for _ in range(n)]\r\n g = GeometryTopology.Graph()\r\n for _ in range(n - 1):\r\n x, y = map(int, sys.stdin.readline().split())\r\n x -= 1\r\n y -= 1\r\n g.add_edge(x, y, weight=1)\r\n g.add_edge(y, x, weight=1)\r\n\r\n g.init_tree()\r\n\r\n # tree = GeometryTopology.TreeGraph(n, edges, 0)\r\n q, *ab = map(int, sys.stdin.read().split())\r\n for a, b in zip(*[iter(ab)] * 2):\r\n a -= 1\r\n b -= 1\r\n print(g.find_dist(a, b) + 1)\r\n\r\n class ABC015:\r\n @staticmethod\r\n def a():\r\n a, b = sys.stdin.read().split()\r\n print(a if len(a) > len(b) else b)\r\n\r\n @staticmethod\r\n def b():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n print(\r\n np.ceil(\r\n a[np.nonzero(a)[0]].sum() / np.count_nonzero(a)\r\n ).astype(np.int8)\r\n )\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *t = map(int, sys.stdin.read().split())\r\n t = np.array(t).reshape(n, k)\r\n x = np.zeros((1, 1), dtype=np.int8)\r\n for i in range(n):\r\n x = x.reshape(-1, 1) ^ t[i]\r\n print(\"Found\" if np.count_nonzero(x == 0) > 0 else \"Nothing\")\r\n\r\n @staticmethod\r\n def d():\r\n w, n, k, *ab = map(int, sys.stdin.read().split())\r\n dp = np.zeros((k + 1, w + 1), dtype=np.int32)\r\n for a, b in zip(*[iter(ab)] * 2):\r\n prev = dp.copy()\r\n np.maximum(dp[1:, a:], prev[:-1, :-a] + b, out=dp[1:, a:])\r\n print(dp[k][w])\r\n\r\n class ABC016:\r\n @staticmethod\r\n def a():\r\n m, d = map(int, sys.stdin.readline().split())\r\n print(\"YES\" if m % d == 0 else \"NO\")\r\n\r\n @staticmethod\r\n def b():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n f1, f2 = a + b == c, a - b == c\r\n if f1 & f2:\r\n print(\"?\")\r\n elif f1 & (~f2):\r\n print(\"+\")\r\n elif (~f1) & f2:\r\n print(\"-\")\r\n else:\r\n print(\"!\")\r\n\r\n @staticmethod\r\n def c():\r\n n, _, *ab = map(int, sys.stdin.read().split())\r\n friends = [0] * n\r\n for a, b in zip(*[iter(ab)] * 2):\r\n a -= 1\r\n b -= 1\r\n friends[a] |= 1 << b\r\n friends[b] |= 1 << a\r\n res = [\r\n bit_count(\r\n cumor(friends[j] for j in range(n) if friends[i] >> j & 1)\r\n & ~(friends[i] | 1 << i)\r\n )\r\n for i in range(n)\r\n ]\r\n print(*res, sep=\"\\n\")\r\n\r\n @staticmethod\r\n def d():\r\n sx, sy, gx, gy = map(int, sys.stdin.readline().split())\r\n seg1 = ((sx, sy), (gx, gy))\r\n n = int(sys.stdin.readline().rstrip())\r\n p1 = (\r\n np.array(sys.stdin.read().split(), dtype=np.int64)\r\n .reshape(n, 2)\r\n 
.T\r\n )\r\n p2 = np.hstack((p1[:, 1:], p1[:, :1]))\r\n seg2 = (p1, p2)\r\n print(\r\n np.count_nonzero(GeometryTopology.intersect(seg1, seg2)) // 2\r\n + 1\r\n )\r\n\r\n class ABC017:\r\n @staticmethod\r\n def a():\r\n s, e = (\r\n np.array(sys.stdin.read().split(), dtype=np.int16)\r\n .reshape(3, 2)\r\n .T\r\n )\r\n print((s // 10 * e).sum())\r\n\r\n @staticmethod\r\n def b():\r\n choku_tail = set(\"ch, o, k, u\".split(\", \"))\r\n\r\n def is_choku(s):\r\n if s == \"\":\r\n return True\r\n if len(s) >= 1 and (s[-1] in choku_tail) and is_choku(s[:-1]):\r\n return True\r\n if len(s) >= 2 and (s[-2:] in choku_tail) and is_choku(s[:-2]):\r\n return True\r\n return False\r\n\r\n print(\"YES\" if is_choku(sys.stdin.readline().rstrip()) else \"NO\")\r\n\r\n @staticmethod\r\n def c():\r\n n, m, *lrs = map(int, sys.stdin.read().split())\r\n l, r, s = np.array(lrs).reshape(n, 3).T\r\n score = np.zeros((m + 1,), dtype=np.int32)\r\n np.add.at(score, l - 1, s)\r\n np.subtract.at(score, r, s)\r\n np.cumsum(score, out=score)\r\n print(s.sum() - score[:m].min())\r\n\r\n @staticmethod\r\n def d():\r\n n, m, *f = map(int, sys.stdin.read().split())\r\n prev = [0] * (n + 1)\r\n tmp = defaultdict(int)\r\n for i in range(n):\r\n prev[i + 1] = tmp[f[i]]\r\n tmp[f[i]] = i + 1\r\n\r\n dp = [0] * (n + 1)\r\n dp[0] = 1\r\n l, s = 0, dp[0]\r\n for i in range(1, n + 1):\r\n while l < prev[i]:\r\n s = (s - dp[l]) % MOD\r\n l += 1\r\n dp[i] = s\r\n s = (s + dp[i]) % MOD\r\n print(dp[n])\r\n\r\n class ABC018:\r\n @staticmethod\r\n def a():\r\n (*a,) = map(int, sys.stdin.read().split())\r\n a = sorted(enumerate(a), key=lambda x: -x[1])\r\n res = [None] * 3\r\n for i in range(3):\r\n res[a[i][0]] = i + 1\r\n print(*res, sep=\"\\n\")\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip()\r\n n, *lr = map(int, sys.stdin.read().split())\r\n for l, r in zip(*[iter(lr)] * 2):\r\n l -= 1\r\n r -= 1\r\n s = s[:l] + s[l : r + 1][::-1] + s[r + 1 :]\r\n print(s)\r\n\r\n @staticmethod\r\n def c():\r\n r, c, k = map(int, sys.stdin.readline().split())\r\n s = np.array([list(s) for s in sys.stdin.read().split()])\r\n s = np.pad(s, 1, constant_values=\"x\")\r\n\r\n a = np.zeros_like(s, dtype=np.float64)\r\n a[s == \"o\"] = np.inf\r\n for i in range(1, r + 1):\r\n np.minimum(a[i - 1, :] + 1, a[i, :], out=a[i, :])\r\n for i in range(r, 0, -1):\r\n np.minimum(a[i + 1, :] + 1, a[i, :], out=a[i, :])\r\n for j in range(1, c + 1):\r\n np.minimum(a[:, j - 1] + 1, a[:, j], out=a[:, j])\r\n for j in range(c, 0, -1):\r\n np.minimum(a[:, j + 1] + 1, a[:, j], out=a[:, j])\r\n print(np.count_nonzero(a >= k))\r\n\r\n @staticmethod\r\n def c_2():\r\n r, c, k = map(int, sys.stdin.readline().split())\r\n s = np.array([list(s) for s in sys.stdin.read().split()])\r\n s = np.pad(s, 1, constant_values=\"x\")\r\n a = (s == \"o\").astype(np.int16)\r\n a = distance_transform_cdt(a, metric=\"taxicab\")\r\n print(np.count_nonzero(a >= k))\r\n\r\n @staticmethod\r\n def d():\r\n n, m, p, q, r, *xyz = map(int, sys.stdin.read().split())\r\n x, y, z = np.array(xyz).reshape(r, 3).T\r\n h = np.zeros((n, m), dtype=np.int32)\r\n h[x - 1, y - 1] = z\r\n g = np.array([*itertools.combinations(range(n), p)])\r\n print(np.sort(h[g].sum(axis=1), axis=1)[:, -q:].sum(axis=1).max())\r\n\r\n class ABC019:\r\n @staticmethod\r\n def a():\r\n (*a,) = map(int, sys.stdin.readline().split())\r\n print(sorted(a)[1])\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip() + \"$\"\r\n cnt = 0\r\n prev = \"$\"\r\n t = \"\"\r\n for c in s:\r\n if c == 
prev:\r\n cnt += 1\r\n continue\r\n t += prev + str(cnt)\r\n prev = c\r\n cnt = 1\r\n print(t[2:])\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n res = set()\r\n for x in a:\r\n while not x & 1:\r\n x >>= 1\r\n res.add(x)\r\n print(len(res))\r\n\r\n @staticmethod\r\n def d():\r\n def inquire(u, v):\r\n print(f\"? {u} {v}\".format(u, v), flush=True)\r\n return int(sys.stdin.readline().rstrip())\r\n\r\n n = int(sys.stdin.readline().rstrip())\r\n u = sorted([(inquire(1, v), v) for v in range(2, n + 1)])[-1][1]\r\n d = max((inquire(u, v)) for v in range(1, n + 1) if u != v)\r\n print(f\"! {d}\")\r\n\r\n class ABC020:\r\n @staticmethod\r\n def a():\r\n print(\r\n \"ABC\"\r\n if int(sys.stdin.readline().rstrip()) == 1\r\n else \"chokudai\"\r\n )\r\n\r\n @staticmethod\r\n def b():\r\n a, b = sys.stdin.readline().split()\r\n print(int(a + b) * 2)\r\n\r\n @staticmethod\r\n def c():\r\n h, w, t = map(int, sys.stdin.readline().split())\r\n s = [list(s) for s in sys.stdin.read().split()]\r\n for i in range(h):\r\n for j in range(w):\r\n if s[i][j] == \"S\":\r\n sy, sx = i, j\r\n if s[i][j] == \"G\":\r\n gy, gx = i, j\r\n s[sy][sx] = s[gy][gx] = \".\"\r\n source, target = (sy, sx), (gy, gx)\r\n\r\n def heuristic_function(u, v=target):\r\n return abs(v[0] - u[0]) + abs(v[1] - u[0])\r\n\r\n def min_time(x):\r\n \"\"\"my lib\"\"\"\r\n graph = GeometryTopology.Graph()\r\n for i in range(h):\r\n for j in range(w):\r\n graph.add_node((i, j))\r\n for i in range(h):\r\n for j in range(w):\r\n if i > 0:\r\n graph.add_edge(\r\n (i, j),\r\n (i - 1, j),\r\n weight=(1 if s[i - 1][j] == \".\" else x),\r\n )\r\n if i < h - 1:\r\n graph.add_edge(\r\n (i, j),\r\n (i + 1, j),\r\n weight=(1 if s[i + 1][j] == \".\" else x),\r\n )\r\n if j > 0:\r\n graph.add_edge(\r\n (i, j),\r\n (i, j - 1),\r\n weight=(1 if s[i][j - 1] == \".\" else x),\r\n )\r\n if j < w - 1:\r\n graph.add_edge(\r\n (i, j),\r\n (i, j + 1),\r\n weight=(1 if s[i][j + 1] == \".\" else x),\r\n )\r\n\r\n return graph.dijkstra(source)[target]\r\n # return graph.astar(source, target, heuristic_function)\r\n\r\n \"\"\"networkx\"\"\"\r\n graph = nx.DiGraph()\r\n\r\n for i in range(h):\r\n for j in range(w):\r\n if i > 0:\r\n graph.add_edge(\r\n (i, j),\r\n (i - 1, j),\r\n weight=(1 if s[i - 1][j] == \".\" else x),\r\n )\r\n if i < h - 1:\r\n graph.add_edge(\r\n (i, j),\r\n (i + 1, j),\r\n weight=(1 if s[i + 1][j] == \".\" else x),\r\n )\r\n if j > 0:\r\n graph.add_edge(\r\n (i, j),\r\n (i, j - 1),\r\n weight=(1 if s[i][j - 1] == \".\" else x),\r\n )\r\n if j < w - 1:\r\n graph.add_edge(\r\n (i, j),\r\n (i, j + 1),\r\n weight=(1 if s[i][j + 1] == \".\" else x),\r\n )\r\n\r\n return nx.dijkstra_path_length(graph, source, target)\r\n return nx.astar_path_length(\r\n graph, source, target, heuristic_function\r\n )\r\n\r\n def binary_search():\r\n lo, hi = 1, t + 1\r\n while lo + 1 < hi:\r\n x = (lo + hi) // 2\r\n if min_time(x) > t:\r\n hi = x\r\n else:\r\n lo = x\r\n return lo\r\n\r\n print(binary_search())\r\n\r\n @staticmethod\r\n def d():\r\n n, k = map(int, sys.stdin.readline().split())\r\n div = sorted(NumberTheory.find_divisors(k))\r\n l = len(div)\r\n s = [0] * l\r\n for i, d in enumerate(div):\r\n s[i] = (1 + n // d) * (n // d) // 2 * d % MOD\r\n for i in range(l - 1, -1, -1):\r\n for j in range(i + 1, l):\r\n if div[j] % div[i]:\r\n continue\r\n s[i] = (s[i] - s[j]) % MOD\r\n\r\n print(\r\n sum(s[i] * k // div[i] % MOD for i in range(l)) % MOD\r\n ) # ans is LCM.\r\n\r\n class ABC021:\r\n @staticmethod\r\n def a():\r\n 
n = int(sys.stdin.readline().rstrip())\r\n s = [1 << i for i in range(5) if n >> i & 1]\r\n print(len(s), *s, sep=\"\\n\")\r\n\r\n @staticmethod\r\n def b():\r\n n, a, b, k, *p = map(int, sys.stdin.read().split())\r\n print(\"YES\" if len(set(p) | set([a, b])) == k + 2 else \"NO\")\r\n\r\n @staticmethod\r\n def c():\r\n n, a, b, m, *xy = map(int, sys.stdin.read().split())\r\n x, y = np.array(xy).reshape(m, 2).T - 1\r\n a -= 1\r\n b -= 1\r\n g = csgraph_to_dense(\r\n csr_matrix((np.ones(m), (x, y)), (n, n), dtype=np.int8)\r\n )\r\n g = np.logical_or(g, g.T)\r\n paths = np.zeros(n, dtype=np.int64).reshape(-1, 1)\r\n paths[a, 0] = 1\r\n while not paths[b, 0]:\r\n paths = np.dot(g, paths) % MOD\r\n print(paths[b, 0])\r\n\r\n @staticmethod\r\n def c_2():\r\n n, a, b, m, *xy = map(int, sys.stdin.read().split())\r\n a -= 1\r\n b -= 1\r\n g = GeometryTopology.Graph()\r\n\r\n for x, y in zip(*[iter(xy)] * 2):\r\n x -= 1\r\n y -= 1\r\n g.add_edge(x, y, weight=1)\r\n g.add_edge(y, x, weight=1)\r\n\r\n dist, paths = g.dijkstra(a, paths_cnt=True, mod=MOD)\r\n print(paths[b])\r\n\r\n @staticmethod\r\n def d():\r\n n, k = map(int, sys.stdin.read().split())\r\n combinatorics = Combinatorics()\r\n print(combinatorics.mod_choose(n + k - 1, k))\r\n\r\n class ABC022:\r\n @staticmethod\r\n def a():\r\n n, s, t, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n np.cumsum(a, out=a)\r\n print(((s <= a) & (a <= t)).sum())\r\n\r\n @staticmethod\r\n def b():\r\n n, *a = map(int, sys.stdin.read().split())\r\n c = Counter(a)\r\n print(sum(c.values()) - len(c))\r\n\r\n @staticmethod\r\n def c():\r\n n, m, *uvl = map(int, sys.stdin.read().split())\r\n u, v, l = np.array(uvl).reshape(m, 3).T\r\n u -= 1\r\n v -= 1\r\n g = csgraph_to_dense(csr_matrix((l, (u, v)), (n, n)))\r\n g += g.T\r\n g[g == 0] = np.inf\r\n dist0 = g[0].copy()\r\n g[0] = 0\r\n g[:, 0] = 0\r\n dist = shortest_path(g, method=\"FW\", directed=False)\r\n u, v = np.array([*itertools.combinations(range(1, n), 2)]).T\r\n res = (dist0[u] + dist[u, v] + dist0[v]).min()\r\n print(-1 if res == np.inf else int(res))\r\n\r\n @staticmethod\r\n def d():\r\n n, *ab = map(int, sys.stdin.read().split())\r\n c = np.array(ab).reshape(2, n, 2)\r\n g = c.mean(axis=1)\r\n d = np.sqrt(((c - g[:, None, :]) ** 2).sum(axis=-1)).sum(axis=1)\r\n print(d[1] / d[0])\r\n\r\n class ABC023:\r\n @staticmethod\r\n def a():\r\n print(sum(divmod(int(sys.stdin.readline().rstrip()), 10)))\r\n\r\n @staticmethod\r\n def b():\r\n n, s = sys.stdin.read().split()\r\n n = int(n)\r\n t = \"b\"\r\n for i in range(n // 2):\r\n if i % 3 == 0:\r\n t = \"a\" + t + \"c\"\r\n elif i % 3 == 1:\r\n t = \"c\" + t + \"a\"\r\n else:\r\n t = \"b\" + t + \"b\"\r\n print(n // 2 if t == s else -1)\r\n\r\n @staticmethod\r\n def b_2():\r\n n, s = sys.stdin.read().split()\r\n n = int(n)\r\n if n & 1 ^ 1:\r\n print(-1)\r\n return\r\n a = list(\"abc\")\r\n i = (1 - n // 2) % 3\r\n for c in s:\r\n if c != a[i]:\r\n print(-1)\r\n return\r\n i = (i + 1) % 3\r\n print(n // 2)\r\n\r\n @staticmethod\r\n def c():\r\n h, w, k, n, *rc = map(int, sys.stdin.read().split())\r\n r, c = np.array(rc).reshape(n, 2).T - 1\r\n rb = np.bincount(r, minlength=h)\r\n cb = np.bincount(c, minlength=w)\r\n rbb = np.bincount(rb, minlength=k + 1)\r\n cbb = np.bincount(cb, minlength=k + 1)\r\n tot = (rbb[: k + 1] * cbb[k::-1]).sum()\r\n real = np.bincount(rb[r] + cb[c] - 1, minlength=k + 1)\r\n print(tot - real[k - 1] + real[k])\r\n\r\n @staticmethod\r\n def d():\r\n n, *hs = map(int, sys.stdin.read().split())\r\n h, s = 
np.array(hs).reshape(n, 2).T\r\n\r\n t = np.arange(n)\r\n\r\n def is_ok(x):\r\n t_lim = (x - h) // s\r\n t_lim.sort()\r\n return np.all(t_lim >= t)\r\n\r\n def binary_search():\r\n lo, hi = 0, 10**14\r\n while lo + 1 < hi:\r\n x = (lo + hi) // 2\r\n if is_ok(x):\r\n hi = x\r\n else:\r\n lo = x\r\n return hi\r\n\r\n print(binary_search())\r\n\r\n class ABC024:\r\n @staticmethod\r\n def a():\r\n a, b, c, k, s, t = map(int, sys.stdin.read().split())\r\n print(a * s + b * t - c * (s + t) * (s + t >= k))\r\n\r\n @staticmethod\r\n def b():\r\n n, t, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n print(np.minimum(a[1:] - a[:-1], t).sum() + t)\r\n\r\n @staticmethod\r\n def c():\r\n n, d, k, *lrst = map(int, sys.stdin.read().split())\r\n lrst = np.array(lrst)\r\n lr = lrst[: 2 * d].reshape(d, 2)\r\n s, t = lrst[2 * d :].reshape(k, 2).T\r\n day = np.zeros((k,), dtype=np.int32)\r\n for i in range(d):\r\n l, r = lr[i]\r\n move = (l <= s) & (s <= r) & (s != t)\r\n reach = move & (l <= t) & (t <= r)\r\n s[move & (s < t)] = r\r\n s[move & (s > t)] = l\r\n s[reach] = t[reach]\r\n day[reach] = i + 1\r\n print(*day, sep=\"\\n\")\r\n\r\n @staticmethod\r\n def d():\r\n a, b, c = map(int, sys.stdin.read().split())\r\n p = MOD\r\n denom = pow(a * b % p - b * c % p + c * a % p, p - 2, p)\r\n w = (b * c - a * b) % p * denom % p\r\n h = (b * c - a * c) % p * denom % p\r\n print(h, w)\r\n\r\n class ABC025:\r\n @staticmethod\r\n def a():\r\n s, n = sys.stdin.read().split()\r\n n = int(n)\r\n i, j = divmod(n - 1, 5)\r\n print(s[i] + s[j])\r\n\r\n @staticmethod\r\n def b():\r\n n, a, b = map(int, sys.stdin.readline().split())\r\n res = defaultdict(int)\r\n for _ in range(n):\r\n s, d = sys.stdin.readline().split()\r\n d = int(d)\r\n res[s] += min(max(d, a), b)\r\n res = res[\"East\"] - res[\"West\"]\r\n if res == 0:\r\n ans = 0\r\n elif res > 0:\r\n ans = f\"East {res}\"\r\n else:\r\n ans = f\"West {-res}\"\r\n print(ans)\r\n\r\n @staticmethod\r\n def c():\r\n b = [0] * 6\r\n for i in range(2):\r\n (*row,) = map(int, sys.stdin.readline().split())\r\n for j in range(3):\r\n b[i * 3 + j] = row[j]\r\n c = [0] * 8\r\n for i in range(3):\r\n (*row,) = map(int, sys.stdin.readline().split())\r\n for j in range(2):\r\n c[i * 3 + j] = row[j]\r\n tot = sum(b) + sum(c)\r\n\r\n @lru_cache(maxsize=None)\r\n def f(s=tuple(0 for _ in range(9))):\r\n if all(s):\r\n res = 0\r\n for i in range(6):\r\n res += (s[i] == s[i + 3]) * b[i]\r\n for i in range(8):\r\n res += (s[i] == s[i + 1]) * c[i]\r\n return res\r\n cand = [i for i in range(9) if not s[i]]\r\n flg = len(cand) & 1\r\n s = list(s)\r\n res = []\r\n for i in cand:\r\n s[i] = (flg ^ 1) + 1\r\n res.append(f(tuple(s)))\r\n s[i] = 0\r\n return sorted(res, reverse=flg)[0]\r\n\r\n a = f()\r\n b = tot - a\r\n print(a)\r\n print(b)\r\n\r\n class ABC026:\r\n @staticmethod\r\n def a():\r\n a = int(sys.stdin.readline().rstrip())\r\n print(a // 2 * (a - a // 2))\r\n\r\n @staticmethod\r\n def b():\r\n n, *r = map(int, sys.stdin.read().split())\r\n s = np.pi * np.array([0] + r) ** 2\r\n s.sort()\r\n res = s[n::-2].sum() - s[n - 1 :: -2].sum()\r\n print(res)\r\n\r\n @staticmethod\r\n def c():\r\n n, *b = map(int, sys.stdin.read().split())\r\n g = GeometryTopology.Graph()\r\n for i in range(1, n):\r\n g.add_edge(b[i - 1] - 1, i, weight=1)\r\n\r\n def f(u=0):\r\n if not g.edges[u]:\r\n return 1\r\n s = [f(v) for v in g.edges[u]]\r\n return max(s) + min(s) + 1\r\n\r\n print(f())\r\n\r\n @staticmethod\r\n def d():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n\r\n def 
f(t):\r\n return a * t + b * np.sin(c * t * np.pi) - 100\r\n\r\n print(optimize.brenth(f, 0, 200))\r\n\r\n class ABC027:\r\n @staticmethod\r\n def a():\r\n l = [int(l) for l in sys.stdin.readline().split()]\r\n l.sort()\r\n print(l[2] if l[0] == l[1] else l[0])\r\n\r\n @staticmethod\r\n def b():\r\n n, *a = map(int, sys.stdin.read().split())\r\n m, r = divmod(sum(a), n)\r\n if r:\r\n print(-1)\r\n return\r\n population = 0\r\n towns = 0\r\n cnt = 0\r\n for x in a:\r\n population += x\r\n towns += 1\r\n if population / towns != m:\r\n cnt += 1\r\n continue\r\n population, towns = 0, 0\r\n print(cnt)\r\n\r\n @staticmethod\r\n def c():\r\n n = int(sys.stdin.readline().rstrip())\r\n flg = n.bit_length() & 1 ^ 1\r\n t = 0\r\n x = 1\r\n while x <= n:\r\n t += 1\r\n x = 2 * x + 1 if t & 1 ^ flg else 2 * x\r\n print(\"Aoki\" if t & 1 else \"Takahashi\")\r\n\r\n class ABC032:\r\n @staticmethod\r\n def a():\r\n a, b, n = map(int, sys.stdin.read().split())\r\n l = NumberTheory.lcm(a, b)\r\n print((n + l - 1) // l * l)\r\n\r\n @staticmethod\r\n def b():\r\n s, k = sys.stdin.read().split()\r\n k = int(k)\r\n res = set()\r\n for i in range(len(s) - k + 1):\r\n res.add(s[i : i + k])\r\n print(len(res))\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *s = map(int, sys.stdin.read().split())\r\n if 0 in s:\r\n print(n)\r\n return\r\n s += [inf]\r\n res = 0\r\n l = r = 0\r\n tmp = 1\r\n while r <= n:\r\n tmp *= s[r]\r\n while tmp > k:\r\n res = max(res, r - l)\r\n tmp //= s[l]\r\n l += 1\r\n r += 1\r\n print(res)\r\n\r\n class ABC033:\r\n @staticmethod\r\n def a():\r\n n = set(sys.stdin.readline().rstrip())\r\n print(\"SAME\" if len(n) == 1 else \"DIFFERENT\")\r\n\r\n @staticmethod\r\n def b():\r\n n = int(sys.stdin.readline().rstrip())\r\n res = dict()\r\n for _ in range(n):\r\n s, p = sys.stdin.readline().split()\r\n p = int(p)\r\n res[s] = p\r\n tot = sum(res.values())\r\n for s, p in res.items():\r\n if p > tot / 2:\r\n print(s)\r\n return\r\n print(\"atcoder\")\r\n\r\n @staticmethod\r\n def c():\r\n s = sys.stdin.readline().rstrip()\r\n res = sum(not \"0\" in f for f in s.split(\"+\"))\r\n print(res)\r\n\r\n class ABC034:\r\n @staticmethod\r\n def a():\r\n x, y = map(int, sys.stdin.readline().split())\r\n print(\"Better\" if y > x else \"Worse\")\r\n\r\n @staticmethod\r\n def b():\r\n n = int(sys.stdin.readline().rstrip())\r\n print(n + 1 if n & 1 else n - 1)\r\n\r\n @staticmethod\r\n def c():\r\n h, w = map(int, sys.stdin.read().split())\r\n combinatorics = Combinatorics(n=2 * 10**5, mod=MOD)\r\n print(combinatorics.mod_choose(h + w - 2, h - 1))\r\n\r\n @staticmethod\r\n def d():\r\n n, k, *wp = map(int, sys.stdin.read().split())\r\n w, p = np.array(wp).reshape(-1, 2).T\r\n\r\n def f(x):\r\n return np.sort(w * (p - x))[-k:].sum()\r\n\r\n print(optimize.bisect(f, 0, 100))\r\n\r\n class ABC035:\r\n @staticmethod\r\n def a():\r\n w, h = map(int, sys.stdin.readline().split())\r\n print(\"4:3\" if 4 * h == 3 * w else \"16:9\")\r\n\r\n @staticmethod\r\n def b():\r\n s, t = sys.stdin.read().split()\r\n y = 0\r\n x = 0\r\n z = 0\r\n for c in s:\r\n if c == \"?\":\r\n z += 1\r\n elif c == \"L\":\r\n x -= 1\r\n elif c == \"R\":\r\n x += 1\r\n elif c == \"D\":\r\n y -= 1\r\n elif c == \"U\":\r\n y += 1\r\n d = abs(y) + abs(x)\r\n if t == \"1\":\r\n print(d + z)\r\n else:\r\n print(max(d - z, (d - z) & 1))\r\n\r\n @staticmethod\r\n def c():\r\n n, q, *lr = map(int, sys.stdin.read().split())\r\n l, r = np.array(lr).reshape(q, 2).T\r\n res = np.zeros(n + 1, dtype=int)\r\n np.add.at(res, l - 1, 1)\r\n 
np.subtract.at(res, r, 1)\r\n np.cumsum(res, out=res)\r\n res = res & 1\r\n print(\"\".join(map(str, res[:-1])))\r\n\r\n @staticmethod\r\n def d():\r\n n, m, t = map(int, sys.stdin.readline().split())\r\n point = np.array(sys.stdin.readline().split(), dtype=int)\r\n a, b, c = (\r\n np.array(sys.stdin.read().split(), dtype=np.int64)\r\n .reshape(m, 3)\r\n .T\r\n )\r\n a -= 1\r\n b -= 1\r\n d_1 = shortest_path(\r\n csr_matrix((c, (a, b)), (n, n)),\r\n method=\"D\",\r\n directed=True,\r\n indices=0,\r\n )\r\n d_2 = shortest_path(\r\n csr_matrix((c, (b, a)), (n, n)),\r\n method=\"D\",\r\n directed=True,\r\n indices=0,\r\n )\r\n print(int(np.amax((t - (d_1 + d_2)) * point)))\r\n\r\n class ABC036:\r\n @staticmethod\r\n def a():\r\n a, b = map(int, sys.stdin.readline().split())\r\n print((b + a - 1) // a)\r\n\r\n @staticmethod\r\n def b():\r\n n, *s = sys.stdin.read().split()\r\n n = int(n)\r\n for j in range(n):\r\n row = \"\"\r\n for i in range(n - 1, -1, -1):\r\n row += s[i][j]\r\n print(row)\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n b = [None] * n\r\n prev = None\r\n j = -1\r\n for i, x in sorted(enumerate(a), key=lambda x: x[1]):\r\n if x != prev:\r\n j += 1\r\n b[i] = j\r\n prev = x\r\n print(*b, sep=\"\\n\")\r\n\r\n @staticmethod\r\n def d():\r\n n, *ab = map(int, sys.stdin.read().split())\r\n edges = [[] for _ in range(n)]\r\n for a, b in zip(*[iter(ab)] * 2):\r\n a -= 1\r\n b -= 1\r\n edges[a].append(b)\r\n edges[b].append(a)\r\n parent = [None] * n\r\n\r\n def count(u):\r\n black, white = 1, 1\r\n for v in edges[u]:\r\n if v == parent[u]:\r\n continue\r\n parent[v] = u\r\n b, w = count(v)\r\n black *= w\r\n black %= MOD\r\n white *= (b + w) % MOD\r\n white %= MOD\r\n return black, white\r\n\r\n print(sum(count(0)) % MOD)\r\n\r\n class ABC037:\r\n @staticmethod\r\n def a():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n print(c // min(a, b))\r\n\r\n @staticmethod\r\n def b():\r\n n, q, *lrt = map(int, sys.stdin.read().split())\r\n a = np.zeros(n, dtype=int)\r\n for l, r, t in zip(*[iter(lrt)] * 3):\r\n a[l - 1 : r] = t\r\n print(*a, sep=\"\\n\")\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *a = map(int, sys.stdin.read().split())\r\n a = np.array([0] + a)\r\n np.cumsum(a, out=a)\r\n s = (a[k:] - a[:-k]).sum()\r\n print(s)\r\n\r\n @staticmethod\r\n def d():\r\n h, w = map(int, sys.stdin.readline().split())\r\n a = [\r\n [int(x) for x in sys.stdin.readline().split()]\r\n for _ in range(h)\r\n ]\r\n dyx = [(-1, 0), (0, -1), (1, 0), (0, 1)]\r\n path = [[None] * w for _ in range(h)]\r\n\r\n def paths(i, j):\r\n if path[i][j]:\r\n return path[i][j]\r\n val = a[i][j]\r\n cnt = 1\r\n for dy, dx in dyx:\r\n y = i + dy\r\n x = j + dx\r\n if 0 <= y < h and 0 <= x < w and a[y][x] < val:\r\n cnt += paths(y, x)\r\n cnt %= MOD\r\n path[i][j] = cnt\r\n return cnt\r\n\r\n tot = 0\r\n for i in range(h):\r\n for j in range(w):\r\n tot += paths(i, j)\r\n tot %= MOD\r\n print(tot)\r\n\r\n class ABC038:\r\n @staticmethod\r\n def a():\r\n s = sys.stdin.readline().rstrip()\r\n print(\"YES\" if s[-1] == \"T\" else \"NO\")\r\n\r\n @staticmethod\r\n def b():\r\n a, b, c, d = map(int, sys.stdin.read().split())\r\n print(\"YES\" if a == c or b == c or a == d or b == d else \"NO\")\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a += [-1]\r\n cnt = n\r\n tmp = 1\r\n for i in range(n):\r\n if a[i + 1] > a[i]:\r\n tmp += 1\r\n else:\r\n cnt += tmp * (tmp - 1) // 2\r\n tmp = 1\r\n print(cnt)\r\n\r\n @staticmethod\r\n def d():\r\n 
n, *wh = map(int, sys.stdin.read().split())\r\n wh = sorted(zip(*[iter(wh)] * 2), key=lambda x: (-x[0], x[1]))\r\n w = [x[1] for x in wh][::-1]\r\n res = [inf] * n\r\n for x in w:\r\n res[bi_l(res, x)] = x\r\n print(bi_l(res, inf))\r\n\r\n class ABC039:\r\n @staticmethod\r\n def a():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n print((a * b + b * c + c * a) * 2)\r\n\r\n @staticmethod\r\n def b():\r\n x = int(sys.stdin.readline().rstrip())\r\n for n in range(1, int(x**0.5) + 1):\r\n if pow(n, 4) == x:\r\n print(n)\r\n return\r\n\r\n @staticmethod\r\n def c():\r\n board = \"WBWBWWBWBWBW\" * 3\r\n convert = \"Do, *, Re, *, Mi, Fa, *, So, *, La, *, Si\".split(\", \")\r\n s = sys.stdin.readline().rstrip()\r\n print(convert[board.index(s)])\r\n\r\n @staticmethod\r\n def d():\r\n h, w = map(int, sys.stdin.readline().split())\r\n s = sys.stdin.read().split()\r\n dyx = list(itertools.product((-1, 0, 1), repeat=2))\r\n black_certain = set()\r\n black_before = set()\r\n for i in range(h):\r\n for j in range(w):\r\n black_cand = set()\r\n for dy, dx in dyx:\r\n y = i + dy\r\n x = j + dx\r\n if y < 0 or y >= h or x < 0 or x >= w:\r\n continue\r\n if s[y][x] == \".\":\r\n break\r\n black_cand.add((y, x))\r\n else:\r\n black_before.add((i, j))\r\n black_certain |= black_cand\r\n for i in range(h):\r\n for j in range(w):\r\n if s[i][j] == \"#\" and not (i, j) in black_certain:\r\n print(\"impossible\")\r\n return\r\n print(\"possible\")\r\n for i in range(h):\r\n row = \"\"\r\n for j in range(w):\r\n row += \"#\" if (i, j) in black_before else \".\"\r\n print(\"\".join(row))\r\n\r\n class ABC040:\r\n @staticmethod\r\n def a():\r\n n, x = map(int, sys.stdin.readline().split())\r\n print(min(x - 1, n - x))\r\n\r\n @staticmethod\r\n def b():\r\n n = int(sys.stdin.readline().rstrip())\r\n res = inf\r\n for i in range(1, int(n**0.5) + 1):\r\n res = min(res, n // i - i + n % i)\r\n print(res)\r\n\r\n @staticmethod\r\n def c():\r\n n, *h = map(int, sys.stdin.read().split())\r\n h = [h[0]] + h\r\n cost = [None] * (n + 1)\r\n cost[0] = cost[1] = 0\r\n for i in range(2, n + 1):\r\n cost[i] = min(\r\n cost[i - 2] + abs(h[i] - h[i - 2]),\r\n cost[i - 1] + abs(h[i] - h[i - 1]),\r\n )\r\n print(cost[n])\r\n\r\n @staticmethod\r\n def d():\r\n n, m = map(int, sys.stdin.readline().split())\r\n uf = GeometryTopology.UnionFind(n=n)\r\n queue = []\r\n for _ in range(m):\r\n a, b, y = map(int, sys.stdin.readline().split())\r\n heappush(queue, (-(2 * y), a - 1, b - 1))\r\n q = int(sys.stdin.readline().rstrip())\r\n for i in range(q):\r\n v, y = map(int, sys.stdin.readline().split())\r\n heappush(queue, (-(2 * y + 1), v - 1, i))\r\n res = [None] * q\r\n while queue:\r\n y, i, j = heappop(queue)\r\n if y & 1:\r\n res[j] = uf.size[uf.find_root(i)]\r\n else:\r\n uf.unite(i, j)\r\n print(*res, sep=\"\\n\")\r\n\r\n class ABC041:\r\n @staticmethod\r\n def a():\r\n s, i = sys.stdin.read().split()\r\n i = int(i)\r\n print(s[i - 1])\r\n\r\n @staticmethod\r\n def b():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n ans = a * b % MOD * c % MOD\r\n print(ans)\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n for i, h in sorted(enumerate(a), key=lambda x: -x[1]):\r\n print(i + 1)\r\n\r\n @staticmethod\r\n def d():\r\n n, m, *xy = map(int, sys.stdin.read().split())\r\n (*xy,) = zip(*[iter(xy)] * 2)\r\n edges = [0] * n\r\n for x, y in xy:\r\n x -= 1\r\n y -= 1\r\n edges[x] |= 1 << y\r\n comb = [None] * (1 << n)\r\n comb[0] = 1\r\n\r\n def count(edges, bit):\r\n if comb[bit] is not 
None:\r\n return comb[bit]\r\n comb[bit] = 0\r\n for i in range(n):\r\n if (bit >> i) & 1 and not edges[i]:\r\n nxt_bit = bit & ~(1 << i)\r\n nxt_edges = edges.copy()\r\n for j in range(n):\r\n nxt_edges[j] &= ~(1 << i)\r\n cnt = count(nxt_edges, nxt_bit)\r\n comb[bit] += cnt\r\n return comb[bit]\r\n\r\n print(count(edges, (1 << n) - 1))\r\n\r\n class ABC042:\r\n @staticmethod\r\n def a():\r\n a = [int(x) for x in sys.stdin.readline().split()]\r\n c = Counter(a)\r\n print(\"YES\" if c[5] == 2 and c[7] == 1 else \"NO\")\r\n\r\n @staticmethod\r\n def b():\r\n n, l, *s = sys.stdin.read().split()\r\n print(\"\".join(sorted(s)))\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *d = sys.stdin.read().split()\r\n l = len(n)\r\n ok = sorted(set(string.digits) - set(d))\r\n cand = [\r\n int(\"\".join(p)) for p in itertools.product(ok, repeat=l)\r\n ] + [int(min(x for x in ok if x > \"0\") + min(ok) * l)]\r\n print(cand[bi_l(cand, int(n))])\r\n\r\n @staticmethod\r\n def d():\r\n h, w, a, b = map(int, sys.stdin.read().split())\r\n combinatorics = Combinatorics(n=2 * 10**5, mod=MOD)\r\n tot = combinatorics.mod_choose(h + w - 2, h - 1)\r\n i = np.arange(h - a, h)\r\n ng = np.sum(\r\n combinatorics.mod_choose(i + b - 1, i)\r\n * combinatorics.mod_choose(h - i + w - b - 2, h - 1 - i)\r\n % MOD\r\n )\r\n tot -= ng\r\n tot %= MOD\r\n print(tot)\r\n\r\n class ABC043:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n print((1 + n) * n // 2)\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip()\r\n t = \"\"\r\n for c in s:\r\n if c == \"B\":\r\n t = t[:-1]\r\n else:\r\n t += c\r\n print(t)\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n x = np.around(a.sum() / n).astype(int)\r\n print(np.sum((a - x) ** 2))\r\n\r\n @staticmethod\r\n def d():\r\n s = sys.stdin.readline().rstrip()\r\n n = len(s)\r\n for i in range(n - 1):\r\n if s[i] == s[i + 1]:\r\n print(i + 1, i + 2)\r\n return\r\n for i in range(n - 2):\r\n if s[i] == s[i + 2]:\r\n print(i + 1, i + 3)\r\n return\r\n print(-1, -1)\r\n\r\n class ABC170:\r\n @staticmethod\r\n def a():\r\n x = [int(x) for x in sys.stdin.readline().split()]\r\n for i in range(5):\r\n if x[i] != i + 1:\r\n print(i + 1)\r\n break\r\n\r\n @staticmethod\r\n def b():\r\n x, y = map(int, sys.stdin.readline().split())\r\n print(\"Yes\" if 2 * x <= y <= 4 * x and y % 2 == 0 else \"No\")\r\n\r\n @staticmethod\r\n def c():\r\n x, n, *p = map(int, sys.stdin.read().split())\r\n a = list(set(range(102)) - set(p))\r\n a = [(abs(y - x), y) for y in a]\r\n print(sorted(a)[0][1])\r\n\r\n @staticmethod\r\n def d():\r\n n, *a = map(int, sys.stdin.read().split())\r\n cand = set(a)\r\n cnt = 0\r\n for x, c in sorted(Counter(a).items()):\r\n cnt += c == 1 and x in cand\r\n cand -= set(range(x * 2, 10**6 + 1, x))\r\n print(cnt)\r\n\r\n @staticmethod\r\n def e():\r\n n, q = map(int, sys.stdin.readline().split())\r\n queue = []\r\n m = 2 * 10**5\r\n infants = [[] for _ in range(m)]\r\n highest_rate = [None] * m\r\n where = [None] * n\r\n rate = [None] * n\r\n\r\n def entry(i, k):\r\n where[i] = k\r\n while infants[k]:\r\n r, j = heappop(infants[k])\r\n if where[j] != k or j == i:\r\n continue\r\n if rate[i] >= -r:\r\n highest_rate[k] = rate[i]\r\n heappush(queue, (rate[i], k, i))\r\n heappush(infants[k], (r, j))\r\n break\r\n else:\r\n highest_rate[k] = rate[i]\r\n heappush(queue, (rate[i], k, i))\r\n heappush(infants[k], (-rate[i], i))\r\n\r\n def transfer(i, k):\r\n now = where[i]\r\n while 
infants[now]:\r\n r, j = heappop(infants[now])\r\n if where[j] != now or j == i:\r\n continue\r\n if highest_rate[now] != -r:\r\n highest_rate[now] = -r\r\n heappush(queue, (-r, now, j))\r\n heappush(infants[now], (r, j))\r\n break\r\n else:\r\n highest_rate[now] = None\r\n entry(i, k)\r\n\r\n def inquire():\r\n while True:\r\n r, k, i = heappop(queue)\r\n if where[i] != k or r != highest_rate[k]:\r\n continue\r\n heappush(queue, (r, k, i))\r\n return r\r\n\r\n for i in range(n):\r\n a, b = map(int, sys.stdin.readline().split())\r\n rate[i] = a\r\n entry(i, b - 1)\r\n for _ in range(q):\r\n c, d = map(int, sys.stdin.readline().split())\r\n transfer(c - 1, d - 1)\r\n print(inquire())\r\n\r\n class ABC171:\r\n @staticmethod\r\n def a():\r\n c = sys.stdin.readline().rstrip()\r\n print(\"A\" if c < \"a\" else \"a\")\r\n\r\n @staticmethod\r\n def b():\r\n n, k, *p = map(int, sys.stdin.read().split())\r\n print(sum(sorted(p)[:k]))\r\n\r\n @staticmethod\r\n def c():\r\n n = int(sys.stdin.readline().rstrip())\r\n n -= 1\r\n l = 1\r\n while True:\r\n if n < pow(26, l):\r\n break\r\n n -= pow(26, l)\r\n l += 1\r\n res = \"\".join(\r\n [chr(ord(\"a\") + d) for d in NumberTheory.base_convert(n, 26)][\r\n ::-1\r\n ]\r\n )\r\n res = \"a\" * (l - len(res)) + res\r\n print(res)\r\n\r\n @staticmethod\r\n def d():\r\n n = int(sys.stdin.readline().rstrip())\r\n a = [int(x) for x in sys.stdin.readline().split()]\r\n s = sum(a)\r\n cnt = Counter(a)\r\n q = int(sys.stdin.readline().rstrip())\r\n for _ in range(q):\r\n b, c = map(int, sys.stdin.readline().split())\r\n s += (c - b) * cnt[b]\r\n print(s)\r\n cnt[c] += cnt[b]\r\n cnt[b] = 0\r\n\r\n @staticmethod\r\n def e():\r\n n, *a = map(int, sys.stdin.read().split())\r\n s = 0\r\n for x in a:\r\n s ^= x\r\n b = map(lambda x: x ^ s, a)\r\n print(*b, sep=\" \")\r\n\r\n class ABC172:\r\n @staticmethod\r\n def a():\r\n a = int(sys.stdin.readline().rstrip())\r\n print(a * (1 + a + a**2))\r\n\r\n @staticmethod\r\n def b():\r\n s, t = sys.stdin.read().split()\r\n print(sum(s[i] != t[i] for i in range(len(s))))\r\n\r\n @staticmethod\r\n def c():\r\n n, m, k = map(int, sys.stdin.readline().split())\r\n a = [0] + [int(x) for x in sys.stdin.readline().split()]\r\n b = [int(x) for x in sys.stdin.readline().split()]\r\n (*sa,) = itertools.accumulate(a)\r\n (*sb,) = itertools.accumulate(b)\r\n res = 0\r\n for i in range(n + 1):\r\n r = k - sa[i]\r\n if r < 0:\r\n break\r\n res = max(res, i + bi_r(sb, r))\r\n print(res)\r\n\r\n @staticmethod\r\n def d():\r\n n = int(sys.stdin.readline().rstrip())\r\n f = np.zeros(n + 1, dtype=np.int64)\r\n for i in range(1, n + 1):\r\n f[i::i] += 1\r\n print((np.arange(1, n + 1) * f[1:]).sum())\r\n\r\n class ABC173:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n charge = (n + 999) // 1000 * 1000 - n\r\n print(charge)\r\n\r\n @staticmethod\r\n def b():\r\n n, *s = sys.stdin.read().split()\r\n c = Counter(s)\r\n for v in \"AC, WA, TLE, RE\".split(\", \"):\r\n print(f\"{v} x {c[v]}\")\r\n\r\n @staticmethod\r\n def c():\r\n h, w, k = map(int, sys.stdin.readline().split())\r\n c = [sys.stdin.readline().rstrip() for _ in range(h)]\r\n tot = 0\r\n for i in range(1 << h):\r\n for j in range(1 << w):\r\n cnt = 0\r\n for y in range(h):\r\n for x in range(w):\r\n if i >> y & 1 or j >> x & 1:\r\n continue\r\n cnt += c[y][x] == \"#\"\r\n tot += cnt == k\r\n print(tot)\r\n\r\n @staticmethod\r\n def d():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a.sort(reverse=True)\r\n res = (\r\n a[0]\r\n + sum(a[1 : 1 + (n - 2) // 
2]) * 2\r\n + a[1 + (n - 2) // 2] * (n & 1)\r\n )\r\n print(res)\r\n\r\n @staticmethod\r\n def e():\r\n MOD = 10**9 + 7\r\n n, k, *a = map(int, sys.stdin.read().split())\r\n minus = [x for x in a if x < 0]\r\n plus = [x for x in a if x > 0]\r\n if len(plus) + len(minus) // 2 * 2 >= k: # plus\r\n (*minus,) = map(abs, minus)\r\n minus.sort(reverse=True)\r\n plus.sort(reverse=True)\r\n cand = []\r\n if len(minus) & 1:\r\n minus = minus[:-1]\r\n for i in range(0, len(minus) - 1, 2):\r\n cand.append(minus[i] * minus[i + 1] % MOD)\r\n if k & 1:\r\n res = plus[0]\r\n plus = plus[1:]\r\n else:\r\n res = 1\r\n if len(plus) & 1:\r\n plus = plus[:-1]\r\n for i in range(0, len(plus) - 1, 2):\r\n cand.append(plus[i] * plus[i + 1] % MOD)\r\n cand.sort(reverse=True)\r\n for x in cand[: k // 2]:\r\n res *= x\r\n res %= MOD\r\n print(res)\r\n elif 0 in a:\r\n print(0)\r\n else:\r\n cand = sorted(map(abs, a))\r\n res = 1\r\n for i in range(k):\r\n res *= cand[i]\r\n res %= MOD\r\n res = MOD - res\r\n print(res)\r\n pass\r\n\r\n class ABC174:\r\n @staticmethod\r\n def a():\r\n print(\"Yes\" if int(sys.stdin.readline().rstrip()) >= 30 else \"No\")\r\n\r\n class ACL001:\r\n @staticmethod\r\n def a():\r\n n, *xy = map(int, sys.stdin.read().split())\r\n (*xy,) = zip(*[iter(xy)] * 2)\r\n print(xy)\r\n pass\r\n\r\n class MSolutions2020:\r\n @staticmethod\r\n def a():\r\n x = int(sys.stdin.readline().rstrip())\r\n x -= 400\r\n print(8 - x // 200)\r\n\r\n @staticmethod\r\n def b():\r\n r, g, b, k = map(int, sys.stdin.read().split())\r\n while k and g <= r:\r\n g *= 2\r\n k -= 1\r\n while k and b <= g:\r\n b *= 2\r\n k -= 1\r\n print(\"Yes\" if r < g < b else \"No\")\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *a = map(int, sys.stdin.read().split())\r\n for i in range(k, n):\r\n print(\"Yes\" if a[i] > a[i - k] else \"No\")\r\n\r\n @staticmethod\r\n def d():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a += [-1]\r\n m = 1000\r\n s = 0\r\n for i in range(n):\r\n if a[i + 1] == a[i]:\r\n continue\r\n elif a[i + 1] > a[i]:\r\n cnt = m // a[i]\r\n m -= a[i] * cnt\r\n s += cnt\r\n else:\r\n m += a[i] * s\r\n s = 0\r\n print(m)\r\n\r\n\r\nclass Codeforces:\r\n pass\r\n\r\n\r\nclass ProjectEuler:\r\n @staticmethod\r\n def p1():\r\n def f(n, x):\r\n return (x + n // x * x) * (n // x) // 2\r\n\r\n n = 1000\r\n ans = f(n - 1, 3) + f(n - 1, 5) - f(n - 1, 15)\r\n print(ans)\r\n\r\n @staticmethod\r\n def p2():\r\n fib = [1, 2]\r\n while fib[-1] < 4 * 10**6:\r\n fib.append(fib[-1] + fib[-2])\r\n print(sum(fib[1:-1:3]))\r\n\r\n @staticmethod\r\n def p3():\r\n number_theory = NumberTheory()\r\n res = number_theory.prime_factorize(600851475143)\r\n print(max(res.keys()))\r\n\r\n @staticmethod\r\n def p4():\r\n def is_palindrome(n):\r\n n = str(n)\r\n return n == n[::-1]\r\n\r\n cand = []\r\n for a in range(100, 1000):\r\n for b in range(a, 1000):\r\n n = a * b\r\n if is_palindrome(n):\r\n cand.append(n)\r\n print(max(cand))\r\n\r\n @staticmethod\r\n def p5():\r\n number_theory = NumberTheory()\r\n res = defaultdict(int)\r\n for i in range(1, 21):\r\n for p, c in number_theory.prime_factorize(i).items():\r\n res[p] = max(res[p], c)\r\n ans = 1\r\n for p, c in res.items():\r\n ans *= pow(p, c)\r\n print(ans)\r\n\r\n @staticmethod\r\n def p6():\r\n a = np.arange(101)\r\n b = np.cumsum(a**2)\r\n a = a.cumsum()\r\n print(a[100] ** 2 - b[100])\r\n\r\n @staticmethod\r\n def p7():\r\n number_theory = NumberTheory()\r\n print(sorted(number_theory.prime_numbers)[10000])\r\n\r\n @staticmethod\r\n def p8():\r\n n = 
\"7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450\"\r\n n = [int(d) for d in list(n)]\r\n res = 0\r\n for i in range(988):\r\n x = 1\r\n for j in range(13):\r\n x *= n[i + j]\r\n res = max(res, x)\r\n print(res)\r\n\r\n @staticmethod\r\n def p9():\r\n for a in range(1, 997):\r\n for b in range(a, 998 - a):\r\n c = 1000 - a - b\r\n if a**2 + b**2 == c**2:\r\n print(a * b * c)\r\n return\r\n\r\n @staticmethod\r\n def p10():\r\n number_theory = NumberTheory(2 * 10**6 - 1)\r\n print(sum(number_theory.prime_numbers))\r\n\r\n @staticmethod\r\n def p11():\r\n grid = \"08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08 49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00 81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65 52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91 22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80 24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50 32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70 67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21 24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72 21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95 78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92 16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57 86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58 19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40 04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66 88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69 04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36 20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16 20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54 01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48\"\r\n # grid = np.array(grid.split(), dtype=np.int64).reshape(20, -1)\r\n # cand = []\r\n # for i in range(20):\r\n # bl1 = i+3 < 20\r\n # for j in range(20):\r\n # bl2 = j+3 < 20\r\n # if bl1:\r\n # np.prod\r\n # tmp = 1\r\n # for d in range(4):\r\n # tmp *= grid[i+d, j]\r\n print(grid)\r\n\r\n pass\r\n\r\n\r\nclass Yukicoder:\r\n pass\r\n\r\n\r\nif __name__ == \"__main__\":\r\n AtCoder.ABC009.d()\r\n", "import sys\r\nimport typing\r\n\r\nimport numba as nb\r\nimport numpy as np\r\n\r\n\r\[email protected]\r\ndef euler_tour_edge(\r\n g: np.ndarray,\r\n edge_idx: np.ndarray,\r\n root: int,\r\n) -> typing.Tuple[(np.ndarray, ) * 3]:\r\n n = g[:, :2].max() + 1\r\n parent = np.full(n, -1, np.int64)\r\n depth = np.zeros(n, np.int64)\r\n tour = np.empty(n << 1, np.int64)\r\n st = [root]\r\n for i in range(n << 1):\r\n u = 
st.pop()\r\n tour[i] = u\r\n if u < 0: continue\r\n st.append(~u)\r\n for v in g[edge_idx[u]:edge_idx[u + 1], 1][::-1]:\r\n if v == parent[u]: continue\r\n parent[v] = u\r\n depth[v] = depth[u] + 1\r\n st.append(v)\r\n return tour, parent, depth\r\n\r\n\r\n\r\[email protected]\r\ndef dfs_path(\r\n g: np.ndarray,\r\n edge_idx: np.ndarray,\r\n src: int,\r\n dst: int,\r\n) -> np.ndarray:\r\n _, parent, depth = euler_tour_edge(g, edge_idx, src)\r\n u = dst\r\n d = depth[u]\r\n path = np.empty(d + 1, np.int64)\r\n for i in range(d + 1):\r\n path[-1 - i] = u\r\n u = parent[u]\r\n return path\r\n\r\n\r\[email protected]\r\ndef sort_csgraph(\r\n n: int,\r\n g: np.ndarray,\r\n) -> typing.Tuple[(np.ndarray, ) * 3]:\r\n sort_idx = np.argsort(g[:, 0], kind='mergesort')\r\n g = g[sort_idx]\r\n original_idx = np.arange(len(g))[sort_idx]\r\n edge_idx = np.searchsorted(g[:, 0], np.arange(n + 1))\r\n return g, edge_idx, original_idx\r\n\r\n\r\[email protected]\r\ndef csgraph_to_directed(g: np.ndarray) -> np.ndarray:\r\n m = len(g)\r\n g = np.vstack((g, g))\r\n g[m:, :2] = g[m:, 1::-1]\r\n return g\r\n\r\n\r\[email protected]\r\ndef mod_pow(x: int, n: int, mod: int) -> int:\r\n y = 1\r\n while n:\r\n if n & 1: y = y * x % mod\r\n x = x * x % mod\r\n n >>= 1\r\n return y\r\n\r\n\r\[email protected]((nb.i8[:], nb.i8[:, :], nb.i8), cache=True)\r\ndef solve(\r\n a: np.ndarray,\r\n uv: np.ndarray,\r\n k: int,\r\n) -> typing.NoReturn:\r\n n, m = len(uv) + 1, len(a)\r\n\r\n mod = 998_244_353\r\n g = csgraph_to_directed(uv)\r\n g, edge_idx, _ = sort_csgraph(n, g)\r\n\r\n cnt = np.zeros((n, n), np.int64)\r\n total_edge_cnt = 0\r\n for i in range(m - 1):\r\n path = dfs_path(g, edge_idx, a[i], a[i + 1])\r\n for j in range(len(path) - 1):\r\n cnt[path[j], path[j + 1]] += 1\r\n total_edge_cnt += len(path) - 1\r\n\r\n if total_edge_cnt + k < 0 or (total_edge_cnt + k) & 1:\r\n print(0)\r\n return\r\n\r\n not_used_cnt = 0\r\n for i in range(len(g)):\r\n u, v = g[i]\r\n not_used_cnt += cnt[u, v] == cnt[v, u] == 0\r\n\r\n not_used_cnt //= 2\r\n\r\n r = (k + total_edge_cnt) // 2\r\n\r\n b = np.zeros(n * n, np.int64)\r\n ptr = 0\r\n for i in range(n - 1):\r\n for j in range(i + 1, n):\r\n s = cnt[i, j] + cnt[j, i]\r\n if s == 0: continue\r\n b[ptr] = s\r\n ptr += 1\r\n b = b[:ptr]\r\n\r\n assert r >= 0\r\n dp = np.zeros(r + 1, np.int64)\r\n dp[0] = 1\r\n for x in b:\r\n for j in range(r, x - 1, -1):\r\n dp[j] += dp[j - x]\r\n dp[j] %= mod\r\n ans = dp[r] * mod_pow(2, not_used_cnt, mod) % mod\r\n print(ans)\r\n\r\n\r\ndef main() -> typing.NoReturn:\r\n n, m, k = map(int, input().split())\r\n a = np.array(\r\n sys.stdin.readline().split(),\r\n dtype=np.int64,\r\n ) - 1\r\n uv = np.array(\r\n sys.stdin.read().split(),\r\n dtype=np.int64,\r\n ).reshape(n - 1, 2) - 1\r\n solve(a, uv, k)\r\n\r\n\r\nmain()\n", "import sys\r\n\r\nimport numpy as np\r\n\r\nI = np.array(sys.stdin.read().split(), dtype=np.int64)\r\nL, H, n = I[:3]\r\na = I[3:]\r\n\r\n\r\ndef main():\r\n res = np.zeros(n, dtype=np.int64)\r\n res = np.maximum(L - a, 0)\r\n res[a > H] = -1\r\n return res\r\n\r\n\r\nif __name__ == \"__main__\":\r\n ans = main()\r\n print(*ans, sep=\"\\n\")\r\n", "import sys\r\n\r\nimport numpy as np\r\n\r\nI = np.array(sys.stdin.read().split(), dtype=np.int64)\r\nn, m, q = I[:3]\r\nl, r = I[3 : 3 + m * 2].reshape(-1, 2).T\r\np, q = I[3 + m * 2 :].reshape(-1, 2).T\r\n\r\n\r\ndef main():\r\n res = np.zeros((n + 2, n + 2), dtype=np.int64)\r\n\r\n np.add.at(res, (np.full(m, 1), r), 1)\r\n np.add.at(res, (l + 1, r), -1)\r\n np.add.at(res, 
(np.full(m, 1), np.full(m, n + 1)), -1)\r\n np.add.at(res, (l + 1, np.full(m, n + 1)), 1)\r\n res = np.cumsum(res, axis=0)\r\n res = np.cumsum(res, axis=1)\r\n\r\n ans = res[p, q]\r\n return ans\r\n\r\n\r\nif __name__ == \"__main__\":\r\n ans = main()\r\n print(*ans, sep=\"\\n\")\r\n", "import sys\r\nimport typing\r\n\r\nimport numba as nb\r\nimport numpy as np\r\n\r\n\r\[email protected]\r\ndef bit_length(n: int) -> int:\r\n l = 0\r\n while 1 << l <= n: l += 1\r\n return l\r\n\r\n\r\n# segment tree lazy\r\nS = typing.TypeVar('S')\r\nF = typing.TypeVar('F')\r\[email protected]\r\ndef seg_build(\r\n op_s: typing.Callable[[S, S], S],\r\n e_s: typing.Callable[[], S],\r\n e_f: typing.Callable[[], F],\r\n a: np.ndarray,\r\n) -> typing.Tuple[np.ndarray, np.ndarray]:\r\n r\"\"\"Build new segment tree lazy from an array.\"\"\"\r\n n = 1 << bit_length(len(a) - 1)\r\n seg = np.empty((n << 1, ) + a.shape[1:], np.int64)\r\n for i in range(n << 1): seg[i] = e_s()\r\n seg[n:n + len(a)] = a.copy()\r\n for i in range(n - 1, 0, -1):\r\n seg[i] = op_s(seg[i << 1], seg[i << 1 | 1])\r\n lazy = np.empty(n, np.int64)\r\n for i in range(n): lazy[i] = e_f()\r\n return seg, lazy\r\n\r\n\r\[email protected]\r\ndef __seg_apply(\r\n op_f: typing.Callable[[F, F], F],\r\n map_: typing.Callable[[F, S], S],\r\n seg: np.ndarray,\r\n lazy: np.ndarray,\r\n i: int,\r\n f: F,\r\n) -> typing.NoReturn:\r\n seg[i] = map_(f, seg[i])\r\n if i < len(lazy): lazy[i] = op_f(f, lazy[i])\r\n\r\n\r\[email protected]\r\ndef __seg_propagate(\r\n op_f: typing.Callable[[F, F], F],\r\n e_f: typing.Callable[[], F],\r\n map_: typing.Callable[[F, S], S],\r\n seg: np.ndarray,\r\n lazy: np.ndarray,\r\n i: int,\r\n) -> typing.NoReturn:\r\n __seg_apply(op_f, map_, seg, lazy, i << 1, lazy[i])\r\n __seg_apply(op_f, map_, seg, lazy, i << 1 | 1, lazy[i])\r\n lazy[i] = e_f()\r\n\r\n\r\[email protected]\r\ndef __seg_merge(\r\n op_s: typing.Callable[[S, S], S],\r\n seg: np.ndarray,\r\n i: int,\r\n) -> typing.NoReturn:\r\n seg[i] = op_s(seg[i << 1], seg[i << 1 | 1])\r\n\r\n\r\[email protected]\r\ndef seg_set(\r\n op_s: typing.Callable[[S, S], S],\r\n op_f: typing.Callable[[F, F], F],\r\n e_f: typing.Callable[[], F],\r\n map_: typing.Callable[[F, S], S],\r\n seg: np.ndarray,\r\n lazy: np.ndarray,\r\n l: int,\r\n r: int,\r\n f: F,\r\n) -> typing.NoReturn:\r\n r\"\"\"Set x on [l, r).\r\n\r\n \\forall{l \\le i \\lt r}\\ a_i := map_(f, a_i).\r\n - operate f on a_i from right.\r\n \"\"\"\r\n n = len(seg) >> 1\r\n assert 0 <= l <= r <= n # 0 <= l <= r <= size actually\r\n l, r = l + n, r + n\r\n h = bit_length(n)\r\n\r\n for i in range(h, 0, -1):\r\n if (l >> i) << i != l:\r\n __seg_propagate(op_f, e_f, map_, seg, lazy, l >> i)\r\n if (r >> i) << i != r:\r\n __seg_propagate(op_f, e_f, map_, seg, lazy, (r - 1) >> i)\r\n\r\n l0, r0 = l, r\r\n while l < r:\r\n if l & 1: __seg_apply(op_f, map_, seg, lazy, l, f); l += 1\r\n if r & 1: r -= 1; __seg_apply(op_f, map_, seg, lazy, r, f)\r\n l, r = l >> 1, r >> 1\r\n l, r = l0, r0\r\n for i in range(1, h + 1):\r\n if (l >> i) << i != l: __seg_merge(op_s, seg, l >> i)\r\n if (r >> i) << i != r: __seg_merge(op_s, seg, (r - 1) >> i)\r\n\r\n\r\[email protected]\r\ndef seg_get(\r\n op_s: typing.Callable[[S, S], S],\r\n e_s: typing.Callable[[], S],\r\n op_f: typing.Callable[[F, F], F],\r\n e_f: typing.Callable[[], F],\r\n map_: typing.Callable[[F, S], S],\r\n seg: np.ndarray,\r\n lazy: np.ndarray,\r\n l: int,\r\n r: int,\r\n) -> S:\r\n r\"\"\"Get \\prod_{j=l}^{r-1}{a_j}.\"\"\"\r\n n = len(seg) >> 1\r\n assert 0 <= l <= r <= n # 0 
<= l <= r <= size actually\r\n l, r = l + n, r + n\r\n h = bit_length(n)\r\n\r\n for i in range(h, 0, -1):\r\n if (l >> i) << i != l:\r\n __seg_propagate(op_f, e_f, map_, seg, lazy, l >> i)\r\n if (r >> i) << i != r:\r\n __seg_propagate(op_f, e_f, map_, seg, lazy, (r - 1) >> i)\r\n\r\n vl, vr = e_s(), e_s()\r\n while l < r:\r\n if l & 1: vl = op_s(vl, seg[l]); l += 1\r\n if r & 1: r -= 1; vr = op_s(seg[r], vr)\r\n l, r = l >> 1, r >> 1\r\n return op_s(vl, vr)\r\n\r\n\r\[email protected]\r\ndef seg_update(\r\n op_s: typing.Callable[[S, S], S],\r\n op_f: typing.Callable[[F, F], F],\r\n e_f: typing.Callable[[], F],\r\n map_: typing.Callable[[F, S], S],\r\n seg: np.ndarray,\r\n lazy: np.ndarray,\r\n i: int,\r\n x: S,\r\n) -> typing.NoReturn:\r\n r\"\"\"Replace a_i with x.\"\"\"\r\n n = len(seg) >> 1\r\n assert 0 <= i < n # 0 <= i < size actually\r\n i += n\r\n h = bit_length(n)\r\n for j in range(h, 0, -1):\r\n __seg_propagate(op_f, e_f, map_, seg, lazy, i >> j)\r\n seg[i] = x\r\n for j in range(1, h + 1): __seg_merge(op_s, seg, i >> j)\r\n\r\n\r\n# segment tree lazy interface.\r\[email protected]\r\ndef build_seg(a: np.ndarray) -> typing.Tuple[np.ndarray, np.ndarray]:\r\n r\"\"\"Build interface.\"\"\"\r\n return seg_build(seg_op_s, seg_e_s, seg_e_f, a)\r\n\r\[email protected]\r\ndef set_seg(seg: np.ndarray, lazy: np.ndarray, l: int, r: int, f: F) -> typing.NoReturn:\r\n r\"\"\"Set interface.\"\"\"\r\n seg_set(seg_op_s, seg_op_f, seg_e_f, seg_map, seg, lazy, l, r, f)\r\n\r\[email protected]\r\ndef get_seg(seg: np.ndarray, lazy: np.ndarray, l: int, r: int) -> S:\r\n r\"\"\"Get interface.\"\"\"\r\n return seg_get(seg_op_s, seg_e_s, seg_op_f, seg_e_f, seg_map, seg, lazy, l, r)\r\n\r\[email protected]\r\ndef update_point_seg(seg: np.ndarray, lazy: np.ndarray, i: int, x: S) -> typing.NoReturn:\r\n r\"\"\"Update interface.\"\"\"\r\n seg_update(seg_op_s, seg_op_f, seg_e_f, seg_map, seg, lazy, i, x)\r\n\r\[email protected]\r\ndef seg_op_s(a: S, b: S) -> S: return a ^ b\r\n\r\[email protected]\r\ndef seg_e_s() -> S: return 0\r\n\r\[email protected]\r\ndef seg_op_f(f: F, g: F) -> F: return f ^ g\r\n\r\[email protected]\r\ndef seg_e_f() -> F: return 0\r\n\r\[email protected]\r\ndef seg_map(f: F, x: S) -> S: return x ^ f\r\n\r\n\r\[email protected]((nb.i8[:], nb.i8[:, :]), cache=True)\r\ndef solve(a: np.ndarray, txy: np.ndarray) -> typing.NoReturn:\r\n n, q = len(a), len(txy)\r\n\r\n seg, lazy = build_seg(a)\r\n for i in range(q):\r\n t, x, y = txy[i]\r\n x -= 1\r\n if t == 1:\r\n set_seg(seg, lazy, x, x + 1, y)\r\n else:\r\n v = get_seg(seg, lazy, x, y)\r\n print(v)\r\n\r\n\r\n\r\ndef main() -> typing.NoReturn:\r\n n, q = map(int, input().split())\r\n a = np.array(\r\n sys.stdin.readline().split(),\r\n dtype=np.int64,\r\n )\r\n txy = np.array(\r\n sys.stdin.read().split(),\r\n dtype=np.int64,\r\n ).reshape(q, 3)\r\n\r\n solve(a, txy)\r\n\r\n\r\nmain()\n", "import math\r\nimport string\r\nimport sys\r\nfrom bisect import bisect_left as bi_l\r\nfrom bisect import bisect_right as bi_r\r\nfrom collections import Counter, defaultdict, deque\r\nfrom heapq import heappop, heappush\r\nfrom itertools import accumulate, combinations, product\r\n\r\n# from numba import jit\r\nimport networkx as nx\r\nimport numpy as np\r\nfrom scipy import optimize\r\nfrom scipy.sparse import csr_matrix\r\nfrom scipy.sparse.csgraph import maximum_flow, shortest_path\r\nfrom scipy.special import comb\r\n\r\ninf = float(\"inf\")\r\nfrom functools import lru_cache, reduce\r\n\r\nsys.setrecursionlimit(10**6)\r\nMOD = 10**9 + 7\r\n# MOD 
= 998244353\r\n\r\n\r\nclass NumberTheory:\r\n def __init__(self, n=2 * 10**6, numpy=True):\r\n self.n = n\r\n self.np_flg = numpy\r\n self.is_prime_number, self.prime_numbers = self.sieve_of_eratosthenes(\r\n n\r\n )\r\n\r\n def sieve_of_eratosthenes(self, n):\r\n if self.np_flg:\r\n sieve = np.ones(n + 1, dtype=np.int64)\r\n sieve[:2] = 0\r\n for i in range(2, int(n**0.5) + 1):\r\n if sieve[i]:\r\n sieve[i * 2 :: i] = 0\r\n prime_numbers = np.flatnonzero(sieve)\r\n else:\r\n sieve = [1] * (n + 1)\r\n sieve[0] = sieve[1] = 0\r\n for i in range(2, int(n**0.5) + 1):\r\n if not sieve[i]:\r\n continue\r\n for j in range(i * 2, n + 1, i):\r\n sieve[j] = 0\r\n prime_numbers = [i for i in range(2, n + 1) if sieve[i]]\r\n return sieve, prime_numbers\r\n\r\n def prime_factorize(self, n):\r\n res = dict()\r\n if n < 2:\r\n return res\r\n border = int(n**0.5)\r\n for p in self.prime_numbers:\r\n if p > border:\r\n break\r\n while n % p == 0:\r\n res[p] = res.get(p, 0) + 1\r\n n //= p\r\n if n == 1:\r\n return res\r\n res[n] = 1\r\n return res\r\n\r\n def prime_factorize_factorial(self, n):\r\n res = dict()\r\n for i in range(2, n + 1):\r\n for p, c in self.prime_factorize(i).items():\r\n res[p] = res.get(p, 0) + c\r\n return res\r\n\r\n @classmethod\r\n def gcd(cls, a, b):\r\n return cls.gcd(b, a % b) if b else abs(a)\r\n\r\n @classmethod\r\n def lcm(cls, a, b):\r\n return abs(a // cls.gcd(a, b) * b)\r\n\r\n @staticmethod\r\n def find_divisors(n):\r\n divisors = []\r\n for i in range(1, int(n**0.5) + 1):\r\n if n % i:\r\n continue\r\n divisors.append(i)\r\n j = n // i\r\n if j != i:\r\n divisors.append(j)\r\n return divisors\r\n\r\n @staticmethod\r\n def base_convert(n, b):\r\n if not n:\r\n return [0]\r\n res = []\r\n while n:\r\n n, r = divmod(n, b)\r\n if r < 0:\r\n n += 1\r\n r -= b\r\n res.append(r)\r\n return res\r\n\r\n\r\nclass UnionFind:\r\n def __init__(self, n=10**6):\r\n self.root = list(range(n))\r\n self.height = [0] * n\r\n self.size = [1] * n\r\n\r\n def find_root(self, u):\r\n if self.root[u] == u:\r\n return u\r\n self.root[u] = self.find_root(self.root[u])\r\n return self.root[u]\r\n\r\n def unite(self, u, v):\r\n ru = self.find_root(u)\r\n rv = self.find_root(v)\r\n if ru == rv:\r\n return\r\n hu = self.height[ru]\r\n hv = self.height[rv]\r\n if hu >= hv:\r\n self.root[rv] = ru\r\n self.size[ru] += self.size[rv]\r\n self.height[ru] = max(hu, hv + 1)\r\n else:\r\n self.root[ru] = rv\r\n self.size[rv] += self.size[ru]\r\n\r\n\r\nclass Combinatorics:\r\n def __init__(self, N=10**9, n=10**6, mod=10**9 + 7, numpy=True):\r\n self.mod = mod\r\n self.nCr = dict()\r\n self.np_flg = numpy\r\n self.make_mod_tables(N, n)\r\n\r\n sys.setrecursionlimit(10**6)\r\n\r\n def choose(self, n, r, mod=None): # no mod, or mod ≠ prime\r\n if r > n or r < 0:\r\n return 0\r\n if r == 0:\r\n return 1\r\n if (n, r) in self.nCr:\r\n return self.nCr[(n, r)]\r\n if not mod:\r\n self.nCr[(n, r)] = self.choose(n - 1, r) + self.choose(\r\n n - 1, r - 1\r\n )\r\n else:\r\n self.nCr[(n, r)] = (\r\n self.choose(n - 1, r, mod) + self.choose(n - 1, r - 1, mod)\r\n ) % mod\r\n return self.nCr[(n, r)]\r\n\r\n def cumprod(self, a):\r\n p = self.mod\r\n l = len(a)\r\n sql = int(np.sqrt(l) + 1)\r\n a = np.resize(a, sql**2).reshape(sql, sql)\r\n for i in range(sql - 1):\r\n a[:, i + 1] *= a[:, i]\r\n a[:, i + 1] %= p\r\n for i in range(sql - 1):\r\n a[i + 1] *= a[i, -1]\r\n a[i + 1] %= p\r\n return np.ravel(a)[:l]\r\n\r\n def make_mod_tables(self, N, n):\r\n p = self.mod\r\n if self.np_flg:\r\n fac = np.arange(n + 1)\r\n 
fac[0] = 1\r\n fac = self.cumprod(fac)\r\n ifac = np.arange(n + 1, 0, -1)\r\n ifac[0] = pow(int(fac[-1]), p - 2, p)\r\n ifac = self.cumprod(ifac)[n::-1]\r\n n_choose = np.arange(N + 1, N - n, -1)\r\n n_choose[0] = 1\r\n n_choose[1:] = self.cumprod(n_choose[1:]) * ifac[1 : n + 1] % p\r\n else:\r\n fac = [None] * (n + 1)\r\n fac[0] = 1\r\n for i in range(n):\r\n fac[i + 1] = fac[i] * (i + 1) % p\r\n ifac = [None] * (n + 1)\r\n ifac[n] = pow(fac[n], p - 2, p)\r\n for i in range(n, 0, -1):\r\n ifac[i - 1] = ifac[i] * i % p\r\n n_choose = [None] * (n + 1)\r\n n_choose[0] = 1\r\n for i in range(n):\r\n n_choose[i + 1] = n_choose[i] * (N - i) % p\r\n for i in range(n + 1):\r\n n_choose[i] = n_choose[i] * ifac[i] % p\r\n self.fac, self.ifac, self.mod_n_choose = fac, ifac, n_choose\r\n\r\n def mod_choose(self, n, r):\r\n return (\r\n self.fac[n] * self.ifac[r] % self.mod * self.ifac[n - r] % self.mod\r\n )\r\n\r\n\r\ndef z_algorithm(s):\r\n n = len(s)\r\n a = [0] * n\r\n a[0] = n\r\n l = r = -1\r\n for i in range(1, n):\r\n if r >= i:\r\n a[i] = min(a[i - l], r - i)\r\n while i + a[i] < n and s[i + a[i]] == s[a[i]]:\r\n a[i] += 1\r\n if i + a[i] >= r:\r\n l, r = i, i + a[i]\r\n return a\r\n\r\n\r\nclass GeometryTopology:\r\n class GraphTheory:\r\n class MaximumFlow:\r\n @staticmethod\r\n def dinic(graph, s, t): # s: source, t: sink\r\n def bfs():\r\n level = defaultdict(int)\r\n level[s] = 0\r\n q = deque([s])\r\n while q:\r\n u = q.popleft()\r\n for v, cap in graph[u].items():\r\n if cap == 0 or v in level:\r\n continue\r\n level[v] = level[u] + 1\r\n q.append(v)\r\n return level\r\n\r\n def flow_to_sink(u, flow_in):\r\n if u == t:\r\n return flow_in\r\n flow = 0\r\n for v, cap in graph[u].items():\r\n if cap == 0 or level[v] <= level[u]:\r\n continue\r\n f = flow_to_sink(v, min(flow_in, cap))\r\n if not f:\r\n continue\r\n graph[u][v] -= f\r\n graph[v][u] += f\r\n flow_in -= f\r\n flow += f\r\n return flow\r\n\r\n flow = 0\r\n while True:\r\n level = bfs()\r\n if not t in level:\r\n return flow\r\n flow += flow_to_sink(s, float(\"inf\"))\r\n\r\n @staticmethod\r\n def ford_fulkerson():\r\n pass\r\n\r\n @staticmethod\r\n def push_relabel():\r\n pass\r\n\r\n class ShortestPath:\r\n pass\r\n\r\n\r\nclass AtCoder:\r\n class ABC001:\r\n @staticmethod\r\n def a():\r\n h1, h2 = map(int, sys.stdin.read().split())\r\n print(h1 - h2)\r\n\r\n class ABC002:\r\n @staticmethod\r\n def a():\r\n print(max(map(int, sys.stdin.readline().split())))\r\n\r\n @staticmethod\r\n def b():\r\n vowels = set(\"aeiou\")\r\n print(\r\n \"\".join(\r\n [\r\n c\r\n for c in sys.stdin.readline().rstrip()\r\n if c not in vowels\r\n ]\r\n )\r\n )\r\n\r\n @staticmethod\r\n def c():\r\n def triangle_area(x0, y0, x1, y1, x2, y2):\r\n x1 -= x0\r\n x2 -= x0\r\n y1 -= y0\r\n y2 -= y0\r\n return abs(x1 * y2 - x2 * y1) / 2\r\n\r\n print(triangle_area(*map(int, sys.stdin.readline().split())))\r\n\r\n @staticmethod\r\n def d():\r\n n, m = map(int, sys.stdin.readline().split())\r\n edges = set(\r\n (x - 1, y - 1)\r\n for x, y in zip(*[map(int, sys.stdin.read().split())] * 2)\r\n )\r\n print(\r\n max(\r\n len(s)\r\n for i in range(1, 1 << n)\r\n for s in [[j for j in range(n) if i >> j & 1]]\r\n if all((x, y) in edges for x, y in combinations(s, 2))\r\n )\r\n )\r\n\r\n @staticmethod\r\n def d_2():\r\n n, m = map(int, sys.stdin.readline().split())\r\n relations = [1 << i for i in range(n)]\r\n for x, y in zip(*[map(int, sys.stdin.read().split())] * 2):\r\n x -= 1\r\n y -= 1\r\n relations[x] |= 1 << y\r\n relations[y] |= 1 << x\r\n res = 0\r\n 
for i in range(1 << n):\r\n cnt = 0\r\n s = 0\r\n t = (1 << n) - 1\r\n for j in range(n):\r\n if i >> j & 1:\r\n s |= 1 << j\r\n t &= relations[j]\r\n cnt += 1\r\n if t & s == s:\r\n res = max(res, cnt)\r\n print(res)\r\n\r\n class ABC003:\r\n @staticmethod\r\n def a():\r\n print((int(sys.stdin.readline().rstrip()) + 1) * 5000)\r\n\r\n @staticmethod\r\n def b():\r\n atcoder = set(\"atcoder\")\r\n s, t = sys.stdin.read().split()\r\n print(\r\n all(\r\n s[i] == t[i]\r\n or s[i] == \"@\"\r\n and t[i] in atcoder\r\n or t[i] == \"@\"\r\n and s[i] in atcoder\r\n for i in range(len(s))\r\n )\r\n and \"You can win\"\r\n or \"You will lose\"\r\n )\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *r = map(int, sys.stdin.read().split())\r\n print(reduce(lambda x, y: (x + y) / 2, sorted(r)[-k:], 0))\r\n\r\n class ABC004:\r\n @staticmethod\r\n def a():\r\n print(int(sys.stdin.readline().rstrip()) * 2)\r\n\r\n @staticmethod\r\n def b():\r\n for l in [sys.stdin.readline().rstrip() for _ in range(4)][::-1]:\r\n print(l[::-1])\r\n\r\n @staticmethod\r\n def c():\r\n n = int(sys.stdin.readline().rstrip()) % 30\r\n res = list(range(1, 7))\r\n for i in range(n):\r\n i %= 5\r\n res[i], res[i + 1] = res[i + 1], res[i]\r\n print(*res, sep=\"\")\r\n\r\n class ABC005:\r\n @staticmethod\r\n def a():\r\n x, y = map(int, sys.stdin.readline().split())\r\n print(y // x)\r\n\r\n @staticmethod\r\n def b():\r\n n, *t = map(int, sys.stdin.read().split())\r\n print(min(t))\r\n\r\n @staticmethod\r\n def c():\r\n t = int(sys.stdin.readline().rstrip())\r\n n = int(sys.stdin.readline().rstrip())\r\n a = [int(x) for x in sys.stdin.readline().split()]\r\n m = int(sys.stdin.readline().rstrip())\r\n b = [int(x) for x in sys.stdin.readline().split()]\r\n i = 0\r\n for p in b:\r\n if i == n:\r\n print(\"no\")\r\n return\r\n while p - a[i] > t:\r\n i += 1\r\n if i == n:\r\n print(\"no\")\r\n return\r\n if a[i] > p:\r\n print(\"no\")\r\n return\r\n i += 1\r\n print(\"yes\")\r\n\r\n @staticmethod\r\n def d():\r\n n = int(sys.stdin.readline().rstrip())\r\n d = np.array(\r\n [sys.stdin.readline().split() for _ in range(n)], np.int64\r\n )\r\n s = d.cumsum(axis=0).cumsum(axis=1)\r\n s = np.pad(s, 1)\r\n max_del = np.zeros((n + 1, n + 1), dtype=np.int64)\r\n for y in range(1, n + 1):\r\n for x in range(1, n + 1):\r\n max_del[y, x] = np.amax(\r\n s[y : n + 1, x : n + 1]\r\n - s[0 : n - y + 1, x : n + 1]\r\n - s[y : n + 1, 0 : n - x + 1]\r\n + s[0 : n - y + 1, 0 : n - x + 1]\r\n )\r\n res = np.arange(n**2 + 1)[:, None]\r\n i = np.arange(1, n + 1)\r\n res = max_del[i, np.minimum(res // i, n)].max(axis=1)\r\n q = int(sys.stdin.readline().rstrip())\r\n p = np.array(sys.stdin.read().split(), dtype=np.int64)\r\n print(*res[p], sep=\"\\n\")\r\n\r\n class ABC006:\r\n @staticmethod\r\n def a():\r\n n = sys.stdin.readline().rstrip()\r\n if \"3\" in n:\r\n print(\"YES\")\r\n elif int(n) % 3 == 0:\r\n print(\"YES\")\r\n else:\r\n print(\"NO\")\r\n\r\n @staticmethod\r\n def b():\r\n mod = 10007\r\n t = [0, 0, 1]\r\n for _ in range(1001001):\r\n t.append(t[-1] + t[-2] + t[-3])\r\n t[-1] %= mod\r\n n = int(sys.stdin.readline().rstrip())\r\n print(t[n - 1])\r\n\r\n @staticmethod\r\n def c():\r\n n, m = map(int, sys.stdin.readline().split())\r\n cnt = [0, 0, 0]\r\n if m == 1:\r\n cnt = [-1, -1, -1]\r\n else:\r\n if m & 1:\r\n m -= 3\r\n cnt[1] += 1\r\n n -= 1\r\n cnt[2] = m // 2 - n\r\n cnt[0] = n - cnt[2]\r\n if cnt[0] < 0 or cnt[1] < 0 or cnt[2] < 0:\r\n print(-1, -1, -1)\r\n else:\r\n print(*cnt, sep=\" \")\r\n\r\n @staticmethod\r\n def d():\r\n n, *c = 
map(int, sys.stdin.read().split())\r\n lis = [inf] * n\r\n for x in c:\r\n lis[bi_l(lis, x)] = x\r\n print(n - bi_l(lis, inf))\r\n\r\n class ABC007:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n print(n - 1)\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip()\r\n if s == \"a\":\r\n print(-1)\r\n else:\r\n print(\"a\")\r\n\r\n @staticmethod\r\n def c():\r\n r, c = map(int, sys.stdin.readline().split())\r\n sy, sx = map(int, sys.stdin.readline().split())\r\n gy, gx = map(int, sys.stdin.readline().split())\r\n sy -= 1\r\n sx -= 1\r\n gy -= 1\r\n gx -= 1\r\n maze = [sys.stdin.readline().rstrip() for _ in range(r)]\r\n queue = deque([(sy, sx)])\r\n dist = np.full((r, c), np.inf)\r\n dist[sy, sx] = 0\r\n while queue:\r\n y, x = queue.popleft()\r\n for i, j in [(-1, 0), (1, 0), (0, -1), (0, 1)]:\r\n i += y\r\n j += x\r\n if maze[i][j] == \"#\" or dist[i, j] != np.inf:\r\n continue\r\n dist[i, j] = dist[y, x] + 1\r\n queue.append((i, j))\r\n print(int(dist[gy, gx]))\r\n\r\n @staticmethod\r\n def d():\r\n ng = set([4, 9])\r\n\r\n def count(d):\r\n return d if d <= 4 else d - 1\r\n\r\n def f(n):\r\n x = [int(d) for d in str(n)]\r\n flg = True\r\n dp = 0\r\n for d in x:\r\n dp = dp * 8 + flg * count(d)\r\n if d in ng:\r\n flg = False\r\n return n - (dp + flg)\r\n\r\n a, b = map(int, sys.stdin.readline().split())\r\n print(f(b) - f(a - 1))\r\n\r\n class ABC008:\r\n @staticmethod\r\n def a():\r\n s, t = map(int, sys.stdin.readline().split())\r\n print(t - s + 1)\r\n\r\n @staticmethod\r\n def b():\r\n n, *s = sys.stdin.read().split()\r\n res = defaultdict(int)\r\n for name in s:\r\n res[name] += 1\r\n print(sorted(res.items(), key=lambda x: x[1])[-1][0])\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n c = n - np.count_nonzero(a[:, None] % a, axis=1)\r\n print(np.sum((c + 1) // 2 / c))\r\n\r\n @staticmethod\r\n def d():\r\n w, h, n, *xy = map(int, sys.stdin.read().split())\r\n (*xy,) = zip(*([iter(xy)] * 2))\r\n\r\n @lru_cache(maxsize=None)\r\n def count(x1, y1, x2, y2):\r\n res = 0\r\n for x, y in xy:\r\n if not (x1 <= x <= x2 and y1 <= y <= y2):\r\n continue\r\n cnt = (x2 - x1) + (y2 - y1) + 1\r\n cnt += count(x1, y1, x - 1, y - 1)\r\n cnt += count(x1, y + 1, x - 1, y2)\r\n cnt += count(x + 1, y1, x2, y - 1)\r\n cnt += count(x + 1, y + 1, x2, y2)\r\n res = max(res, cnt)\r\n return res\r\n\r\n print(count(1, 1, w, h))\r\n\r\n class ABC009:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n print((n + 1) // 2)\r\n\r\n @staticmethod\r\n def b():\r\n n, *a = map(int, sys.stdin.read().split())\r\n print(sorted(set(a))[-2])\r\n\r\n @staticmethod\r\n def c():\r\n n, k = map(int, sys.stdin.readline().split())\r\n s = list(sys.stdin.readline().rstrip())\r\n cost = [1] * n\r\n r = k\r\n for i in range(n - 1):\r\n q = []\r\n for j in range(i + 1, n):\r\n if s[j] < s[i] and cost[i] + cost[j] <= r:\r\n heappush(q, (s[j], cost[i] + cost[j], -j))\r\n if not q:\r\n continue\r\n _, c, j = heappop(q)\r\n j = -j\r\n s[i], s[j] = s[j], s[i]\r\n r -= c\r\n cost[i] = cost[j] = 0\r\n print(\"\".join(s))\r\n\r\n @staticmethod\r\n def d():\r\n k, m = map(int, sys.stdin.readline().split())\r\n a = np.array([int(x) for x in sys.stdin.readline().split()])\r\n c = np.array([int(x) for x in sys.stdin.readline().split()])\r\n mask = (1 << 32) - 1\r\n d = np.eye(k, k, -1, dtype=np.uint32) * mask\r\n d[0] = c\r\n\r\n def bitwise_dot(a, b):\r\n return np.bitwise_xor.reduce(\r\n a[:, None, :] & 
b.T[None, :, :], axis=-1\r\n )\r\n\r\n def bitwise_mat_pow(a, n):\r\n if n == 0:\r\n return np.eye(k, dtype=np.uint32) * mask\r\n res = bitwise_mat_pow(a, n // 2)\r\n res = bitwise_dot(res, res)\r\n return bitwise_dot(res, a) if n & 1 else res\r\n\r\n if m <= k:\r\n print(a[m - 1])\r\n return\r\n print(\r\n bitwise_dot(bitwise_mat_pow(d, m - k), a[::-1].reshape(-1, 1))[\r\n 0\r\n ].item()\r\n )\r\n\r\n class ABC010:\r\n @staticmethod\r\n def a():\r\n print(sys.stdin.readline().rstrip() + \"pp\")\r\n\r\n @staticmethod\r\n def b():\r\n n, *a = map(int, sys.stdin.read().split())\r\n tot = 0\r\n for x in a:\r\n c = 0\r\n while x % 2 == 0 or x % 3 == 2:\r\n x -= 1\r\n c += 1\r\n tot += c\r\n print(tot)\r\n\r\n @staticmethod\r\n def c():\r\n sx, sy, gx, gy, t, v, n, *xy = map(int, sys.stdin.read().split())\r\n x, y = np.array(xy).reshape(-1, 2).T\r\n\r\n def dist(x1, y1, x2, y2):\r\n return np.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)\r\n\r\n ans = (\r\n \"YES\"\r\n if (dist(sx, sy, x, y) + dist(x, y, gx, gy) <= v * t).any()\r\n else \"NO\"\r\n )\r\n print(ans)\r\n\r\n @staticmethod\r\n def d_1():\r\n n, g, e = map(int, sys.stdin.readline().split())\r\n p = [int(x) for x in sys.stdin.readline().split()]\r\n x, y = [], []\r\n for _ in range(e):\r\n a, b = map(int, sys.stdin.readline().split())\r\n x.append(a)\r\n y.append(b)\r\n x.append(b)\r\n y.append(a)\r\n for a in p:\r\n x.append(a)\r\n y.append(n)\r\n if not x:\r\n print(0)\r\n return\r\n c = [1] * len(x)\r\n min_cut = maximum_flow(\r\n csr_matrix((c, (x, y)), (n + 1, n + 1)), source=0, sink=n\r\n ).flow_value\r\n print(min_cut)\r\n\r\n @staticmethod\r\n def d_2():\r\n n, g, e = map(int, sys.stdin.readline().split())\r\n if g + e == 0:\r\n print(0)\r\n return\r\n graph = nx.DiGraph()\r\n graph.add_nodes_from(range(n + 1))\r\n for p in [int(x) for x in sys.stdin.readline().split()]:\r\n graph.add_edge(p, n, capacity=1)\r\n for _ in range(e):\r\n a, b = map(int, sys.stdin.readline().split())\r\n graph.add_edge(a, b, capacity=1)\r\n graph.add_edge(b, a, capacity=1)\r\n print(nx.minimum_cut_value(graph, 0, n))\r\n\r\n @staticmethod\r\n def d_3():\r\n n, g, e = map(int, sys.stdin.readline().split())\r\n if g + e == 0:\r\n print(0)\r\n return\r\n graph = defaultdict(dict)\r\n for p in [int(x) for x in sys.stdin.readline().split()]:\r\n graph[p][n] = 1\r\n graph[n][p] = 0\r\n\r\n for a, b in zip(*[map(int, sys.stdin.read().split())] * 2):\r\n graph[a][b] = 1\r\n graph[b][a] = 1\r\n print(GeometryTopology.GraphTheory.MaximumFlow.dinic(graph, 0, n))\r\n\r\n class ABC011:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n print(n % 12 + 1)\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip()\r\n print(s[0].upper() + s[1:].lower())\r\n\r\n @staticmethod\r\n def c():\r\n n, *ng = map(int, sys.stdin.read().split())\r\n ng = set(ng)\r\n if n in ng:\r\n print(\"NO\")\r\n else:\r\n r = 100\r\n while n > 0:\r\n if r == 0:\r\n print(\"NO\")\r\n return\r\n for i in range(3, 0, -1):\r\n if (n - i) in ng:\r\n continue\r\n n -= i\r\n r -= 1\r\n break\r\n else:\r\n print(\"NO\")\r\n return\r\n print(\"YES\")\r\n\r\n @staticmethod\r\n def d():\r\n n, d, x, y = map(int, sys.stdin.read().split())\r\n x, y = abs(x), abs(y)\r\n if x % d or y % d:\r\n print(0)\r\n return\r\n x, y = x // d, y // d\r\n r = n - (x + y)\r\n if r < 0 or r & 1:\r\n print(0)\r\n return\r\n\r\n res = 0\r\n half_p = pow(1 / 2, n)\r\n for d in range(r // 2 + 1): # 0 <= d <= r//2, south\r\n south, north = d, y + d\r\n west = (r - 2 * d) // 2\r\n res += 
(\r\n half_p\r\n * comb(n, south, exact=True)\r\n * comb(n - south, north, exact=True)\r\n * comb(n - south - north, west, exact=True)\r\n * half_p\r\n )\r\n print(res)\r\n\r\n class ABC012:\r\n @staticmethod\r\n def a():\r\n a, b = map(int, sys.stdin.readline().split())\r\n print(b, a)\r\n\r\n @staticmethod\r\n def b():\r\n n = int(sys.stdin.readline().rstrip())\r\n h, n = divmod(n, 3600)\r\n m, s = divmod(n, 60)\r\n print(f\"{h:02}:{m:02}:{s:02}\")\r\n\r\n @staticmethod\r\n def c():\r\n n = 2025 - int(sys.stdin.readline().rstrip())\r\n res = []\r\n for i in range(1, 10):\r\n if n % i != 0 or n // i > 9:\r\n continue\r\n res.append(f\"{i} x {n//i}\")\r\n print(*sorted(res), sep=\"\\n\")\r\n\r\n @staticmethod\r\n def d():\r\n n, m, *abt = map(int, sys.stdin.read().split())\r\n a, b, t = np.array(abt).reshape(m, 3).T\r\n res = shortest_path(\r\n csr_matrix((t, (a - 1, b - 1)), (n, n)),\r\n method=\"FW\",\r\n directed=False,\r\n )\r\n print(res.max(axis=-1).min().astype(np.int64))\r\n\r\n class ABC013:\r\n @staticmethod\r\n def a():\r\n print(ord(sys.stdin.readline().rstrip()) - ord(\"A\") + 1)\r\n\r\n @staticmethod\r\n def b():\r\n a, b = map(int, sys.stdin.read().split())\r\n d = abs(a - b)\r\n print(min(d, 10 - d))\r\n\r\n @staticmethod\r\n def c():\r\n n, h, a, b, c, d, e = map(int, sys.stdin.read().split())\r\n y = np.arange(n + 1)\r\n x = (n * e - h - (d + e) * y) // (b + e) + 1\r\n np.maximum(x, 0, out=x)\r\n np.minimum(x, n - y, out=x)\r\n print(np.amin(a * x + c * y))\r\n\r\n @staticmethod\r\n def d():\r\n n, m, d, *a = map(int, sys.stdin.read().split())\r\n res = list(range(n))\r\n\r\n def swap(i, j):\r\n res[i], res[j] = res[j], res[i]\r\n\r\n for i in a[::-1]:\r\n swap(i - 1, i)\r\n\r\n group = [None] * n\r\n root = [None] * n\r\n index_in_group = [None] * n\r\n for i in range(n):\r\n if root[i] is not None:\r\n continue\r\n group[i] = []\r\n j = i\r\n for cnt in range(1, n + 1):\r\n index_in_group[j] = cnt - 1\r\n group[i].append(j)\r\n j = res[j]\r\n root[j] = i\r\n if j == i:\r\n break\r\n\r\n for i in range(n):\r\n g = group[root[i]]\r\n print(g[(index_in_group[i] + d) % len(g)] + 1)\r\n\r\n class ABC014:\r\n @staticmethod\r\n def a():\r\n a, b = map(int, sys.stdin.read().split())\r\n print((a + b - 1) // b * b - a)\r\n\r\n class ABC032:\r\n @staticmethod\r\n def a():\r\n a, b, n = map(int, sys.stdin.read().split())\r\n l = NumberTheory.lcm(a, b)\r\n print((n + l - 1) // l * l)\r\n\r\n @staticmethod\r\n def b():\r\n s, k = sys.stdin.read().split()\r\n k = int(k)\r\n res = set()\r\n for i in range(len(s) - k + 1):\r\n res.add(s[i : i + k])\r\n print(len(res))\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *s = map(int, sys.stdin.read().split())\r\n if 0 in s:\r\n print(n)\r\n return\r\n s += [inf]\r\n res = 0\r\n l = r = 0\r\n tmp = 1\r\n while r <= n:\r\n tmp *= s[r]\r\n while tmp > k:\r\n res = max(res, r - l)\r\n tmp //= s[l]\r\n l += 1\r\n r += 1\r\n print(res)\r\n\r\n class ABC033:\r\n @staticmethod\r\n def a():\r\n n = set(sys.stdin.readline().rstrip())\r\n print(\"SAME\" if len(n) == 1 else \"DIFFERENT\")\r\n\r\n @staticmethod\r\n def b():\r\n n = int(sys.stdin.readline().rstrip())\r\n res = dict()\r\n for _ in range(n):\r\n s, p = sys.stdin.readline().split()\r\n p = int(p)\r\n res[s] = p\r\n tot = sum(res.values())\r\n for s, p in res.items():\r\n if p > tot / 2:\r\n print(s)\r\n return\r\n print(\"atcoder\")\r\n\r\n @staticmethod\r\n def c():\r\n s = sys.stdin.readline().rstrip()\r\n res = sum(not \"0\" in f for f in s.split(\"+\"))\r\n print(res)\r\n\r\n class 
ABC034:\r\n @staticmethod\r\n def a():\r\n x, y = map(int, sys.stdin.readline().split())\r\n print(\"Better\" if y > x else \"Worse\")\r\n\r\n @staticmethod\r\n def b():\r\n n = int(sys.stdin.readline().rstrip())\r\n print(n + 1 if n & 1 else n - 1)\r\n\r\n @staticmethod\r\n def c():\r\n h, w = map(int, sys.stdin.read().split())\r\n combinatorics = Combinatorics(n=2 * 10**5, numpy=True, mod=MOD)\r\n print(combinatorics.mod_choose(h + w - 2, h - 1))\r\n\r\n @staticmethod\r\n def d():\r\n n, k, *wp = map(int, sys.stdin.read().split())\r\n w, p = np.array(wp).reshape(-1, 2).T\r\n\r\n def f(x):\r\n return np.sort(w * (p - x))[-k:].sum()\r\n\r\n print(optimize.bisect(f, 0, 100))\r\n\r\n class ABC035:\r\n @staticmethod\r\n def a():\r\n w, h = map(int, sys.stdin.readline().split())\r\n print(\"4:3\" if 4 * h == 3 * w else \"16:9\")\r\n\r\n @staticmethod\r\n def b():\r\n s, t = sys.stdin.read().split()\r\n y = 0\r\n x = 0\r\n z = 0\r\n for c in s:\r\n if c == \"?\":\r\n z += 1\r\n elif c == \"L\":\r\n x -= 1\r\n elif c == \"R\":\r\n x += 1\r\n elif c == \"D\":\r\n y -= 1\r\n elif c == \"U\":\r\n y += 1\r\n d = abs(y) + abs(x)\r\n if t == \"1\":\r\n print(d + z)\r\n else:\r\n print(max(d - z, (d - z) & 1))\r\n\r\n @staticmethod\r\n def c():\r\n n, q, *lr = map(int, sys.stdin.read().split())\r\n l, r = np.array(lr).reshape(q, 2).T\r\n res = np.zeros(n + 1, dtype=int)\r\n np.add.at(res, l - 1, 1)\r\n np.subtract.at(res, r, 1)\r\n np.cumsum(res, out=res)\r\n res = res & 1\r\n print(\"\".join(map(str, res[:-1])))\r\n\r\n @staticmethod\r\n def d():\r\n n, m, t = map(int, sys.stdin.readline().split())\r\n point = np.array(sys.stdin.readline().split(), dtype=int)\r\n a, b, c = (\r\n np.array(sys.stdin.read().split(), dtype=np.int64)\r\n .reshape(m, 3)\r\n .T\r\n )\r\n a -= 1\r\n b -= 1\r\n d_1 = shortest_path(\r\n csr_matrix((c, (a, b)), (n, n)),\r\n method=\"D\",\r\n directed=True,\r\n indices=0,\r\n )\r\n d_2 = shortest_path(\r\n csr_matrix((c, (b, a)), (n, n)),\r\n method=\"D\",\r\n directed=True,\r\n indices=0,\r\n )\r\n print(int(np.amax((t - (d_1 + d_2)) * point)))\r\n\r\n class ABC036:\r\n @staticmethod\r\n def a():\r\n a, b = map(int, sys.stdin.readline().split())\r\n print((b + a - 1) // a)\r\n\r\n @staticmethod\r\n def b():\r\n n, *s = sys.stdin.read().split()\r\n n = int(n)\r\n for j in range(n):\r\n row = \"\"\r\n for i in range(n - 1, -1, -1):\r\n row += s[i][j]\r\n print(row)\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n b = [None] * n\r\n prev = None\r\n j = -1\r\n for i, x in sorted(enumerate(a), key=lambda x: x[1]):\r\n if x != prev:\r\n j += 1\r\n b[i] = j\r\n prev = x\r\n print(*b, sep=\"\\n\")\r\n\r\n @staticmethod\r\n def d():\r\n n, *ab = map(int, sys.stdin.read().split())\r\n edges = [[] for _ in range(n)]\r\n for a, b in zip(*[iter(ab)] * 2):\r\n a -= 1\r\n b -= 1\r\n edges[a].append(b)\r\n edges[b].append(a)\r\n parent = [None] * n\r\n\r\n def count(u):\r\n black, white = 1, 1\r\n for v in edges[u]:\r\n if v == parent[u]:\r\n continue\r\n parent[v] = u\r\n b, w = count(v)\r\n black *= w\r\n black %= MOD\r\n white *= (b + w) % MOD\r\n white %= MOD\r\n return black, white\r\n\r\n print(sum(count(0)) % MOD)\r\n\r\n class ABC037:\r\n @staticmethod\r\n def a():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n print(c // min(a, b))\r\n\r\n @staticmethod\r\n def b():\r\n n, q, *lrt = map(int, sys.stdin.read().split())\r\n a = np.zeros(n, dtype=int)\r\n for l, r, t in zip(*[iter(lrt)] * 3):\r\n a[l - 1 : r] = t\r\n print(*a, 
sep=\"\\n\")\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *a = map(int, sys.stdin.read().split())\r\n a = np.array([0] + a)\r\n np.cumsum(a, out=a)\r\n s = (a[k:] - a[:-k]).sum()\r\n print(s)\r\n\r\n @staticmethod\r\n def d():\r\n h, w = map(int, sys.stdin.readline().split())\r\n a = [\r\n [int(x) for x in sys.stdin.readline().split()]\r\n for _ in range(h)\r\n ]\r\n dyx = [(-1, 0), (0, -1), (1, 0), (0, 1)]\r\n path = [[None] * w for _ in range(h)]\r\n\r\n def paths(i, j):\r\n if path[i][j]:\r\n return path[i][j]\r\n val = a[i][j]\r\n cnt = 1\r\n for dy, dx in dyx:\r\n y = i + dy\r\n x = j + dx\r\n if 0 <= y < h and 0 <= x < w and a[y][x] < val:\r\n cnt += paths(y, x)\r\n cnt %= MOD\r\n path[i][j] = cnt\r\n return cnt\r\n\r\n tot = 0\r\n for i in range(h):\r\n for j in range(w):\r\n tot += paths(i, j)\r\n tot %= MOD\r\n print(tot)\r\n\r\n class ABC038:\r\n @staticmethod\r\n def a():\r\n s = sys.stdin.readline().rstrip()\r\n print(\"YES\" if s[-1] == \"T\" else \"NO\")\r\n\r\n @staticmethod\r\n def b():\r\n a, b, c, d = map(int, sys.stdin.read().split())\r\n print(\"YES\" if a == c or b == c or a == d or b == d else \"NO\")\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a += [-1]\r\n cnt = n\r\n tmp = 1\r\n for i in range(n):\r\n if a[i + 1] > a[i]:\r\n tmp += 1\r\n else:\r\n cnt += tmp * (tmp - 1) // 2\r\n tmp = 1\r\n print(cnt)\r\n\r\n @staticmethod\r\n def d():\r\n n, *wh = map(int, sys.stdin.read().split())\r\n wh = sorted(zip(*[iter(wh)] * 2), key=lambda x: (-x[0], x[1]))\r\n w = [x[1] for x in wh][::-1]\r\n res = [inf] * n\r\n for x in w:\r\n res[bi_l(res, x)] = x\r\n print(bi_l(res, inf))\r\n\r\n class ABC039:\r\n @staticmethod\r\n def a():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n print((a * b + b * c + c * a) * 2)\r\n\r\n @staticmethod\r\n def b():\r\n x = int(sys.stdin.readline().rstrip())\r\n for n in range(1, int(x**0.5) + 1):\r\n if pow(n, 4) == x:\r\n print(n)\r\n return\r\n\r\n @staticmethod\r\n def c():\r\n board = \"WBWBWWBWBWBW\" * 3\r\n convert = \"Do, *, Re, *, Mi, Fa, *, So, *, La, *, Si\".split(\", \")\r\n s = sys.stdin.readline().rstrip()\r\n print(convert[board.index(s)])\r\n\r\n @staticmethod\r\n def d():\r\n h, w = map(int, sys.stdin.readline().split())\r\n s = sys.stdin.read().split()\r\n dyx = list(product((-1, 0, 1), repeat=2))\r\n black_certain = set()\r\n black_before = set()\r\n for i in range(h):\r\n for j in range(w):\r\n black_cand = set()\r\n for dy, dx in dyx:\r\n y = i + dy\r\n x = j + dx\r\n if y < 0 or y >= h or x < 0 or x >= w:\r\n continue\r\n if s[y][x] == \".\":\r\n break\r\n black_cand.add((y, x))\r\n else:\r\n black_before.add((i, j))\r\n black_certain |= black_cand\r\n for i in range(h):\r\n for j in range(w):\r\n if s[i][j] == \"#\" and not (i, j) in black_certain:\r\n print(\"impossible\")\r\n return\r\n print(\"possible\")\r\n for i in range(h):\r\n row = \"\"\r\n for j in range(w):\r\n row += \"#\" if (i, j) in black_before else \".\"\r\n print(\"\".join(row))\r\n\r\n class ABC040:\r\n @staticmethod\r\n def a():\r\n n, x = map(int, sys.stdin.readline().split())\r\n print(min(x - 1, n - x))\r\n\r\n @staticmethod\r\n def b():\r\n n = int(sys.stdin.readline().rstrip())\r\n res = inf\r\n for i in range(1, int(n**0.5) + 1):\r\n res = min(res, n // i - i + n % i)\r\n print(res)\r\n\r\n @staticmethod\r\n def c():\r\n n, *h = map(int, sys.stdin.read().split())\r\n h = [h[0]] + h\r\n cost = [None] * (n + 1)\r\n cost[0] = cost[1] = 0\r\n for i in range(2, n + 1):\r\n cost[i] = min(\r\n cost[i - 2] 
+ abs(h[i] - h[i - 2]),\r\n cost[i - 1] + abs(h[i] - h[i - 1]),\r\n )\r\n print(cost[n])\r\n\r\n @staticmethod\r\n def d():\r\n n, m = map(int, sys.stdin.readline().split())\r\n uf = UnionFind(n=n)\r\n queue = []\r\n for _ in range(m):\r\n a, b, y = map(int, sys.stdin.readline().split())\r\n heappush(queue, (-(2 * y), a - 1, b - 1))\r\n q = int(sys.stdin.readline().rstrip())\r\n for i in range(q):\r\n v, y = map(int, sys.stdin.readline().split())\r\n heappush(queue, (-(2 * y + 1), v - 1, i))\r\n res = [None] * q\r\n while queue:\r\n y, i, j = heappop(queue)\r\n if y & 1:\r\n res[j] = uf.size[uf.find_root(i)]\r\n else:\r\n uf.unite(i, j)\r\n print(*res, sep=\"\\n\")\r\n\r\n class ABC041:\r\n @staticmethod\r\n def a():\r\n s, i = sys.stdin.read().split()\r\n i = int(i)\r\n print(s[i - 1])\r\n\r\n @staticmethod\r\n def b():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n ans = a * b % MOD * c % MOD\r\n print(ans)\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n for i, h in sorted(enumerate(a), key=lambda x: -x[1]):\r\n print(i + 1)\r\n\r\n @staticmethod\r\n def d():\r\n n, m, *xy = map(int, sys.stdin.read().split())\r\n (*xy,) = zip(*[iter(xy)] * 2)\r\n edges = [0] * n\r\n for x, y in xy:\r\n x -= 1\r\n y -= 1\r\n edges[x] |= 1 << y\r\n comb = [None] * (1 << n)\r\n comb[0] = 1\r\n\r\n def count(edges, bit):\r\n if comb[bit] is not None:\r\n return comb[bit]\r\n comb[bit] = 0\r\n for i in range(n):\r\n if (bit >> i) & 1 and not edges[i]:\r\n nxt_bit = bit & ~(1 << i)\r\n nxt_edges = edges.copy()\r\n for j in range(n):\r\n nxt_edges[j] &= ~(1 << i)\r\n cnt = count(nxt_edges, nxt_bit)\r\n comb[bit] += cnt\r\n return comb[bit]\r\n\r\n print(count(edges, (1 << n) - 1))\r\n\r\n class ABC042:\r\n @staticmethod\r\n def a():\r\n a = [int(x) for x in sys.stdin.readline().split()]\r\n c = Counter(a)\r\n print(\"YES\" if c[5] == 2 and c[7] == 1 else \"NO\")\r\n\r\n @staticmethod\r\n def b():\r\n n, l, *s = sys.stdin.read().split()\r\n print(\"\".join(sorted(s)))\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *d = sys.stdin.read().split()\r\n l = len(n)\r\n ok = sorted(set(string.digits) - set(d))\r\n cand = [int(\"\".join(p)) for p in product(ok, repeat=l)] + [\r\n int(min(x for x in ok if x > \"0\") + min(ok) * l)\r\n ]\r\n print(cand[bi_l(cand, int(n))])\r\n\r\n @staticmethod\r\n def d():\r\n h, w, a, b = map(int, sys.stdin.read().split())\r\n combinatorics = Combinatorics(n=2 * 10**5, mod=MOD, numpy=True)\r\n tot = combinatorics.mod_choose(h + w - 2, h - 1)\r\n i = np.arange(h - a, h)\r\n ng = np.sum(\r\n combinatorics.mod_choose(i + b - 1, i)\r\n * combinatorics.mod_choose(h - i + w - b - 2, h - 1 - i)\r\n % MOD\r\n )\r\n tot -= ng\r\n tot %= MOD\r\n print(tot)\r\n\r\n class ABC043:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n print((1 + n) * n // 2)\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip()\r\n t = \"\"\r\n for c in s:\r\n if c == \"B\":\r\n t = t[:-1]\r\n else:\r\n t += c\r\n print(t)\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n x = np.around(a.sum() / n).astype(int)\r\n print(np.sum((a - x) ** 2))\r\n\r\n @staticmethod\r\n def d():\r\n s = sys.stdin.readline().rstrip()\r\n n = len(s)\r\n for i in range(n - 1):\r\n if s[i] == s[i + 1]:\r\n print(i + 1, i + 2)\r\n return\r\n for i in range(n - 2):\r\n if s[i] == s[i + 2]:\r\n print(i + 1, i + 3)\r\n return\r\n print(-1, -1)\r\n\r\n class ABC170:\r\n @staticmethod\r\n def a():\r\n x = 
[int(x) for x in sys.stdin.readline().split()]\r\n for i in range(5):\r\n if x[i] != i + 1:\r\n print(i + 1)\r\n break\r\n\r\n @staticmethod\r\n def b():\r\n x, y = map(int, sys.stdin.readline().split())\r\n print(\"Yes\" if 2 * x <= y <= 4 * x and y % 2 == 0 else \"No\")\r\n\r\n @staticmethod\r\n def c():\r\n x, n, *p = map(int, sys.stdin.read().split())\r\n a = list(set(range(102)) - set(p))\r\n a = [(abs(y - x), y) for y in a]\r\n print(sorted(a)[0][1])\r\n\r\n @staticmethod\r\n def d():\r\n n, *a = map(int, sys.stdin.read().split())\r\n cand = set(a)\r\n cnt = 0\r\n for x, c in sorted(Counter(a).items()):\r\n cnt += c == 1 and x in cand\r\n cand -= set(range(x * 2, 10**6 + 1, x))\r\n print(cnt)\r\n\r\n @staticmethod\r\n def e():\r\n n, q = map(int, sys.stdin.readline().split())\r\n queue = []\r\n m = 2 * 10**5\r\n infants = [[] for _ in range(m)]\r\n highest_rate = [None] * m\r\n where = [None] * n\r\n rate = [None] * n\r\n\r\n def entry(i, k):\r\n where[i] = k\r\n while infants[k]:\r\n r, j = heappop(infants[k])\r\n if where[j] != k or j == i:\r\n continue\r\n if rate[i] >= -r:\r\n highest_rate[k] = rate[i]\r\n heappush(queue, (rate[i], k, i))\r\n heappush(infants[k], (r, j))\r\n break\r\n else:\r\n highest_rate[k] = rate[i]\r\n heappush(queue, (rate[i], k, i))\r\n heappush(infants[k], (-rate[i], i))\r\n\r\n def transfer(i, k):\r\n now = where[i]\r\n while infants[now]:\r\n r, j = heappop(infants[now])\r\n if where[j] != now or j == i:\r\n continue\r\n if highest_rate[now] != -r:\r\n highest_rate[now] = -r\r\n heappush(queue, (-r, now, j))\r\n heappush(infants[now], (r, j))\r\n break\r\n else:\r\n highest_rate[now] = None\r\n entry(i, k)\r\n\r\n def inquire():\r\n while True:\r\n r, k, i = heappop(queue)\r\n if where[i] != k or r != highest_rate[k]:\r\n continue\r\n heappush(queue, (r, k, i))\r\n return r\r\n\r\n for i in range(n):\r\n a, b = map(int, sys.stdin.readline().split())\r\n rate[i] = a\r\n entry(i, b - 1)\r\n for _ in range(q):\r\n c, d = map(int, sys.stdin.readline().split())\r\n transfer(c - 1, d - 1)\r\n print(inquire())\r\n\r\n class ABC171:\r\n @staticmethod\r\n def a():\r\n c = sys.stdin.readline().rstrip()\r\n print(\"A\" if c < \"a\" else \"a\")\r\n\r\n @staticmethod\r\n def b():\r\n n, k, *p = map(int, sys.stdin.read().split())\r\n print(sum(sorted(p)[:k]))\r\n\r\n @staticmethod\r\n def c():\r\n n = int(sys.stdin.readline().rstrip())\r\n n -= 1\r\n l = 1\r\n while True:\r\n if n < pow(26, l):\r\n break\r\n n -= pow(26, l)\r\n l += 1\r\n res = \"\".join(\r\n [chr(ord(\"a\") + d) for d in NumberTheory.base_convert(n, 26)][\r\n ::-1\r\n ]\r\n )\r\n res = \"a\" * (l - len(res)) + res\r\n print(res)\r\n\r\n @staticmethod\r\n def d():\r\n n = int(sys.stdin.readline().rstrip())\r\n a = [int(x) for x in sys.stdin.readline().split()]\r\n s = sum(a)\r\n cnt = Counter(a)\r\n q = int(sys.stdin.readline().rstrip())\r\n for _ in range(q):\r\n b, c = map(int, sys.stdin.readline().split())\r\n s += (c - b) * cnt[b]\r\n print(s)\r\n cnt[c] += cnt[b]\r\n cnt[b] = 0\r\n\r\n @staticmethod\r\n def e():\r\n n, *a = map(int, sys.stdin.read().split())\r\n s = 0\r\n for x in a:\r\n s ^= x\r\n b = map(lambda x: x ^ s, a)\r\n print(*b, sep=\" \")\r\n\r\n class ABC172:\r\n @staticmethod\r\n def a():\r\n a = int(sys.stdin.readline().rstrip())\r\n print(a * (1 + a + a**2))\r\n\r\n @staticmethod\r\n def b():\r\n s, t = sys.stdin.read().split()\r\n print(sum(s[i] != t[i] for i in range(len(s))))\r\n\r\n @staticmethod\r\n def c():\r\n n, m, k = map(int, sys.stdin.readline().split())\r\n a = [0] + 
[int(x) for x in sys.stdin.readline().split()]\r\n b = [int(x) for x in sys.stdin.readline().split()]\r\n (*sa,) = accumulate(a)\r\n (*sb,) = accumulate(b)\r\n res = 0\r\n for i in range(n + 1):\r\n r = k - sa[i]\r\n if r < 0:\r\n break\r\n res = max(res, i + bi_r(sb, r))\r\n print(res)\r\n\r\n @staticmethod\r\n def d():\r\n n = int(sys.stdin.readline().rstrip())\r\n f = np.zeros(n + 1, dtype=np.int64)\r\n for i in range(1, n + 1):\r\n f[i::i] += 1\r\n print((np.arange(1, n + 1) * f[1:]).sum())\r\n\r\n class ABC173:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n charge = (n + 999) // 1000 * 1000 - n\r\n print(charge)\r\n\r\n @staticmethod\r\n def b():\r\n n, *s = sys.stdin.read().split()\r\n c = Counter(s)\r\n for v in \"AC, WA, TLE, RE\".split(\", \"):\r\n print(f\"{v} x {c[v]}\")\r\n\r\n @staticmethod\r\n def c():\r\n h, w, k = map(int, sys.stdin.readline().split())\r\n c = [sys.stdin.readline().rstrip() for _ in range(h)]\r\n tot = 0\r\n for i in range(1 << h):\r\n for j in range(1 << w):\r\n cnt = 0\r\n for y in range(h):\r\n for x in range(w):\r\n if i >> y & 1 or j >> x & 1:\r\n continue\r\n cnt += c[y][x] == \"#\"\r\n tot += cnt == k\r\n print(tot)\r\n\r\n @staticmethod\r\n def d():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a.sort(reverse=True)\r\n res = (\r\n a[0]\r\n + sum(a[1 : 1 + (n - 2) // 2]) * 2\r\n + a[1 + (n - 2) // 2] * (n & 1)\r\n )\r\n print(res)\r\n\r\n @staticmethod\r\n def e():\r\n MOD = 10**9 + 7\r\n n, k, *a = map(int, sys.stdin.read().split())\r\n minus = [x for x in a if x < 0]\r\n plus = [x for x in a if x > 0]\r\n if len(plus) + len(minus) // 2 * 2 >= k: # plus\r\n (*minus,) = map(abs, minus)\r\n minus.sort(reverse=True)\r\n plus.sort(reverse=True)\r\n cand = []\r\n if len(minus) & 1:\r\n minus = minus[:-1]\r\n for i in range(0, len(minus) - 1, 2):\r\n cand.append(minus[i] * minus[i + 1] % MOD)\r\n if k & 1:\r\n res = plus[0]\r\n plus = plus[1:]\r\n else:\r\n res = 1\r\n if len(plus) & 1:\r\n plus = plus[:-1]\r\n for i in range(0, len(plus) - 1, 2):\r\n cand.append(plus[i] * plus[i + 1] % MOD)\r\n cand.sort(reverse=True)\r\n for x in cand[: k // 2]:\r\n res *= x\r\n res %= MOD\r\n print(res)\r\n elif 0 in a:\r\n print(0)\r\n else:\r\n cand = sorted(map(abs, a))\r\n res = 1\r\n for i in range(k):\r\n res *= cand[i]\r\n res %= MOD\r\n res = MOD - res\r\n print(res)\r\n pass\r\n\r\n class ABC174:\r\n @staticmethod\r\n def a():\r\n print(\"Yes\" if int(sys.stdin.readline().rstrip()) >= 30 else \"No\")\r\n\r\n class ACL001:\r\n @staticmethod\r\n def a():\r\n n, *xy = map(int, sys.stdin.read().split())\r\n (*xy,) = zip(*[iter(xy)] * 2)\r\n print(xy)\r\n pass\r\n\r\n class MSolutions2020:\r\n @staticmethod\r\n def a():\r\n x = int(sys.stdin.readline().rstrip())\r\n x -= 400\r\n print(8 - x // 200)\r\n\r\n @staticmethod\r\n def b():\r\n r, g, b, k = map(int, sys.stdin.read().split())\r\n while k and g <= r:\r\n g *= 2\r\n k -= 1\r\n while k and b <= g:\r\n b *= 2\r\n k -= 1\r\n print(\"Yes\" if r < g < b else \"No\")\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *a = map(int, sys.stdin.read().split())\r\n for i in range(k, n):\r\n print(\"Yes\" if a[i] > a[i - k] else \"No\")\r\n\r\n @staticmethod\r\n def d():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a += [-1]\r\n m = 1000\r\n s = 0\r\n for i in range(n):\r\n if a[i + 1] == a[i]:\r\n continue\r\n elif a[i + 1] > a[i]:\r\n cnt = m // a[i]\r\n m -= a[i] * cnt\r\n s += cnt\r\n else:\r\n m += a[i] * s\r\n s = 0\r\n print(m)\r\n\r\n\r\nclass Codeforces:\r\n 
pass\r\n\r\n\r\nclass ProjectEuler:\r\n @staticmethod\r\n def p1():\r\n def f(n, x):\r\n return (x + n // x * x) * (n // x) // 2\r\n\r\n n = 1000\r\n ans = f(n - 1, 3) + f(n - 1, 5) - f(n - 1, 15)\r\n print(ans)\r\n\r\n @staticmethod\r\n def p2():\r\n fib = [1, 2]\r\n while fib[-1] < 4 * 10**6:\r\n fib.append(fib[-1] + fib[-2])\r\n print(sum(fib[1:-1:3]))\r\n\r\n @staticmethod\r\n def p3():\r\n number_theory = NumberTheory()\r\n res = number_theory.prime_factorize(600851475143)\r\n print(max(res.keys()))\r\n\r\n @staticmethod\r\n def p4():\r\n def is_palindrome(n):\r\n n = str(n)\r\n return n == n[::-1]\r\n\r\n cand = []\r\n for a in range(100, 1000):\r\n for b in range(a, 1000):\r\n n = a * b\r\n if is_palindrome(n):\r\n cand.append(n)\r\n print(max(cand))\r\n\r\n @staticmethod\r\n def p5():\r\n number_theory = NumberTheory()\r\n res = defaultdict(int)\r\n for i in range(1, 21):\r\n for p, c in number_theory.prime_factorize(i).items():\r\n res[p] = max(res[p], c)\r\n ans = 1\r\n for p, c in res.items():\r\n ans *= pow(p, c)\r\n print(ans)\r\n\r\n @staticmethod\r\n def p6():\r\n a = np.arange(101)\r\n b = np.cumsum(a**2)\r\n a = a.cumsum()\r\n print(a[100] ** 2 - b[100])\r\n\r\n @staticmethod\r\n def p7():\r\n number_theory = NumberTheory()\r\n print(sorted(number_theory.prime_numbers)[10000])\r\n\r\n @staticmethod\r\n def p8():\r\n n = \"7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450\"\r\n n = [int(d) for d in list(n)]\r\n res = 0\r\n for i in range(988):\r\n x = 1\r\n for j in range(13):\r\n x *= n[i + j]\r\n res = max(res, x)\r\n print(res)\r\n\r\n @staticmethod\r\n def p9():\r\n for a in range(1, 997):\r\n for b in range(a, 998 - a):\r\n c = 1000 - a - b\r\n if a**2 + b**2 == c**2:\r\n print(a * b * c)\r\n return\r\n\r\n @staticmethod\r\n def p10():\r\n number_theory = NumberTheory(2 * 10**6 - 1)\r\n print(sum(number_theory.prime_numbers))\r\n\r\n @staticmethod\r\n def p11():\r\n grid = \"08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08 49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00 81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65 52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91 22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80 24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50 32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70 67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21 24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72 21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95 78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92 16 39 05 42 96 
35 31 47 55 58 88 24 00 17 54 24 36 29 85 57 86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58 19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40 04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66 88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69 04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36 20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16 20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54 01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48\"\r\n # grid = np.array(grid.split(), dtype=np.int64).reshape(20, -1)\r\n # cand = []\r\n # for i in range(20):\r\n # bl1 = i+3 < 20\r\n # for j in range(20):\r\n # bl2 = j+3 < 20\r\n # if bl1:\r\n # np.prod\r\n # tmp = 1\r\n # for d in range(4):\r\n # tmp *= grid[i+d, j]\r\n print(grid)\r\n\r\n pass\r\n\r\n\r\nclass Yukicoder:\r\n pass\r\n\r\n\r\nif __name__ == \"__main__\":\r\n AtCoder.ABC014.a()\r\n", "import sys\r\nimport typing\r\n\r\nimport numba as nb\r\nimport numpy as np\r\n\r\n\r\[email protected]\r\ndef seg_build(n: int) -> typing.NoReturn:\r\n return np.zeros(n * 2, np.int64)\r\n\r\n\r\[email protected]\r\ndef seg_build_from_array(a: np.ndarray) -> np.ndarray:\r\n n = len(a)\r\n seg = np.empty(n * 2, np.int64)\r\n seg[n:] = a\r\n for i in range(n - 1, 0, -1):\r\n seg[i] = seg[i << 1] ^ seg[i << 1 | 1]\r\n return seg\r\n\r\n\r\[email protected]\r\ndef seg_set(\r\n seg: np.ndarray,\r\n i: int,\r\n x: int,\r\n) -> typing.NoReturn:\r\n n = len(seg) >> 1\r\n i += n\r\n seg[i] = x\r\n while i > 1:\r\n i >>= 1\r\n seg[i] = seg[i << 1] ^ seg[i << 1 | 1]\r\n\r\n\r\[email protected]\r\ndef seg_get(seg: np.ndarray, i: int) -> int:\r\n n = len(seg) >> 1\r\n return seg[i + n]\r\n\r\n\r\[email protected]\r\ndef seg_get_range(seg: np.ndarray, l: int, r: int) -> int:\r\n n = len(seg) >> 1\r\n l, r = l + n, r + n\r\n v = 0\r\n while l < r:\r\n if l & 1:\r\n v ^= seg[l]\r\n l += 1\r\n if r & 1:\r\n r -= 1\r\n v ^= seg[r]\r\n l, r = l >> 1, r >> 1\r\n return v\r\n\r\n\r\[email protected]((nb.i8[:], nb.i8[:, :]), cache=True)\r\ndef solve(a: np.ndarray, txy: np.ndarray) -> typing.NoReturn:\r\n n, q = len(a), len(txy)\r\n seg = seg_build_from_array(a)\r\n for i in range(q):\r\n t, x, y = txy[i]\r\n if t == 1:\r\n x -= 1\r\n v = seg_get(seg, x)\r\n seg_set(seg, x, v ^ y)\r\n else:\r\n v = seg_get_range(seg, x - 1, y)\r\n print(v)\r\n\r\n\r\ndef main() -> typing.NoReturn:\r\n n, q = map(int, input().split())\r\n a = np.array(\r\n sys.stdin.readline().split(),\r\n dtype=np.int64,\r\n )\r\n txy = np.array(\r\n sys.stdin.read().split(),\r\n dtype=np.int64,\r\n ).reshape(q, 3)\r\n\r\n solve(a, txy)\r\n\r\n\r\nmain()\n", "import sys\r\nimport typing\r\n\r\nimport numba as nb\r\nimport numpy as np\r\n\r\n\r\[email protected]((nb.i8[:, :], ), cache=True)\r\ndef solve(dc: np.ndarray) -> typing.NoReturn:\r\n m = len(dc)\r\n d, c = dc[:, 0], dc[:, 1]\r\n s = np.sum(d * c)\r\n l = c.sum()\r\n a = l * 9 + s\r\n q, r = divmod(a, 9)\r\n q -= 1\r\n print(q)\r\n\r\n\r\n\r\ndef main() -> typing.NoReturn:\r\n m = int(input())\r\n dc = np.array(\r\n sys.stdin.read().split(),\r\n dtype=np.int64,\r\n ).reshape(m, 2)\r\n solve(dc)\r\n\r\n\r\nmain()\n", "import itertools\r\nimport math\r\nimport string\r\nimport sys\r\nfrom bisect import bisect_left as bi_l\r\nfrom bisect import bisect_right as bi_r\r\nfrom collections import Counter, defaultdict, deque\r\nfrom functools import lru_cache, reduce\r\nfrom heapq import heapify, heappop, heappush\r\nfrom operator import or_, 
xor\r\n\r\nsys.setrecursionlimit(10**7)\r\ninf = float(\"inf\")\r\nMOD = 10**9 + 7\r\n# MOD = 998244353\r\n\r\n\r\nusing_numpy = 1\r\nimport networkx as nx\r\nimport numpy as np\r\nfrom numba import i8, njit\r\nfrom scipy import optimize\r\nfrom scipy.ndimage import distance_transform_cdt\r\nfrom scipy.sparse import csr_matrix\r\nfrom scipy.sparse.csgraph import (\r\n connected_components,\r\n csgraph_to_dense,\r\n maximum_flow,\r\n minimum_spanning_tree,\r\n shortest_path,\r\n)\r\nfrom scipy.spatial import ConvexHull\r\nfrom scipy.special import comb\r\n\r\n\r\nclass Algebra:\r\n class Modular(int):\r\n def __init__(self, n, mod=MOD):\r\n self.value = n\r\n self.mod = mod\r\n\r\n def __str__(self):\r\n return f\"{self.value}\"\r\n\r\n def __add__(self, other):\r\n return self.__class__((self.value + other.value) % self.mod)\r\n\r\n def __sub__(self, x):\r\n return self.__class__((self.value - x.value) % self.mod)\r\n\r\n def __mul__(self, x):\r\n return self.__class__((self.value * x.value) % self.mod)\r\n\r\n def __pow__(self, x):\r\n return self.__class__(pow(self.value, x.value, self.mod))\r\n\r\n def __lt__(self, x):\r\n return self.value < x.value\r\n\r\n def __le__(self, x):\r\n return self.value <= x.value\r\n\r\n def __eq__(self, x):\r\n return self.value == x.value\r\n\r\n def __ne__(self, x):\r\n return self.value != x.value\r\n\r\n def __gt__(self, x):\r\n return self.value > x.value\r\n\r\n def __ge__(self, x):\r\n return self.value >= x.value\r\n\r\n class SemiGroup:\r\n pass\r\n\r\n class Monoid:\r\n pass\r\n\r\n class Group:\r\n pass\r\n\r\n class SemiRing:\r\n pass\r\n\r\n class Ring:\r\n pass\r\n\r\n @staticmethod\r\n def identity(n):\r\n if using_numpy:\r\n return np.identity(n, dtype=np.int64)\r\n else:\r\n a = [[0] * n for _ in range(n)]\r\n for i in range(n):\r\n a[i][i] = 1\r\n return a\r\n\r\n @staticmethod\r\n def dot(a, b):\r\n if using_numpy:\r\n return np.dot(a, b)\r\n else:\r\n h, w, l = len(a), len(b[0]), len(b)\r\n assert len(a[0]) == l\r\n c = [[0] * w for _ in range(h)]\r\n for i in range(h):\r\n for j in range(w):\r\n for k in range(l):\r\n c[i][j] += a[i][k] * b[k][j]\r\n return c\r\n\r\n @classmethod\r\n def matrix_pow(cls, a, n, mod=10**9 + 7):\r\n m = len(a)\r\n b = cls.identity(m)\r\n while n:\r\n if n & 1:\r\n b = cls.dot(b, a)\r\n n >>= 1\r\n a = cls.dot(a, a)\r\n if using_numpy:\r\n a %= mod\r\n b %= mod\r\n else:\r\n for i in range(m):\r\n for j in range(m):\r\n a[i][j] %= mod\r\n b[i][j] %= mod\r\n return b\r\n\r\n @staticmethod\r\n def bitwise_dot(a, b):\r\n if using_numpy:\r\n return np.bitwise_xor.reduce(\r\n a[:, None, :] & b.T[None, :, :], axis=-1\r\n )\r\n else:\r\n h, w, l = len(a), len(b[0]), len(b)\r\n assert len(a[0]) == l\r\n c = [[0] * w for _ in range(h)]\r\n for i in range(h):\r\n for j in range(w):\r\n for k in range(l):\r\n c[i][j] ^= a[i][k] & b[k][j]\r\n return c\r\n\r\n @classmethod\r\n def bitwise_mat_pow(cls, a, n):\r\n if n == 0:\r\n return np.eye(len(a), dtype=np.uint32) * ((1 << 32) - 1)\r\n res = cls.bitwise_mat_pow(a, n // 2)\r\n res = cls.bitwise_dot(res, res)\r\n return cls.bitwise_dot(res, a) if n & 1 else res\r\n\r\n @staticmethod\r\n def cumprod(a, mod):\r\n l = len(a)\r\n sql = int(np.sqrt(l) + 1)\r\n a = np.resize(a, sql**2).reshape(sql, sql)\r\n for i in range(sql - 1):\r\n a[:, i + 1] *= a[:, i]\r\n a[:, i + 1] %= mod\r\n for i in range(sql - 1):\r\n a[i + 1] *= a[i, -1]\r\n a[i + 1] %= mod\r\n return np.ravel(a)[:l]\r\n\r\n @classmethod\r\n def generate_fac_ifac(cls, n, p=MOD):\r\n if using_numpy:\r\n fac = 
np.arange(n + 1)\r\n fac[0] = 1\r\n fac = cls.cumprod(fac, p)\r\n ifac = np.arange(n + 1, 0, -1)\r\n ifac[0] = pow(int(fac[-1]), p - 2, p)\r\n ifac = cls.cumprod(ifac, p)[n::-1]\r\n else:\r\n fac = [None] * (n + 1)\r\n fac[0] = 1\r\n for i in range(n):\r\n fac[i + 1] = fac[i] * (i + 1) % p\r\n ifac = [None] * (n + 1)\r\n ifac[n] = pow(fac[n], p - 2, p)\r\n for i in range(n, 0, -1):\r\n ifac[i - 1] = ifac[i] * i % p\r\n return fac, ifac\r\n\r\n class Kitamasa:\r\n pass\r\n\r\n\r\nmint = Algebra.Modular\r\n\r\n\r\nclass NumberTheory:\r\n class PrimeNumbers: # pn\r\n def __init__(self, n=2 * 10**6):\r\n self.is_prime, self.prime_nums = self.find(n)\r\n\r\n def __call__(self, n):\r\n return self.is_prime[n]\r\n\r\n def __iter__(self):\r\n return iter(self.prime_nums)\r\n\r\n def __getitem__(self, key):\r\n return self.prime_nums[key]\r\n\r\n @staticmethod\r\n def find(n): # Sieve of eratosthenes\r\n if using_numpy:\r\n is_prime = np.ones(n + 1, dtype=np.bool)\r\n is_prime[:2] = 0\r\n for i in range(2, int(n**0.5) + 1):\r\n if is_prime[i]:\r\n is_prime[i * 2 :: i] = 0\r\n prime_nums = np.flatnonzero(is_prime)\r\n else:\r\n is_prime = [True] * (n + 1)\r\n is_prime[0] = is_prime[1] = 0\r\n for i in range(2, int(n**0.5) + 1):\r\n if not is_prime[i]:\r\n continue\r\n for j in range(i * 2, n + 1, i):\r\n is_prime[j] = 0\r\n prime_nums = [i for i in range(2, n + 1) if is_prime[i]]\r\n return is_prime, prime_nums\r\n\r\n @lru_cache(maxsize=None)\r\n def factorize(self, n):\r\n res = defaultdict(int)\r\n if n < 2:\r\n return res\r\n for p in self:\r\n if p * p > n:\r\n break\r\n while n % p == 0:\r\n res[p] += 1\r\n n //= p\r\n if n == 1:\r\n return res\r\n res[n] = 1\r\n return res\r\n\r\n def factorize_factorial(self, n):\r\n res = defaultdict(int)\r\n for i in range(2, n + 1):\r\n for p, c in self.factorize(i).items():\r\n res[p] += c\r\n return res\r\n\r\n @classmethod\r\n @lru_cache(maxsize=None)\r\n def gcd(cls, a, b):\r\n return cls.gcd(b, a % b) if b else abs(a)\r\n\r\n @classmethod\r\n def lcm(cls, a, b):\r\n return abs(a // cls.gcd(a, b) * b)\r\n\r\n @staticmethod\r\n def find_divisors(n):\r\n divisors = []\r\n for i in range(1, int(n**0.5) + 1):\r\n if n % i:\r\n continue\r\n divisors.append(i)\r\n j = n // i\r\n if j != i:\r\n divisors.append(j)\r\n return sorted(divisors)\r\n\r\n @staticmethod\r\n def base_convert(n, b):\r\n if not n:\r\n return [0]\r\n res = []\r\n while n:\r\n n, r = divmod(n, b)\r\n if r < 0:\r\n n += 1\r\n r -= b\r\n res.append(r)\r\n return res\r\n\r\n\r\nclass Combinatorics:\r\n @classmethod\r\n @lru_cache(maxsize=None)\r\n def choose(cls, n, r, mod=None):\r\n if r > n or r < 0:\r\n return 0\r\n if r == 0:\r\n return 1\r\n res = cls.choose(n - 1, r, mod) + cls.choose(n - 1, r - 1, mod)\r\n if mod:\r\n res %= mod\r\n return res\r\n\r\n class CombinationsMod:\r\n def __init__(self, n=2 * 10**6, mod=MOD):\r\n self.__mod = mod\r\n self.__fac, self.__ifac = Algebra.generate_fac_ifac(n, mod)\r\n\r\n def __call__(self, n, r):\r\n return self.__choose(n, r)\r\n\r\n def __choose(self, n, r):\r\n bl = (0 <= r) & (r <= n)\r\n p = self.__mod\r\n return (\r\n bl\r\n * self.__fac[n]\r\n * self.__ifac[r]\r\n % p\r\n * self.__ifac[n - r]\r\n % p\r\n )\r\n\r\n def make_nchoose_table(self, n):\r\n p = self.__mod\r\n r = len(self.__fac) - 1\r\n if using_numpy:\r\n n_choose = np.arange(n + 1, n - r, -1)\r\n n_choose[0] = 1\r\n n_choose = Algebra.cumprod(n_choose, p) * self.__ifac % p\r\n else:\r\n n_choose = [None] * (r + 1)\r\n n_choose[0] = 1\r\n for i in range(r):\r\n n_choose[i + 1] 
= n_choose[i] * (n - i) % p\r\n for i in range(1, r + 1):\r\n n_choose[i] = n_choose[i] * self.__ifac[i] % p\r\n return n_choose\r\n\r\n @classmethod\r\n def permutations(cls, a, r=None, i=0):\r\n a = list(a)\r\n n = len(a)\r\n if r is None:\r\n r = n\r\n res = []\r\n if r > n or i > r:\r\n return res\r\n if i == r:\r\n return [tuple(a[:r])]\r\n for j in range(i, n):\r\n a[i], a[j] = a[j], a[i]\r\n res += cls.permutations(a, r, i + 1)\r\n return res\r\n\r\n @staticmethod\r\n def combinations(a, r):\r\n a = tuple(a)\r\n n = len(a)\r\n if r > n:\r\n return\r\n indices = list(range(r))\r\n yield a[:r]\r\n while True:\r\n for i in range(r - 1, -1, -1):\r\n if indices[i] != i + n - r:\r\n break\r\n else:\r\n return\r\n indices[i] += 1\r\n for j in range(i + 1, r):\r\n indices[j] = indices[j - 1] + 1\r\n yield tuple(a[i] for i in indices)\r\n\r\n\r\nclass DP:\r\n @staticmethod\r\n def LIS(a):\r\n res = [inf] * len(a)\r\n for x in a:\r\n res[bi_l(res, x)] = x\r\n return res\r\n\r\n\r\nclass String:\r\n @staticmethod\r\n def z_algorithm(s):\r\n n = len(s)\r\n a = [0] * n\r\n a[0] = n\r\n l = r = -1\r\n for i in range(1, n):\r\n if r >= i:\r\n a[i] = min(a[i - l], r - i)\r\n while i + a[i] < n and s[i + a[i]] == s[a[i]]:\r\n a[i] += 1\r\n if i + a[i] >= r:\r\n l, r = i, i + a[i]\r\n return a\r\n\r\n\r\nclass GeometryTopology:\r\n class Graph:\r\n class __Edge:\r\n def __init__(self, weight=1, capacity=1, **args):\r\n self.weight = weight\r\n self.capacity = capacity\r\n\r\n class __Node:\r\n def __init__(self, **args):\r\n pass\r\n\r\n def __init__(self, n=0):\r\n self.__N = n\r\n self.nodes = [None] * n\r\n self.edges = [{} for _ in range(n)]\r\n\r\n def add_node_info(self, v, **args):\r\n self.nodes[v] = self.__Node(**args)\r\n\r\n def add_edge(self, u, v, **args):\r\n self.edges[u][v] = self.__Edge(**args)\r\n\r\n def get_size(self):\r\n return self.__N\r\n\r\n def bfs(self, src=0):\r\n n = self.__N\r\n self.depth = self.lv = lv = [None] * n\r\n lv[src] = 0 # depth in tree, or level in general graph.\r\n self.dist = dist = [inf] * n\r\n dist[src] = 0 # dist for only tree.\r\n self.parent = par = [None] * n\r\n par[src] = src\r\n q = deque([src])\r\n while q:\r\n u = q.popleft()\r\n for v, e in self.edges[u].items():\r\n if e.capacity == 0 or lv[v] is not None:\r\n continue\r\n lv[v], dist[v], par[v] = lv[u] + 1, dist[u] + e.weight, u\r\n q.append(v)\r\n return dist\r\n\r\n def dinic(self, src, sink):\r\n def flow_to_sink(u, flow_in):\r\n if u == sink:\r\n return flow_in\r\n flow = 0\r\n for v, e in self.edges[u].items():\r\n if e.capacity == 0 or self.lv[v] <= self.lv[u]:\r\n continue\r\n f = flow_to_sink(v, min(flow_in, e.capacity))\r\n if not f:\r\n continue\r\n self.edges[u][v].capacity -= f\r\n if u in self.edges[v]:\r\n self.edges[v][u].capacity += f\r\n else:\r\n self.add_edge(v, u, capacity=f)\r\n flow_in -= f\r\n flow += f\r\n return flow\r\n\r\n flow = 0\r\n while True:\r\n self.bfs(src)\r\n if self.lv[sink] is None:\r\n return flow\r\n flow += flow_to_sink(src, inf)\r\n\r\n def ford_fulkerson(self):\r\n pass\r\n\r\n def push_relabel(self):\r\n pass\r\n\r\n def floyd_warshall(self):\r\n n = self.__N\r\n d = [[inf] * n for _ in range(n)]\r\n for u in range(n):\r\n d[u][u] = 0\r\n for v, e in self.edges[u].items():\r\n d[u][v] = e.weight\r\n for w in range(n):\r\n for u in range(n):\r\n for v in range(n):\r\n d[u][v] = min(d[u][v], d[u][w] + d[w][v])\r\n return d\r\n\r\n def dijkstra(self, src, paths_cnt=False, mod=None):\r\n dist = [inf] * self.__N\r\n dist[src] = 0\r\n visited = [False] 
* self.__N\r\n paths = [0] * self.__N\r\n paths[src] = 1\r\n q = [(0, src)]\r\n while q:\r\n d, u = heappop(q)\r\n if visited[u]:\r\n continue\r\n visited[u] = True\r\n for v, e in self.edges[u].items():\r\n dv = d + e.weight\r\n if dv > dist[v]:\r\n continue\r\n elif dv == dist[v]:\r\n paths[v] += paths[u]\r\n if mod:\r\n paths[v] %= mod\r\n continue\r\n paths[v], dist[v] = paths[u], dv\r\n heappush(q, (dv, v))\r\n if paths_cnt:\r\n return dist, paths\r\n else:\r\n return dist\r\n\r\n def astar(self, src, tgt, heuristic_func):\r\n cost = [inf] * self.__N\r\n q = [(heuristic_func(src, tgt), 0, src)]\r\n while q:\r\n _, c, u = heappop(q)\r\n if u == tgt:\r\n return c\r\n if cost[u] != inf:\r\n continue\r\n cost[u] = c\r\n for v, e in self.edges[u].items():\r\n if cost[v] != inf:\r\n continue\r\n h = heuristic_func(v, tgt)\r\n nc = c + e.weight\r\n heappush(q, (h + nc, nc, v))\r\n return inf\r\n\r\n def bellman_ford(self, src):\r\n n = self.__N\r\n d = [inf] * n\r\n d[src] = 0\r\n for _ in range(n - 1):\r\n for u in range(n):\r\n for v, e in self.edges[u].items():\r\n d[v] = min(d[v], d[u] + e.weight)\r\n\r\n for u in range(n):\r\n for v, e in self.edges[u].items():\r\n if d[u] + e.weight < d[v]:\r\n raise Exception(\"found negative cycle.\")\r\n\r\n return d\r\n\r\n def find_ancestors(self): # tree doubling.\r\n self.__ancestors = ancestors = [self.parent]\r\n for _ in range(max(self.depth).bit_length()):\r\n ancestors.append([ancestors[-1][u] for u in ancestors[-1]])\r\n\r\n def find_dist(self, u, v):\r\n return (\r\n self.dist[u]\r\n + self.dist[v]\r\n - 2 * self.dist[self.__find_lca(u, v)]\r\n )\r\n\r\n def __find_lca(self, u, v):\r\n du, dv = self.depth[u], self.depth[v]\r\n if du > dv:\r\n u, v = v, u\r\n du, dv = dv, du\r\n\r\n d = dv - du\r\n for i in range(d.bit_length()): # up-stream\r\n if d >> i & 1:\r\n v = self.__ancestors[i][v]\r\n if v == u:\r\n return v\r\n\r\n for i in range(\r\n du.bit_length() - 1, -1, -1\r\n ): # find direct child of LCA.\r\n nu, nv = self.__ancestors[i][u], self.__ancestors[i][v]\r\n if nu == nv:\r\n continue\r\n u, v = nu, nv\r\n\r\n return self.__ancestors[0][u]\r\n\r\n def init_dsu(self): # disjoint set union (union-find)\r\n n = self.__N\r\n self.parent = list(range(n))\r\n self.rank = [0] * n\r\n self.size = [1] * n\r\n\r\n def find(self, u):\r\n if self.parent[u] == u:\r\n return u\r\n self.parent[u] = self.find(self.parent[u])\r\n return self.parent[u]\r\n\r\n def unite(self, u, v):\r\n u, v = self.find(u), self.find(v)\r\n if u == v:\r\n return\r\n if self.rank[u] < self.rank[v]:\r\n u, v = v, u\r\n self.parent[v] = u\r\n self.size[u] += self.size[v]\r\n self.rank[u] = max(self.rank[u], self.rank[v] + 1)\r\n\r\n def same(self, u, v):\r\n return self.find(u) == self.find(v)\r\n\r\n def scc(self): # strongly connected components\r\n n = self.__N\r\n visited, q, root, r = [False] * n, [], [None] * n, 0\r\n gg = self.__class__(n)\r\n for u in range(n):\r\n for v in self.edges[u]:\r\n gg.add_edge(v, u)\r\n\r\n def dfs(u):\r\n if visited[u]:\r\n return\r\n visited[u] = True\r\n for v in self.edges[u]:\r\n dfs(v)\r\n q.append(u)\r\n\r\n def rev_dfs(u, r):\r\n if root[u] is not None:\r\n return\r\n root[u] = r\r\n for v in gg.edges[u]:\r\n rev_dfs(v, r)\r\n\r\n for u in range(n):\r\n dfs(u)\r\n for u in q[::-1]:\r\n rev_dfs(u, r)\r\n r += 1\r\n return root\r\n\r\n def kruskal(self): # minimum spanning tree\r\n n = self.__N\r\n uf = self.__class__(n)\r\n uf.init_dsu()\r\n edges = sorted(\r\n [\r\n (u, v, e.weight)\r\n for u in range(n)\r\n for v, e in 
self.edges[u].items()\r\n ],\r\n key=lambda x: x[2],\r\n )\r\n g = self.__class__(n)\r\n d = 0\r\n for u, v, w in edges:\r\n if uf.same(u, v):\r\n continue\r\n uf.unite(u, v)\r\n g.add_edge(u, v, weight=w)\r\n d += w\r\n return g, d\r\n\r\n def prim(self, src=0, return_parent=False): # minimum spanning tree\r\n n = self.__N\r\n g = self.__class__(n)\r\n parent, visited, dist = [None] * n, [False] * n, 0\r\n q = [(0, (src, src))]\r\n while q:\r\n d, (w, u) = heappop(q)\r\n if visited[u]:\r\n continue\r\n visited[u], parent[u] = True, w\r\n dist += d\r\n g.add_edge(w, u, weight=d)\r\n for v, e in self.edges[u].items():\r\n if not visited[v]:\r\n heappush(q, (e.weight, (u, v)))\r\n if return_parent:\r\n return g, dist, parent\r\n return g, dist\r\n\r\n def boruvka(self): # minimum spanning tree\r\n n = self.__N\r\n uf = self.__class__(n)\r\n uf.init_dsu()\r\n g = self.__class__(n)\r\n d = 0\r\n\r\n def dfs(u):\r\n if visited[u]:\r\n return (inf, (None, None))\r\n visited[u] = True\r\n cand = []\r\n for v, e in self.edges[u].items():\r\n if uf.same(u, v):\r\n cand.append(dfs(v))\r\n continue\r\n cand.append((e.weight, (u, v)))\r\n return sorted(cand)[0]\r\n\r\n while len(set(uf.parent)) != 1:\r\n edges, visited = [], [False] * n\r\n for u in range(n):\r\n if visited[u]:\r\n continue\r\n edges.append(dfs(u))\r\n for w, (u, v) in edges:\r\n if uf.same(u, v):\r\n continue\r\n g.add_edge(u, v, weight=w)\r\n uf.unite(u, v)\r\n d += w\r\n for u in range(n):\r\n uf.find(u)\r\n\r\n return g, d\r\n\r\n def tsp(self): # traveling salesperson problem\r\n pass\r\n\r\n @staticmethod\r\n def triangle_area(p0, p1, p2, signed=False):\r\n x1, y1, x2, y2 = (\r\n p1[0] - p0[0],\r\n p1[1] - p0[1],\r\n p2[0] - p0[0],\r\n p2[1] - p0[1],\r\n )\r\n return (\r\n (x1 * y2 - x2 * y1) / 2 if signed else abs(x1 * y2 - x2 * y1) / 2\r\n )\r\n\r\n @classmethod\r\n def intersect(cls, seg1, seg2):\r\n (p1, p2), (p3, p4) = seg1, seg2\r\n t1 = cls.triangle_area(p1, p2, p3, signed=True)\r\n t2 = cls.triangle_area(p1, p2, p4, signed=True)\r\n t3 = cls.triangle_area(p3, p4, p1, signed=True)\r\n t4 = cls.triangle_area(p3, p4, p2, signed=True)\r\n return (t1 * t2 < 0) & (t3 * t4 < 0)\r\n\r\n\r\ndef cumxor(a):\r\n return reduce(xor, a, 0)\r\n\r\n\r\ndef cumor(a):\r\n return reduce(or_, a, 0)\r\n\r\n\r\ndef bit_count(n):\r\n cnt = 0\r\n while n:\r\n cnt += n & 1\r\n n >>= 1\r\n return cnt\r\n\r\n\r\nclass AtCoder:\r\n class ABC001:\r\n @staticmethod\r\n def a():\r\n h1, h2 = map(int, sys.stdin.read().split())\r\n print(h1 - h2)\r\n\r\n @staticmethod\r\n def d():\r\n def to_minuites(x):\r\n q, r = divmod(x, 100)\r\n return 60 * q + r\r\n\r\n def to_hmform(x):\r\n q, r = divmod(x, 60)\r\n return 100 * q + r\r\n\r\n n = int(sys.stdin.readline().rstrip())\r\n term = [0] * 2001\r\n for _ in range(n):\r\n s, e = map(\r\n to_minuites,\r\n map(int, sys.stdin.readline().rstrip().split(\"-\")),\r\n )\r\n s = s // 5 * 5\r\n e = (e + 4) // 5 * 5\r\n term[s] += 1\r\n term[e + 1] -= 1\r\n for i in range(2000):\r\n term[i + 1] += term[i]\r\n\r\n res = []\r\n raining = False\r\n for i in range(2001):\r\n if term[i]:\r\n if not raining:\r\n s = i\r\n raining = True\r\n elif raining:\r\n res.append((s, i - 1))\r\n raining = False\r\n for s, e in res:\r\n print(f\"{to_hmform(s):04}-{to_hmform(e):04}\")\r\n\r\n class ABC002:\r\n @staticmethod\r\n def a():\r\n print(max(map(int, sys.stdin.readline().split())))\r\n\r\n @staticmethod\r\n def b():\r\n vowels = set(\"aeiou\")\r\n print(\r\n \"\".join(\r\n [\r\n c\r\n for c in sys.stdin.readline().rstrip()\r\n 
if c not in vowels\r\n ]\r\n )\r\n )\r\n\r\n @staticmethod\r\n def c():\r\n print(\r\n GeometryTopology.triangle_area(\r\n *map(int, sys.stdin.readline().split())\r\n )\r\n )\r\n\r\n @staticmethod\r\n def d():\r\n n, m = map(int, sys.stdin.readline().split())\r\n edges = set(\r\n (x - 1, y - 1)\r\n for x, y in zip(*[map(int, sys.stdin.read().split())] * 2)\r\n )\r\n print(\r\n max(\r\n len(s)\r\n for i in range(1, 1 << n)\r\n for s in [[j for j in range(n) if i >> j & 1]]\r\n if all(\r\n (x, y) in edges\r\n for x, y in itertools.combinations(s, 2)\r\n )\r\n )\r\n )\r\n\r\n @staticmethod\r\n def d_2():\r\n n, m = map(int, sys.stdin.readline().split())\r\n relations = [1 << i for i in range(n)]\r\n for x, y in zip(*[map(int, sys.stdin.read().split())] * 2):\r\n relations[x] |= 1 << (y - 1)\r\n relations[y] |= 1 << (x - 1)\r\n res = 0\r\n for i in range(1 << n):\r\n s, cnt = (1 << n) - 1, 0\r\n for j in range(n):\r\n if i >> j & 1:\r\n t &= relations[j] | 1 << j\r\n cnt += 1\r\n if s & i == i:\r\n res = max(res, cnt)\r\n print(res)\r\n\r\n class ABC003:\r\n @staticmethod\r\n def a():\r\n print((int(sys.stdin.readline().rstrip()) + 1) * 5000)\r\n\r\n @staticmethod\r\n def b():\r\n atcoder = set(\"atcoder\")\r\n s, t = sys.stdin.read().split()\r\n print(\r\n all(\r\n s[i] == t[i]\r\n or s[i] == \"@\"\r\n and t[i] in atcoder\r\n or t[i] == \"@\"\r\n and s[i] in atcoder\r\n for i in range(len(s))\r\n )\r\n and \"You can win\"\r\n or \"You will lose\"\r\n )\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *r = map(int, sys.stdin.read().split())\r\n print(reduce(lambda x, y: (x + y) / 2, sorted(r)[-k:], 0))\r\n\r\n class ABC004:\r\n @staticmethod\r\n def a():\r\n print(int(sys.stdin.readline().rstrip()) * 2)\r\n\r\n @staticmethod\r\n def b():\r\n for l in [sys.stdin.readline().rstrip() for _ in range(4)][::-1]:\r\n print(l[::-1])\r\n\r\n @staticmethod\r\n def c():\r\n n = int(sys.stdin.readline().rstrip()) % 30\r\n res = list(range(1, 7))\r\n for i in range(n):\r\n i %= 5\r\n res[i], res[i + 1] = res[i + 1], res[i]\r\n print(*res, sep=\"\")\r\n\r\n class ABC005:\r\n @staticmethod\r\n def a():\r\n x, y = map(int, sys.stdin.readline().split())\r\n print(y // x)\r\n\r\n @staticmethod\r\n def b():\r\n n, *t = map(int, sys.stdin.read().split())\r\n print(min(t))\r\n\r\n @staticmethod\r\n def c():\r\n t = int(sys.stdin.readline().rstrip())\r\n n = int(sys.stdin.readline().rstrip())\r\n a = [int(x) for x in sys.stdin.readline().split()]\r\n m = int(sys.stdin.readline().rstrip())\r\n b = [int(x) for x in sys.stdin.readline().split()]\r\n i = 0\r\n for p in b:\r\n if i == n:\r\n print(\"no\")\r\n return\r\n while p - a[i] > t:\r\n i += 1\r\n if i == n:\r\n print(\"no\")\r\n return\r\n if a[i] > p:\r\n print(\"no\")\r\n return\r\n i += 1\r\n print(\"yes\")\r\n\r\n @staticmethod\r\n def d():\r\n n = int(sys.stdin.readline().rstrip())\r\n d = np.array(\r\n [sys.stdin.readline().split() for _ in range(n)], np.int64\r\n )\r\n s = d.cumsum(axis=0).cumsum(axis=1)\r\n s = np.pad(s, 1)\r\n max_del = np.zeros((n + 1, n + 1), dtype=np.int64)\r\n for y in range(1, n + 1):\r\n for x in range(1, n + 1):\r\n max_del[y, x] = np.amax(\r\n s[y : n + 1, x : n + 1]\r\n - s[0 : n - y + 1, x : n + 1]\r\n - s[y : n + 1, 0 : n - x + 1]\r\n + s[0 : n - y + 1, 0 : n - x + 1]\r\n )\r\n res = np.arange(n**2 + 1)[:, None]\r\n i = np.arange(1, n + 1)\r\n res = max_del[i, np.minimum(res // i, n)].max(axis=1)\r\n q = int(sys.stdin.readline().rstrip())\r\n p = np.array(sys.stdin.read().split(), dtype=np.int64)\r\n print(*res[p], 
sep=\"\\n\")\r\n\r\n class ABC006:\r\n @staticmethod\r\n def a():\r\n n = sys.stdin.readline().rstrip()\r\n if \"3\" in n:\r\n print(\"YES\")\r\n elif int(n) % 3 == 0:\r\n print(\"YES\")\r\n else:\r\n print(\"NO\")\r\n\r\n @staticmethod\r\n def b():\r\n mod = 10007\r\n a = np.eye(N=3, k=-1, dtype=np.int64)\r\n a[0] = 1\r\n n = int(sys.stdin.readline().rstrip())\r\n a = Algebra.matrix_pow(a, n - 1, mod)\r\n print(a[2][0])\r\n\r\n @staticmethod\r\n def c():\r\n n, m = map(int, sys.stdin.readline().split())\r\n cnt = [0, 0, 0]\r\n if m == 1:\r\n cnt = [-1, -1, -1]\r\n else:\r\n if m & 1:\r\n m -= 3\r\n cnt[1] += 1\r\n n -= 1\r\n cnt[2] = m // 2 - n\r\n cnt[0] = n - cnt[2]\r\n if cnt[0] < 0 or cnt[1] < 0 or cnt[2] < 0:\r\n print(-1, -1, -1)\r\n else:\r\n print(*cnt, sep=\" \")\r\n\r\n @staticmethod\r\n def d():\r\n n, *c = map(int, sys.stdin.read().split())\r\n lis = [inf] * n\r\n for x in c:\r\n lis[bi_l(lis, x)] = x\r\n print(n - bi_l(lis, inf))\r\n\r\n class ABC007:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n print(n - 1)\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip()\r\n if s == \"a\":\r\n print(-1)\r\n else:\r\n print(\"a\")\r\n\r\n @staticmethod\r\n def c():\r\n r, c = map(int, sys.stdin.readline().split())\r\n sy, sx = map(int, sys.stdin.readline().split())\r\n gy, gx = map(int, sys.stdin.readline().split())\r\n sy -= 1\r\n sx -= 1\r\n gy -= 1\r\n gx -= 1\r\n maze = [sys.stdin.readline().rstrip() for _ in range(r)]\r\n queue = deque([(sy, sx)])\r\n dist = np.full((r, c), np.inf)\r\n dist[sy, sx] = 0\r\n while queue:\r\n y, x = queue.popleft()\r\n for i, j in [(-1, 0), (1, 0), (0, -1), (0, 1)]:\r\n i += y\r\n j += x\r\n if maze[i][j] == \"#\" or dist[i, j] != np.inf:\r\n continue\r\n dist[i, j] = dist[y, x] + 1\r\n queue.append((i, j))\r\n print(int(dist[gy, gx]))\r\n\r\n @staticmethod\r\n def d():\r\n ng = set([4, 9])\r\n\r\n def count(d):\r\n return d if d <= 4 else d - 1\r\n\r\n def f(n):\r\n x = [int(d) for d in str(n)]\r\n flg = True\r\n dp = 0\r\n for d in x:\r\n dp = dp * 8 + flg * count(d)\r\n if d in ng:\r\n flg = False\r\n return n - (dp + flg)\r\n\r\n a, b = map(int, sys.stdin.readline().split())\r\n print(f(b) - f(a - 1))\r\n\r\n class ABC008:\r\n @staticmethod\r\n def a():\r\n s, t = map(int, sys.stdin.readline().split())\r\n print(t - s + 1)\r\n\r\n @staticmethod\r\n def b():\r\n n, *s = sys.stdin.read().split()\r\n res = defaultdict(int)\r\n for name in s:\r\n res[name] += 1\r\n print(sorted(res.items(), key=lambda x: x[1])[-1][0])\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n c = n - np.count_nonzero(a[:, None] % a, axis=1)\r\n print(np.sum((c + 1) // 2 / c))\r\n\r\n @staticmethod\r\n def d():\r\n w, h, n, *xy = map(int, sys.stdin.read().split())\r\n (*xy,) = zip(*([iter(xy)] * 2))\r\n\r\n @lru_cache(maxsize=None)\r\n def count(x1, y1, x2, y2):\r\n res = 0\r\n for x, y in xy:\r\n if not (x1 <= x <= x2 and y1 <= y <= y2):\r\n continue\r\n cnt = (x2 - x1) + (y2 - y1) + 1\r\n cnt += count(x1, y1, x - 1, y - 1)\r\n cnt += count(x1, y + 1, x - 1, y2)\r\n cnt += count(x + 1, y1, x2, y - 1)\r\n cnt += count(x + 1, y + 1, x2, y2)\r\n res = max(res, cnt)\r\n return res\r\n\r\n print(count(1, 1, w, h))\r\n\r\n class ABC009:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n print((n + 1) // 2)\r\n\r\n @staticmethod\r\n def b():\r\n n, *a = map(int, sys.stdin.read().split())\r\n print(sorted(set(a))[-2])\r\n\r\n @staticmethod\r\n def 
c():\r\n n, k = map(int, sys.stdin.readline().split())\r\n s = list(sys.stdin.readline().rstrip())\r\n cost = [1] * n\r\n r = k\r\n for i in range(n - 1):\r\n q = []\r\n for j in range(i + 1, n):\r\n if s[j] < s[i] and cost[i] + cost[j] <= r:\r\n heappush(q, (s[j], cost[i] + cost[j], -j))\r\n if not q:\r\n continue\r\n _, c, j = heappop(q)\r\n j = -j\r\n s[i], s[j] = s[j], s[i]\r\n r -= c\r\n cost[i] = cost[j] = 0\r\n print(\"\".join(s))\r\n\r\n @staticmethod\r\n def d():\r\n k, m = map(int, sys.stdin.readline().split())\r\n a = np.array([int(x) for x in sys.stdin.readline().split()])\r\n c = np.array([int(x) for x in sys.stdin.readline().split()])\r\n mask = (1 << 32) - 1\r\n d = np.eye(k, k, -1, dtype=np.uint32) * mask\r\n d[0] = c\r\n if m <= k:\r\n print(a[m - 1])\r\n return\r\n # print(Algebra.bitwise_mat_pow(d, m-k))\r\n # print(Algebra.bitwise_dot(Algebra.bitwise_mat_pow(d, m-k), a[::-1].reshape(-1, 1))[0].item())\r\n print(\r\n Algebra.bitwise_dot(\r\n Algebra.bitwise_mat_pow(d, m - k), a[::-1].reshape(-1, 1)\r\n )[0][0]\r\n )\r\n\r\n class ABC010:\r\n @staticmethod\r\n def a():\r\n print(sys.stdin.readline().rstrip() + \"pp\")\r\n\r\n @staticmethod\r\n def b():\r\n n, *a = map(int, sys.stdin.read().split())\r\n tot = 0\r\n for x in a:\r\n c = 0\r\n while x % 2 == 0 or x % 3 == 2:\r\n x -= 1\r\n c += 1\r\n tot += c\r\n print(tot)\r\n\r\n @staticmethod\r\n def c():\r\n sx, sy, gx, gy, t, v, n, *xy = map(int, sys.stdin.read().split())\r\n x, y = np.array(xy).reshape(-1, 2).T\r\n\r\n def dist(x1, y1, x2, y2):\r\n return np.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)\r\n\r\n ans = (\r\n \"YES\"\r\n if (dist(sx, sy, x, y) + dist(x, y, gx, gy) <= v * t).any()\r\n else \"NO\"\r\n )\r\n print(ans)\r\n\r\n @staticmethod\r\n def d():\r\n n, g, e = map(int, sys.stdin.readline().split())\r\n p = [int(x) for x in sys.stdin.readline().split()]\r\n x, y = [], []\r\n for _ in range(e):\r\n a, b = map(int, sys.stdin.readline().split())\r\n x.append(a)\r\n y.append(b)\r\n x.append(b)\r\n y.append(a)\r\n for a in p:\r\n x.append(a)\r\n y.append(n)\r\n if not x:\r\n print(0)\r\n return\r\n c = [1] * len(x)\r\n min_cut = maximum_flow(\r\n csr_matrix((c, (x, y)), (n + 1, n + 1)), source=0, sink=n\r\n ).flow_value\r\n print(min_cut)\r\n\r\n @staticmethod\r\n def d_2():\r\n n, g, e = map(int, sys.stdin.readline().split())\r\n graph = nx.DiGraph()\r\n graph.add_nodes_from(range(n + 1))\r\n for p in [int(x) for x in sys.stdin.readline().split()]:\r\n graph.add_edge(p, n, capacity=1)\r\n for _ in range(e):\r\n a, b = map(int, sys.stdin.readline().split())\r\n graph.add_edge(a, b, capacity=1)\r\n graph.add_edge(b, a, capacity=1)\r\n print(nx.minimum_cut_value(graph, 0, n))\r\n\r\n @staticmethod\r\n def d_3():\r\n n, q, m = map(int, sys.stdin.readline().split())\r\n g = GeometryTopology.Graph(n + 1)\r\n # for i in range(n+1): g.add_node(i)\r\n for p in [int(x) for x in sys.stdin.readline().split()]:\r\n g.add_edge(p, n, capacity=1)\r\n for a, b in zip(*[map(int, sys.stdin.read().split())] * 2):\r\n g.add_edge(a, b, capacity=1)\r\n g.add_edge(b, a, capacity=1)\r\n print(g.dinic(0, n))\r\n\r\n class ABC011:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n print(n % 12 + 1)\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip()\r\n print(s[0].upper() + s[1:].lower())\r\n\r\n @staticmethod\r\n def c():\r\n n, *ng = map(int, sys.stdin.read().split())\r\n ng = set(ng)\r\n if n in ng:\r\n print(\"NO\")\r\n else:\r\n r = 100\r\n while n > 0:\r\n if r == 0:\r\n print(\"NO\")\r\n 
return\r\n for i in range(3, 0, -1):\r\n if (n - i) in ng:\r\n continue\r\n n -= i\r\n r -= 1\r\n break\r\n else:\r\n print(\"NO\")\r\n return\r\n print(\"YES\")\r\n\r\n @staticmethod\r\n def d():\r\n n, d, x, y = map(int, sys.stdin.read().split())\r\n x, y = abs(x), abs(y)\r\n if x % d or y % d:\r\n print(0)\r\n return\r\n x, y = x // d, y // d\r\n r = n - (x + y)\r\n if r < 0 or r & 1:\r\n print(0)\r\n return\r\n\r\n res = 0\r\n half_p = pow(1 / 2, n)\r\n for d in range(r // 2 + 1): # 0 <= d <= r//2, south\r\n south, north = d, y + d\r\n west = (r - 2 * d) // 2\r\n res += (\r\n half_p\r\n * comb(n, south, exact=True)\r\n * comb(n - south, north, exact=True)\r\n * comb(n - south - north, west, exact=True)\r\n * half_p\r\n )\r\n print(res)\r\n\r\n class ABC012:\r\n @staticmethod\r\n def a():\r\n a, b = map(int, sys.stdin.readline().split())\r\n print(b, a)\r\n\r\n @staticmethod\r\n def b():\r\n n = int(sys.stdin.readline().rstrip())\r\n h, n = divmod(n, 3600)\r\n m, s = divmod(n, 60)\r\n print(f\"{h:02}:{m:02}:{s:02}\")\r\n\r\n @staticmethod\r\n def c():\r\n n = 2025 - int(sys.stdin.readline().rstrip())\r\n res = []\r\n for i in range(1, 10):\r\n if n % i != 0 or n // i > 9:\r\n continue\r\n res.append(f\"{i} x {n//i}\")\r\n print(*sorted(res), sep=\"\\n\")\r\n\r\n @staticmethod\r\n def d():\r\n n, m, *abt = map(int, sys.stdin.read().split())\r\n a, b, t = np.array(abt).reshape(m, 3).T\r\n res = shortest_path(\r\n csr_matrix((t, (a - 1, b - 1)), (n, n)),\r\n method=\"FW\",\r\n directed=False,\r\n )\r\n print(res.max(axis=-1).min().astype(np.int64))\r\n\r\n @staticmethod\r\n def d_2():\r\n n, m, *abt = map(int, sys.stdin.read().split())\r\n g = GeometryTopology.Graph(n)\r\n for a, b, t in zip(*[iter(abt)] * 3):\r\n a -= 1\r\n b -= 1\r\n g.add_edge(a, b, weight=t)\r\n g.add_edge(b, a, weight=t)\r\n\r\n print(min(max(d) for d in g.floyd_warshall()))\r\n\r\n class ABC013:\r\n @staticmethod\r\n def a():\r\n print(ord(sys.stdin.readline().rstrip()) - ord(\"A\") + 1)\r\n\r\n @staticmethod\r\n def b():\r\n a, b = map(int, sys.stdin.read().split())\r\n d = abs(a - b)\r\n print(min(d, 10 - d))\r\n\r\n @staticmethod\r\n def c():\r\n n, h, a, b, c, d, e = map(int, sys.stdin.read().split())\r\n y = np.arange(n + 1)\r\n x = (n * e - h - (d + e) * y) // (b + e) + 1\r\n np.maximum(x, 0, out=x)\r\n np.minimum(x, n - y, out=x)\r\n print(np.amin(a * x + c * y))\r\n\r\n @staticmethod\r\n def d():\r\n n, m, d, *a = map(int, sys.stdin.read().split())\r\n res = list(range(n))\r\n\r\n def swap(i, j):\r\n res[i], res[j] = res[j], res[i]\r\n\r\n for i in a[::-1]:\r\n swap(i - 1, i)\r\n res = np.array(res)\r\n\r\n def binary_method(a, p):\r\n b = np.arange(n)\r\n while p:\r\n if p & 1:\r\n b = a[b]\r\n p >>= 1\r\n a = a[a]\r\n return b\r\n\r\n print(*(binary_method(res, d) + 1), sep=\"\\n\")\r\n\r\n class ABC014:\r\n @staticmethod\r\n def a():\r\n a, b = map(int, sys.stdin.read().split())\r\n print((a + b - 1) // b * b - a)\r\n\r\n @staticmethod\r\n def b():\r\n n, x, *a = map(int, sys.stdin.read().split())\r\n print(sum(a[i] for i in range(n) if x >> i & 1))\r\n\r\n @staticmethod\r\n def c():\r\n n, *ab = map(int, sys.stdin.read().split())\r\n a, b = np.array(ab).reshape(n, 2).T\r\n res = np.zeros(10**6 + 2, dtype=np.int64)\r\n np.add.at(res, a, 1)\r\n np.subtract.at(res, b + 1, 1)\r\n np.cumsum(res, out=res)\r\n print(res.max())\r\n\r\n @staticmethod\r\n def d():\r\n n = int(sys.stdin.readline().rstrip())\r\n g = GeometryTopology.Graph(n)\r\n for _ in range(n - 1):\r\n x, y = map(int, 
sys.stdin.readline().split())\r\n x -= 1\r\n y -= 1\r\n g.add_edge(x, y, weight=1)\r\n g.add_edge(y, x, weight=1)\r\n\r\n g.bfs(0)\r\n g.find_ancestors()\r\n\r\n q, *ab = map(int, sys.stdin.read().split())\r\n for a, b in zip(*[iter(ab)] * 2):\r\n a -= 1\r\n b -= 1\r\n print(g.find_dist(a, b) + 1)\r\n\r\n class ABC015:\r\n @staticmethod\r\n def a():\r\n a, b = sys.stdin.read().split()\r\n print(a if len(a) > len(b) else b)\r\n\r\n @staticmethod\r\n def b():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n print(\r\n np.ceil(\r\n a[np.nonzero(a)[0]].sum() / np.count_nonzero(a)\r\n ).astype(np.int8)\r\n )\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *t = map(int, sys.stdin.read().split())\r\n t = np.array(t).reshape(n, k)\r\n x = np.zeros((1, 1), dtype=np.int8)\r\n for i in range(n):\r\n x = x.reshape(-1, 1) ^ t[i]\r\n print(\"Found\" if np.count_nonzero(x == 0) > 0 else \"Nothing\")\r\n\r\n @staticmethod\r\n def d():\r\n w, n, k, *ab = map(int, sys.stdin.read().split())\r\n dp = np.zeros((k + 1, w + 1), dtype=np.int32)\r\n for a, b in zip(*[iter(ab)] * 2):\r\n np.maximum(dp[1:, a:], dp[:-1, :-a] + b, out=dp[1:, a:])\r\n print(dp[k][w])\r\n\r\n class ABC016:\r\n @staticmethod\r\n def a():\r\n m, d = map(int, sys.stdin.readline().split())\r\n print(\"YES\" if m % d == 0 else \"NO\")\r\n\r\n @staticmethod\r\n def b():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n f1, f2 = a + b == c, a - b == c\r\n if f1 & f2:\r\n print(\"?\")\r\n elif f1 & (~f2):\r\n print(\"+\")\r\n elif (~f1) & f2:\r\n print(\"-\")\r\n else:\r\n print(\"!\")\r\n\r\n @staticmethod\r\n def c():\r\n n, _, *ab = map(int, sys.stdin.read().split())\r\n f = [0] * n\r\n for a, b in zip(*[iter(ab)] * 2):\r\n a -= 1\r\n b -= 1\r\n f[a] |= 1 << b\r\n f[b] |= 1 << a\r\n res = [\r\n bit_count(\r\n cumor(f[j] for j in range(n) if f[i] >> j & 1)\r\n & ~(f[i] | 1 << i)\r\n )\r\n for i in range(n)\r\n ]\r\n print(*res, sep=\"\\n\")\r\n\r\n @staticmethod\r\n def d():\r\n sx, sy, gx, gy = map(int, sys.stdin.readline().split())\r\n seg1 = ((sx, sy), (gx, gy))\r\n n = int(sys.stdin.readline().rstrip())\r\n p1 = (\r\n np.array(sys.stdin.read().split(), dtype=np.int64)\r\n .reshape(n, 2)\r\n .T\r\n )\r\n p2 = np.hstack((p1[:, 1:], p1[:, :1]))\r\n seg2 = (p1, p2)\r\n print(\r\n np.count_nonzero(GeometryTopology.intersect(seg1, seg2)) // 2\r\n + 1\r\n )\r\n\r\n class ABC017:\r\n @staticmethod\r\n def a():\r\n s, e = (\r\n np.array(sys.stdin.read().split(), dtype=np.int16)\r\n .reshape(3, 2)\r\n .T\r\n )\r\n print((s // 10 * e).sum())\r\n\r\n @staticmethod\r\n def b():\r\n choku_tail = set(\"ch, o, k, u\".split(\", \"))\r\n\r\n def is_choku(s):\r\n if s == \"\":\r\n return True\r\n if len(s) >= 1 and (s[-1] in choku_tail) and is_choku(s[:-1]):\r\n return True\r\n if len(s) >= 2 and (s[-2:] in choku_tail) and is_choku(s[:-2]):\r\n return True\r\n return False\r\n\r\n print(\"YES\" if is_choku(sys.stdin.readline().rstrip()) else \"NO\")\r\n\r\n @staticmethod\r\n def c():\r\n n, m, *lrs = map(int, sys.stdin.read().split())\r\n l, r, s = np.array(lrs).reshape(n, 3).T\r\n score = np.zeros((m + 1,), dtype=np.int32)\r\n np.add.at(score, l - 1, s)\r\n np.subtract.at(score, r, s)\r\n np.cumsum(score, out=score)\r\n print(s.sum() - score[:m].min())\r\n\r\n @staticmethod\r\n def d():\r\n n, m, *f = map(int, sys.stdin.read().split())\r\n prev = [0] * (n + 1)\r\n tmp = defaultdict(int)\r\n for i in range(n):\r\n prev[i + 1] = tmp[f[i]]\r\n tmp[f[i]] = i + 1\r\n\r\n dp = [0] * (n + 1)\r\n dp[0] = 1\r\n l, s = 0, dp[0]\r\n for i in 
range(1, n + 1):\r\n while l < prev[i]:\r\n s = (s - dp[l]) % MOD\r\n l += 1\r\n dp[i] = s\r\n s = (s + dp[i]) % MOD\r\n print(dp[n])\r\n\r\n class ABC018:\r\n @staticmethod\r\n def a():\r\n (*a,) = map(int, sys.stdin.read().split())\r\n a = sorted(enumerate(a), key=lambda x: -x[1])\r\n res = [None] * 3\r\n for i in range(3):\r\n res[a[i][0]] = i + 1\r\n print(*res, sep=\"\\n\")\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip()\r\n n, *lr = map(int, sys.stdin.read().split())\r\n for l, r in zip(*[iter(lr)] * 2):\r\n l -= 1\r\n r -= 1\r\n s = s[:l] + s[l : r + 1][::-1] + s[r + 1 :]\r\n print(s)\r\n\r\n @staticmethod\r\n def c():\r\n r, c, k = map(int, sys.stdin.readline().split())\r\n s = np.array([list(s) for s in sys.stdin.read().split()])\r\n s = np.pad(s, 1, constant_values=\"x\")\r\n\r\n a = np.zeros_like(s, dtype=np.float64)\r\n a[s == \"o\"] = np.inf\r\n for i in range(1, r + 1):\r\n np.minimum(a[i - 1, :] + 1, a[i, :], out=a[i, :])\r\n for i in range(r, 0, -1):\r\n np.minimum(a[i + 1, :] + 1, a[i, :], out=a[i, :])\r\n for j in range(1, c + 1):\r\n np.minimum(a[:, j - 1] + 1, a[:, j], out=a[:, j])\r\n for j in range(c, 0, -1):\r\n np.minimum(a[:, j + 1] + 1, a[:, j], out=a[:, j])\r\n print(np.count_nonzero(a >= k))\r\n\r\n @staticmethod\r\n def c_2():\r\n r, c, k = map(int, sys.stdin.readline().split())\r\n s = np.array([list(s) for s in sys.stdin.read().split()])\r\n s = np.pad(s, 1, constant_values=\"x\")\r\n a = (s == \"o\").astype(np.int16)\r\n a = distance_transform_cdt(a, metric=\"taxicab\")\r\n print(np.count_nonzero(a >= k))\r\n\r\n @staticmethod\r\n def d():\r\n n, m, p, q, r, *xyz = map(int, sys.stdin.read().split())\r\n x, y, z = np.array(xyz).reshape(r, 3).T\r\n h = np.zeros((n, m), dtype=np.int32)\r\n h[x - 1, y - 1] = z\r\n g = np.array([*itertools.combinations(range(n), p)])\r\n print(np.sort(h[g].sum(axis=1), axis=1)[:, -q:].sum(axis=1).max())\r\n\r\n class ABC019:\r\n @staticmethod\r\n def a():\r\n (*a,) = map(int, sys.stdin.readline().split())\r\n print(sorted(a)[1])\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip() + \"$\"\r\n cnt = 0\r\n prev = \"$\"\r\n t = \"\"\r\n for c in s:\r\n if c == prev:\r\n cnt += 1\r\n continue\r\n t += prev + str(cnt)\r\n prev = c\r\n cnt = 1\r\n print(t[2:])\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n res = set()\r\n for x in a:\r\n while not x & 1:\r\n x >>= 1\r\n res.add(x)\r\n print(len(res))\r\n\r\n @staticmethod\r\n def d():\r\n def inquire(u, v):\r\n print(f\"? {u} {v}\".format(u, v), flush=True)\r\n return int(sys.stdin.readline().rstrip())\r\n\r\n n = int(sys.stdin.readline().rstrip())\r\n u = sorted([(inquire(1, v), v) for v in range(2, n + 1)])[-1][1]\r\n d = max((inquire(u, v)) for v in range(1, n + 1) if u != v)\r\n print(f\"! 
{d}\")\r\n\r\n class ABC020:\r\n @staticmethod\r\n def a():\r\n print(\r\n \"ABC\"\r\n if int(sys.stdin.readline().rstrip()) == 1\r\n else \"chokudai\"\r\n )\r\n\r\n @staticmethod\r\n def b():\r\n a, b = sys.stdin.readline().split()\r\n print(int(a + b) * 2)\r\n\r\n @staticmethod\r\n def c():\r\n h, w, t = map(int, sys.stdin.readline().split())\r\n s = [list(s) for s in sys.stdin.read().split()]\r\n for i in range(h):\r\n for j in range(w):\r\n if s[i][j] == \"S\":\r\n sy, sx = i, j\r\n if s[i][j] == \"G\":\r\n gy, gx = i, j\r\n s[sy][sx] = s[gy][gx] = \".\"\r\n source, target = sy * w + sx, gy * w + gx\r\n\r\n def heuristic_function(u, v=target):\r\n uy, ux = divmod(u, w)\r\n vy, vx = divmod(v, w)\r\n return abs(vy - uy) + abs(ux - vx)\r\n\r\n def min_time(x):\r\n g = GeometryTopology.Graph(h * w)\r\n # g = nx.DiGraph()\r\n\r\n for i in range(h):\r\n for j in range(w):\r\n u = i * w + j\r\n if i > 0:\r\n g.add_edge(\r\n u,\r\n (i - 1) * w + j,\r\n weight=(1 if s[i - 1][j] == \".\" else x),\r\n )\r\n if i < h - 1:\r\n g.add_edge(\r\n u,\r\n (i + 1) * w + j,\r\n weight=(1 if s[i + 1][j] == \".\" else x),\r\n )\r\n if j > 0:\r\n g.add_edge(\r\n u,\r\n i * w + j - 1,\r\n weight=(1 if s[i][j - 1] == \".\" else x),\r\n )\r\n if j < w - 1:\r\n g.add_edge(\r\n u,\r\n i * w + j + 1,\r\n weight=(1 if s[i][j + 1] == \".\" else x),\r\n )\r\n\r\n return g.dijkstra(source)[target]\r\n return g.astar(source, target, heuristic_function)\r\n # return nx.dijkstra_path_length(g, source, target)\r\n # return nx.astar_path_length(g, source, target, heuristic_function)\r\n\r\n def binary_search():\r\n lo, hi = 1, t + 1\r\n while lo + 1 < hi:\r\n x = (lo + hi) // 2\r\n if min_time(x) > t:\r\n hi = x\r\n else:\r\n lo = x\r\n return lo\r\n\r\n print(binary_search())\r\n\r\n @staticmethod\r\n def d():\r\n n, k = map(int, sys.stdin.readline().split())\r\n div = sorted(NumberTheory.find_divisors(k))\r\n l = len(div)\r\n s = [0] * l\r\n for i, d in enumerate(div):\r\n s[i] = (1 + n // d) * (n // d) // 2 * d % MOD\r\n for i in range(l - 1, -1, -1):\r\n for j in range(i + 1, l):\r\n if div[j] % div[i]:\r\n continue\r\n s[i] = (s[i] - s[j]) % MOD\r\n\r\n print(\r\n sum(s[i] * k // div[i] % MOD for i in range(l)) % MOD\r\n ) # ans is LCM.\r\n\r\n class ABC021:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n s = [1 << i for i in range(5) if n >> i & 1]\r\n print(len(s), *s, sep=\"\\n\")\r\n\r\n @staticmethod\r\n def b():\r\n n, a, b, k, *p = map(int, sys.stdin.read().split())\r\n print(\"YES\" if len(set(p) | set([a, b])) == k + 2 else \"NO\")\r\n\r\n @staticmethod\r\n def c():\r\n n, a, b, m, *xy = map(int, sys.stdin.read().split())\r\n x, y = np.array(xy).reshape(m, 2).T - 1\r\n a -= 1\r\n b -= 1\r\n g = csgraph_to_dense(\r\n csr_matrix((np.ones(m), (x, y)), (n, n), dtype=np.int8)\r\n )\r\n g = np.logical_or(g, g.T)\r\n paths = np.zeros(n, dtype=np.int64).reshape(-1, 1)\r\n paths[a, 0] = 1\r\n while not paths[b, 0]:\r\n paths = np.dot(g, paths) % MOD\r\n print(paths[b, 0])\r\n\r\n @staticmethod\r\n def c_2():\r\n n, a, b, m, *xy = map(int, sys.stdin.read().split())\r\n a -= 1\r\n b -= 1\r\n g = GeometryTopology.Graph()\r\n\r\n for x, y in zip(*[iter(xy)] * 2):\r\n x -= 1\r\n y -= 1\r\n g.add_edge(x, y, weight=1)\r\n g.add_edge(y, x, weight=1)\r\n\r\n dist, paths = g.dijkstra(a, paths_cnt=True, mod=MOD)\r\n print(paths[b])\r\n\r\n @staticmethod\r\n def d():\r\n n, k = map(int, sys.stdin.read().split())\r\n cn = Combinatorics.CombinationsMod()\r\n print(cn(n + k - 1, k))\r\n\r\n class 
ABC022:\r\n @staticmethod\r\n def a():\r\n n, s, t, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n np.cumsum(a, out=a)\r\n print(((s <= a) & (a <= t)).sum())\r\n\r\n @staticmethod\r\n def b():\r\n n, *a = map(int, sys.stdin.read().split())\r\n c = Counter(a)\r\n print(sum(c.values()) - len(c))\r\n\r\n @staticmethod\r\n def c():\r\n n, m, *uvl = map(int, sys.stdin.read().split())\r\n u, v, l = np.array(uvl).reshape(m, 3).T\r\n u -= 1\r\n v -= 1\r\n g = csgraph_to_dense(csr_matrix((l, (u, v)), (n, n)))\r\n g += g.T\r\n g[g == 0] = np.inf\r\n dist0 = g[0].copy()\r\n g[0] = 0\r\n g[:, 0] = 0\r\n dist = shortest_path(g, method=\"FW\", directed=False)\r\n u, v = np.array([*itertools.combinations(range(1, n), 2)]).T\r\n res = (dist0[u] + dist[u, v] + dist0[v]).min()\r\n print(-1 if res == np.inf else int(res))\r\n\r\n @staticmethod\r\n def d():\r\n n, *ab = map(int, sys.stdin.read().split())\r\n c = np.array(ab).reshape(2, n, 2)\r\n g = c.mean(axis=1)\r\n d = np.sqrt(((c - g[:, None, :]) ** 2).sum(axis=-1)).sum(axis=1)\r\n print(d[1] / d[0])\r\n\r\n class ABC023:\r\n @staticmethod\r\n def a():\r\n print(sum(divmod(int(sys.stdin.readline().rstrip()), 10)))\r\n\r\n @staticmethod\r\n def b():\r\n n, s = sys.stdin.read().split()\r\n n = int(n)\r\n t = \"b\"\r\n for i in range(n // 2):\r\n if i % 3 == 0:\r\n t = \"a\" + t + \"c\"\r\n elif i % 3 == 1:\r\n t = \"c\" + t + \"a\"\r\n else:\r\n t = \"b\" + t + \"b\"\r\n print(n // 2 if t == s else -1)\r\n\r\n @staticmethod\r\n def b_2():\r\n n, s = sys.stdin.read().split()\r\n n = int(n)\r\n if n & 1 ^ 1:\r\n print(-1)\r\n return\r\n a = list(\"abc\")\r\n i = (1 - n // 2) % 3\r\n for c in s:\r\n if c != a[i]:\r\n print(-1)\r\n return\r\n i = (i + 1) % 3\r\n print(n // 2)\r\n\r\n @staticmethod\r\n def c():\r\n h, w, k, n, *rc = map(int, sys.stdin.read().split())\r\n r, c = np.array(rc).reshape(n, 2).T - 1\r\n rb = np.bincount(r, minlength=h)\r\n cb = np.bincount(c, minlength=w)\r\n rbb = np.bincount(rb, minlength=k + 1)\r\n cbb = np.bincount(cb, minlength=k + 1)\r\n tot = (rbb[: k + 1] * cbb[k::-1]).sum()\r\n real = np.bincount(rb[r] + cb[c] - 1, minlength=k + 1)\r\n print(tot - real[k - 1] + real[k])\r\n\r\n @staticmethod\r\n def d():\r\n n, *hs = map(int, sys.stdin.read().split())\r\n h, s = np.array(hs).reshape(n, 2).T\r\n\r\n t = np.arange(n)\r\n\r\n def is_ok(x):\r\n return np.all(np.sort((x - h) // s) >= t)\r\n\r\n def binary_search():\r\n lo, hi = 0, 10**14\r\n while lo + 1 < hi:\r\n x = (lo + hi) // 2\r\n if is_ok(x):\r\n hi = x\r\n else:\r\n lo = x\r\n return hi\r\n\r\n print(binary_search())\r\n\r\n class ABC024:\r\n @staticmethod\r\n def a():\r\n a, b, c, k, s, t = map(int, sys.stdin.read().split())\r\n print(a * s + b * t - c * (s + t) * (s + t >= k))\r\n\r\n @staticmethod\r\n def b():\r\n n, t, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n print(np.minimum(a[1:] - a[:-1], t).sum() + t)\r\n\r\n @staticmethod\r\n def c():\r\n n, d, k, *lrst = map(int, sys.stdin.read().split())\r\n lrst = np.array(lrst)\r\n lr = lrst[: 2 * d].reshape(d, 2)\r\n s, t = lrst[2 * d :].reshape(k, 2).T\r\n day = np.zeros((k,), dtype=np.int32)\r\n for i in range(d):\r\n l, r = lr[i]\r\n move = (l <= s) & (s <= r) & (s != t)\r\n reach = move & (l <= t) & (t <= r)\r\n s[move & (s < t)] = r\r\n s[move & (s > t)] = l\r\n s[reach] = t[reach]\r\n day[reach] = i + 1\r\n print(*day, sep=\"\\n\")\r\n\r\n @staticmethod\r\n def d():\r\n a, b, c = map(int, sys.stdin.read().split())\r\n p = MOD\r\n denom = pow(a * b % p - b * c % p + c * a % p, p - 
2, p)\r\n w = (b * c - a * b) % p * denom % p\r\n h = (b * c - a * c) % p * denom % p\r\n print(h, w)\r\n\r\n class ABC025:\r\n @staticmethod\r\n def a():\r\n s, n = sys.stdin.read().split()\r\n n = int(n)\r\n i, j = divmod(n - 1, 5)\r\n print(s[i] + s[j])\r\n\r\n @staticmethod\r\n def b():\r\n n, a, b = map(int, sys.stdin.readline().split())\r\n res = defaultdict(int)\r\n for _ in range(n):\r\n s, d = sys.stdin.readline().split()\r\n d = int(d)\r\n res[s] += min(max(d, a), b)\r\n res = res[\"East\"] - res[\"West\"]\r\n if res == 0:\r\n ans = 0\r\n elif res > 0:\r\n ans = f\"East {res}\"\r\n else:\r\n ans = f\"West {-res}\"\r\n print(ans)\r\n\r\n @staticmethod\r\n def c():\r\n b = [0] * 6\r\n for i in range(2):\r\n (*row,) = map(int, sys.stdin.readline().split())\r\n for j in range(3):\r\n b[i * 3 + j] = row[j]\r\n c = [0] * 8\r\n for i in range(3):\r\n (*row,) = map(int, sys.stdin.readline().split())\r\n for j in range(2):\r\n c[i * 3 + j] = row[j]\r\n tot = sum(b) + sum(c)\r\n\r\n @lru_cache(maxsize=None)\r\n def f(s=tuple(0 for _ in range(9))):\r\n if all(s):\r\n res = 0\r\n for i in range(6):\r\n res += (s[i] == s[i + 3]) * b[i]\r\n for i in range(8):\r\n res += (s[i] == s[i + 1]) * c[i]\r\n return res\r\n cand = [i for i in range(9) if not s[i]]\r\n flg = len(cand) & 1\r\n s = list(s)\r\n res = []\r\n for i in cand:\r\n s[i] = (flg ^ 1) + 1\r\n res.append(f(tuple(s)))\r\n s[i] = 0\r\n return sorted(res, reverse=flg)[0]\r\n\r\n a = f()\r\n b = tot - a\r\n print(a)\r\n print(b)\r\n\r\n class ABC026:\r\n @staticmethod\r\n def a():\r\n a = int(sys.stdin.readline().rstrip())\r\n print(a // 2 * (a - a // 2))\r\n\r\n @staticmethod\r\n def b():\r\n n, *r = map(int, sys.stdin.read().split())\r\n s = np.pi * np.array([0] + r) ** 2\r\n s.sort()\r\n res = s[n::-2].sum() - s[n - 1 :: -2].sum()\r\n print(res)\r\n\r\n @staticmethod\r\n def c():\r\n n, *b = map(int, sys.stdin.read().split())\r\n g = GeometryTopology.Graph()\r\n for i in range(1, n):\r\n g.add_edge(b[i - 1] - 1, i, weight=1)\r\n\r\n def f(u=0):\r\n if not g.edges[u]:\r\n return 1\r\n s = [f(v) for v in g.edges[u]]\r\n return max(s) + min(s) + 1\r\n\r\n print(f())\r\n\r\n @staticmethod\r\n def d():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n\r\n def f(t):\r\n return a * t + b * np.sin(c * t * np.pi) - 100\r\n\r\n print(optimize.brenth(f, 0, 200))\r\n\r\n class ABC027:\r\n @staticmethod\r\n def a():\r\n l = [int(l) for l in sys.stdin.readline().split()]\r\n l.sort()\r\n print(l[2] if l[0] == l[1] else l[0])\r\n\r\n @staticmethod\r\n def b():\r\n n, *a = map(int, sys.stdin.read().split())\r\n m, r = divmod(sum(a), n)\r\n if r:\r\n print(-1)\r\n return\r\n population = 0\r\n towns = 0\r\n cnt = 0\r\n for x in a:\r\n population += x\r\n towns += 1\r\n if population / towns != m:\r\n cnt += 1\r\n continue\r\n population, towns = 0, 0\r\n print(cnt)\r\n\r\n @staticmethod\r\n def c():\r\n n = int(sys.stdin.readline().rstrip())\r\n flg = n.bit_length() & 1 ^ 1\r\n t = 0\r\n x = 1\r\n while x <= n:\r\n t += 1\r\n x = 2 * x + 1 if t & 1 ^ flg else 2 * x\r\n print(\"Aoki\" if t & 1 else \"Takahashi\")\r\n\r\n class ABC028:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n print(\r\n \"Bad\"\r\n if n < 60\r\n else \"Good\"\r\n if n < 90\r\n else \"Great\"\r\n if n < 100\r\n else \"Perfect\"\r\n )\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip()\r\n cnt = Counter(s)\r\n print(*[cnt.get(c, 0) for c in \"ABCDEF\"])\r\n\r\n @staticmethod\r\n def c():\r\n a, b, c, d, e = map(int, 
sys.stdin.readline().split())\r\n print(max(b + c + e, a + d + e))\r\n\r\n @staticmethod\r\n def d():\r\n n, k = map(int, sys.stdin.readline().split())\r\n c = 3 * 2 * (n - k) * (k - 1) + 3 * (n - 1) + 1\r\n print(c / n**3)\r\n\r\n class ABC029:\r\n @staticmethod\r\n def a():\r\n print(sys.stdin.readline().rstrip() + \"s\")\r\n\r\n @staticmethod\r\n def b():\r\n print(sum(\"r\" in s for s in sys.stdin.read().split()))\r\n\r\n @staticmethod\r\n def c():\r\n print(\r\n *[\r\n \"\".join(s)\r\n for s in itertools.product(\r\n \"abc\", repeat=int(sys.stdin.readline().rstrip())\r\n )\r\n ],\r\n sep=\"\\n\",\r\n )\r\n\r\n @staticmethod\r\n def d():\r\n n = int(sys.stdin.readline().rstrip())\r\n print(\r\n sum(\r\n n // 10 ** (i + 1) * 10**i\r\n + min(max((n % 10 ** (i + 1) - 10**i + 1), 0), 10**i)\r\n for i in range(9)\r\n )\r\n )\r\n\r\n class ABC030:\r\n @staticmethod\r\n def a():\r\n a, b, c, d = map(int, sys.stdin.readline().split())\r\n e, f = b * c, d * a\r\n print(\"TAKAHASHI\" if e > f else \"AOKI\" if f > e else \"DRAW\")\r\n\r\n @staticmethod\r\n def b():\r\n n, m = map(int, sys.stdin.readline().split())\r\n n = (n % 12 + m / 60) * 30\r\n m *= 6\r\n d = abs(n - m)\r\n print(min(d, 360 - d))\r\n\r\n @staticmethod\r\n def c():\r\n n, m = map(int, sys.stdin.readline().split())\r\n x, y = map(int, sys.stdin.readline().split())\r\n a = [int(x) for x in sys.stdin.readline().split()]\r\n b = [int(x) for x in sys.stdin.readline().split()]\r\n\r\n t = 0\r\n p = 1\r\n cnt = 0\r\n while True:\r\n if p:\r\n i = bi_l(a, t)\r\n if i == n:\r\n break\r\n t = a[i] + x\r\n else:\r\n i = bi_l(b, t)\r\n if i == m:\r\n break\r\n t = b[i] + y\r\n cnt += 1\r\n p ^= 1\r\n print(cnt)\r\n\r\n @staticmethod\r\n def d():\r\n n, a = map(int, sys.stdin.readline().split())\r\n a -= 1\r\n k = sys.stdin.readline().rstrip()\r\n b = [int(x) - 1 for x in sys.stdin.readline().split()]\r\n\r\n c = [None] * n\r\n for i in range(n + 1):\r\n if str(i) == k:\r\n print(a + 1)\r\n return\r\n if c[a] is not None:\r\n l, d = i - c[a], c[a]\r\n break\r\n c[a] = i\r\n a = b[a]\r\n\r\n r = [None] * len(k)\r\n r[0] = 1\r\n for i in range(len(k) - 1):\r\n r[i + 1] = r[i] * 10 % l\r\n k = [int(c) for c in k][::-1]\r\n d = (sum(r[i] * k[i] for i in range(len(k))) - d) % l\r\n for _ in range(d):\r\n a = b[a]\r\n print(a + 1)\r\n\r\n @staticmethod\r\n def d_2():\r\n n, a, k, *b = map(int, sys.stdin.read().split())\r\n a -= 1\r\n b = [x - 1 for x in b]\r\n c = [None] * n\r\n for i in range(n + 1):\r\n if i == k:\r\n print(a + 1)\r\n return\r\n if c[a] is not None:\r\n for _ in range((k - c[a]) % (i - c[a])):\r\n a = b[a]\r\n print(a + 1)\r\n return\r\n c[a] = i\r\n a = b[a]\r\n\r\n class ABC031:\r\n @staticmethod\r\n def a():\r\n a, d = map(int, sys.stdin.readline().split())\r\n if a > d:\r\n a, d = d, a\r\n print((a + 1) * d)\r\n\r\n @staticmethod\r\n def b():\r\n l, h, n, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n res = np.maximum(l - a, 0)\r\n res[a > h] = -1\r\n print(*res, sep=\"\\n\")\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n np.cumsum(a[::2], out=a[::2])\r\n np.cumsum(a[1::2], out=a[1::2])\r\n a = list(a) + [0] * 2\r\n\r\n def score(i, j):\r\n if i > j:\r\n i, j = j, i\r\n if (j - i) & 1:\r\n x, y = a[j - 1] - a[i - 2], a[j] - a[i - 1]\r\n else:\r\n x, y = a[j] - a[i - 2], a[j - 1] - a[i - 1]\r\n return x, y\r\n\r\n res = -inf\r\n for i in range(n):\r\n s = -inf\r\n for j in range(n):\r\n if i == j:\r\n continue\r\n x, y = score(i, j)\r\n if y > 
s:\r\n s, t = y, x\r\n res = max(res, t)\r\n print(res)\r\n\r\n @staticmethod\r\n def d():\r\n k, m = map(int, sys.stdin.readline().split())\r\n (*vw,) = zip(*[iter(sys.stdin.read().split())] * 2)\r\n for l in itertools.product((1, 2, 3), repeat=k):\r\n s = dict()\r\n for v, w in vw:\r\n i = 0\r\n for d in v:\r\n d = int(d) - 1\r\n j = i + l[d]\r\n if j > len(w):\r\n break\r\n t = w[i:j]\r\n if d in s and s[d] != t:\r\n break\r\n s[d] = t\r\n i = j\r\n else:\r\n if i == len(w):\r\n continue\r\n break\r\n else:\r\n for i in range(k):\r\n print(s[i])\r\n return\r\n\r\n class ABC032:\r\n @staticmethod\r\n def a():\r\n a, b, n = map(int, sys.stdin.read().split())\r\n l = NumberTheory.lcm(a, b)\r\n print((n + l - 1) // l * l)\r\n\r\n @staticmethod\r\n def b():\r\n s, k = sys.stdin.read().split()\r\n k = int(k)\r\n res = set()\r\n for i in range(len(s) - k + 1):\r\n res.add(s[i : i + k])\r\n print(len(res))\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *s = map(int, sys.stdin.read().split())\r\n if 0 in s:\r\n print(n)\r\n return\r\n if k == 0:\r\n print(0)\r\n return\r\n res, tmp, l = 0, 1, 0\r\n for r in range(n):\r\n tmp *= s[r]\r\n while tmp > k:\r\n tmp //= s[l]\r\n l += 1\r\n res = max(res, r - l + 1)\r\n\r\n print(res)\r\n\r\n class ABC033:\r\n @staticmethod\r\n def a():\r\n print(\r\n \"SAME\"\r\n if len(set(sys.stdin.readline().rstrip())) == 1\r\n else \"DIFFERENT\"\r\n )\r\n\r\n @staticmethod\r\n def b():\r\n n = int(sys.stdin.readline().rstrip())\r\n res = dict()\r\n for _ in range(n):\r\n s, p = sys.stdin.readline().split()\r\n res[s] = int(p)\r\n tot = sum(res.values())\r\n for s, p in res.items():\r\n if p > tot / 2:\r\n print(s)\r\n return\r\n print(\"atcoder\")\r\n\r\n @staticmethod\r\n def c():\r\n s = sys.stdin.readline().rstrip()\r\n print(sum(not \"0\" in f for f in s.split(\"+\")))\r\n\r\n class ABC034:\r\n @staticmethod\r\n def a():\r\n x, y = map(int, sys.stdin.readline().split())\r\n print(\"Better\" if y > x else \"Worse\")\r\n\r\n @staticmethod\r\n def b():\r\n n = int(sys.stdin.readline().rstrip())\r\n print(n + 1 if n & 1 else n - 1)\r\n\r\n @staticmethod\r\n def c():\r\n h, w = map(int, sys.stdin.read().split())\r\n choose = Combinatorics.CombinationsMod()\r\n print(choose(h + w - 2, h - 1))\r\n\r\n @staticmethod\r\n def d():\r\n n, k, *wp = map(int, sys.stdin.read().split())\r\n w, p = np.array(wp).reshape(-1, 2).T\r\n\r\n def f(x):\r\n return np.sort(w * (p - x))[-k:].sum()\r\n\r\n print(optimize.bisect(f, 0, 100))\r\n\r\n class ABC035:\r\n @staticmethod\r\n def a():\r\n w, h = map(int, sys.stdin.readline().split())\r\n print(\"4:3\" if 4 * h == 3 * w else \"16:9\")\r\n\r\n @staticmethod\r\n def b():\r\n s, t = sys.stdin.read().split()\r\n y = x = z = 0\r\n for c in s:\r\n if c == \"?\":\r\n z += 1\r\n elif c == \"L\":\r\n x -= 1\r\n elif c == \"R\":\r\n x += 1\r\n elif c == \"D\":\r\n y -= 1\r\n elif c == \"U\":\r\n y += 1\r\n d = abs(y) + abs(x)\r\n print(d + z if t == \"1\" else max(d - z, (d - z) & 1))\r\n\r\n @staticmethod\r\n def c():\r\n n, q, *lr = map(int, sys.stdin.read().split())\r\n l, r = np.array(lr).reshape(q, 2).T\r\n res = np.zeros(n + 1, dtype=int)\r\n np.add.at(res, l - 1, 1)\r\n np.subtract.at(res, r, 1)\r\n np.cumsum(res, out=res)\r\n res = res & 1\r\n print(\"\".join(map(str, res[:-1])))\r\n\r\n @staticmethod\r\n def d():\r\n n, m, t = map(int, sys.stdin.readline().split())\r\n point = np.array(sys.stdin.readline().split(), dtype=int)\r\n a, b, c = (\r\n np.array(sys.stdin.read().split(), dtype=np.int64)\r\n .reshape(m, 3)\r\n .T\r\n )\r\n a -= 
1\r\n b -= 1\r\n d_1 = shortest_path(\r\n csr_matrix((c, (a, b)), (n, n)),\r\n method=\"D\",\r\n directed=True,\r\n indices=0,\r\n )\r\n d_2 = shortest_path(\r\n csr_matrix((c, (b, a)), (n, n)),\r\n method=\"D\",\r\n directed=True,\r\n indices=0,\r\n )\r\n print(int(np.amax((t - (d_1 + d_2)) * point)))\r\n\r\n class ABC036:\r\n @staticmethod\r\n def a():\r\n a, b = map(int, sys.stdin.readline().split())\r\n print((b + a - 1) // a)\r\n\r\n @staticmethod\r\n def b():\r\n n, *s = sys.stdin.read().split()\r\n n = int(n)\r\n for j in range(n):\r\n row = \"\"\r\n for i in range(n - 1, -1, -1):\r\n row += s[i][j]\r\n print(row)\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n b = [None] * n\r\n prev = None\r\n j = -1\r\n for i, x in sorted(enumerate(a), key=lambda x: x[1]):\r\n if x != prev:\r\n j += 1\r\n b[i] = j\r\n prev = x\r\n print(*b, sep=\"\\n\")\r\n\r\n @staticmethod\r\n def d():\r\n n, *ab = map(int, sys.stdin.read().split())\r\n edges = [[] for _ in range(n)]\r\n for a, b in zip(*[iter(ab)] * 2):\r\n a -= 1\r\n b -= 1\r\n edges[a].append(b)\r\n edges[b].append(a)\r\n parent = [None] * n\r\n\r\n def count(u):\r\n black, white = 1, 1\r\n for v in edges[u]:\r\n if v == parent[u]:\r\n continue\r\n parent[v] = u\r\n b, w = count(v)\r\n black *= w\r\n black %= MOD\r\n white *= (b + w) % MOD\r\n white %= MOD\r\n return black, white\r\n\r\n print(sum(count(0)) % MOD)\r\n\r\n class ABC037:\r\n @staticmethod\r\n def a():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n print(c // min(a, b))\r\n\r\n @staticmethod\r\n def b():\r\n n, q, *lrt = map(int, sys.stdin.read().split())\r\n a = np.zeros(n, dtype=int)\r\n for l, r, t in zip(*[iter(lrt)] * 3):\r\n a[l - 1 : r] = t\r\n print(*a, sep=\"\\n\")\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *a = map(int, sys.stdin.read().split())\r\n a = np.array([0] + a)\r\n np.cumsum(a, out=a)\r\n s = (a[k:] - a[:-k]).sum()\r\n print(s)\r\n\r\n @staticmethod\r\n def d():\r\n h, w, *a = map(int, sys.stdin.read().split())\r\n p = [None] * (h * w)\r\n\r\n def paths(k):\r\n if p[k]:\r\n return p[k]\r\n p[k] = 1\r\n i, j = divmod(k, w)\r\n if j > 0 and a[k] > a[k - 1]:\r\n p[k] += paths(k - 1)\r\n if j < w - 1 and a[k] > a[k + 1]:\r\n p[k] += paths(k + 1)\r\n if i > 0 and a[k] > a[k - w]:\r\n p[k] += paths(k - w)\r\n if i < h - 1 and a[k] > a[k + w]:\r\n p[k] += paths(k + w)\r\n p[k] %= MOD\r\n return p[k]\r\n\r\n print(sum(paths(i) for i in range(h * w)) % MOD)\r\n\r\n class ABC038:\r\n @staticmethod\r\n def a():\r\n s = sys.stdin.readline().rstrip()\r\n print(\"YES\" if s[-1] == \"T\" else \"NO\")\r\n\r\n @staticmethod\r\n def b():\r\n a, b, c, d = map(int, sys.stdin.read().split())\r\n print(\"YES\" if a == c or b == c or a == d or b == d else \"NO\")\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a += [-1]\r\n cnt = n\r\n tmp = 1\r\n for i in range(n):\r\n if a[i + 1] > a[i]:\r\n tmp += 1\r\n else:\r\n cnt += tmp * (tmp - 1) // 2\r\n tmp = 1\r\n print(cnt)\r\n\r\n @staticmethod\r\n def d():\r\n n, *wh = map(int, sys.stdin.read().split())\r\n a = [\r\n x[1]\r\n for x in sorted(\r\n zip(*[iter(wh)] * 2), key=lambda x: (x[0], -x[1])\r\n )\r\n ]\r\n print(bi_l(DP.LIS(a), inf))\r\n\r\n class ABC039:\r\n @staticmethod\r\n def a():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n print((a * b + b * c + c * a) * 2)\r\n\r\n @staticmethod\r\n def b():\r\n x = int(sys.stdin.readline().rstrip())\r\n for n in range(1, int(x**0.5) + 1):\r\n if pow(n, 4) == x:\r\n print(n)\r\n 
return\r\n\r\n @staticmethod\r\n def c():\r\n board = \"WBWBWWBWBWBW\" * 3\r\n convert = \"Do, *, Re, *, Mi, Fa, *, So, *, La, *, Si\".split(\", \")\r\n s = sys.stdin.readline().rstrip()\r\n print(convert[board.index(s)])\r\n\r\n @staticmethod\r\n def d():\r\n h, w = map(int, sys.stdin.readline().split())\r\n s = \"\".join(sys.stdin.read().split())\r\n white = set()\r\n for i in range(h * w):\r\n if s[i] == \"#\":\r\n continue\r\n l = 0 if i % w == 0 else -1\r\n r = 0 if (i + 1) % w == 0 else 1\r\n white |= {\r\n i + dy + dx\r\n for dy in range(-w, w + 1, w)\r\n for dx in range(l, r + 1)\r\n }\r\n black_before = set(range(h * w)) - white\r\n black_after = set()\r\n for i in black_before:\r\n l = 0 if i % w == 0 else -1\r\n r = 0 if (i + 1) % w == 0 else 1\r\n black_after |= {\r\n i + dy + dx\r\n for dy in range(-w, w + 1, w)\r\n for dx in range(l, r + 1)\r\n }\r\n black_after &= set(range(h * w))\r\n for i in range(h * w):\r\n if s[i] == \"#\" and not i in black_after:\r\n print(\"impossible\")\r\n return\r\n print(\"possible\")\r\n for i in range(h):\r\n print(\r\n \"\".join(\r\n [\r\n \"#\" if i * w + j in black_before else \".\"\r\n for j in range(w)\r\n ]\r\n )\r\n )\r\n\r\n class ABC040:\r\n @staticmethod\r\n def a():\r\n n, x = map(int, sys.stdin.readline().split())\r\n print(min(x - 1, n - x))\r\n\r\n @staticmethod\r\n def b():\r\n n = int(sys.stdin.readline().rstrip())\r\n res = inf\r\n for i in range(1, int(n**0.5) + 1):\r\n res = min(res, n // i - i + n % i)\r\n print(res)\r\n\r\n @staticmethod\r\n def c():\r\n n, *h = map(int, sys.stdin.read().split())\r\n h = [h[0]] + h\r\n cost = [None] * (n + 1)\r\n cost[0] = cost[1] = 0\r\n for i in range(2, n + 1):\r\n cost[i] = min(\r\n cost[i - 2] + abs(h[i] - h[i - 2]),\r\n cost[i - 1] + abs(h[i] - h[i - 1]),\r\n )\r\n print(cost[n])\r\n\r\n @staticmethod\r\n def d():\r\n n, m = map(int, sys.stdin.readline().split())\r\n uf = GeometryTopology.Graph(n)\r\n uf.init_dsu()\r\n queue = []\r\n for _ in range(m):\r\n a, b, y = map(int, sys.stdin.readline().split())\r\n heappush(queue, (-(2 * y), a - 1, b - 1))\r\n q = int(sys.stdin.readline().rstrip())\r\n for i in range(q):\r\n v, y = map(int, sys.stdin.readline().split())\r\n heappush(queue, (-(2 * y + 1), v - 1, i))\r\n res = [None] * q\r\n while queue:\r\n y, i, j = heappop(queue)\r\n if y & 1:\r\n res[j] = uf.size[uf.find(i)]\r\n else:\r\n uf.unite(i, j)\r\n print(*res, sep=\"\\n\")\r\n\r\n class ABC041:\r\n @staticmethod\r\n def a():\r\n s, i = sys.stdin.read().split()\r\n i = int(i)\r\n print(s[i - 1])\r\n\r\n @staticmethod\r\n def b():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n ans = a * b % MOD * c % MOD\r\n print(ans)\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n for i, h in sorted(enumerate(a), key=lambda x: -x[1]):\r\n print(i + 1)\r\n\r\n @staticmethod\r\n def d():\r\n n, _, *xy = map(int, sys.stdin.read().split())\r\n g = [0] * n\r\n for x, y in zip(*[iter(xy)] * 2):\r\n g[x - 1] |= 1 << (y - 1)\r\n res = [0] * (1 << n)\r\n res[0] = 1\r\n for i in range(1 << n):\r\n for j in range(n):\r\n if i >> j & 1 ^ 1:\r\n continue\r\n if not (g[j] & i):\r\n res[i] += res[i & ~(1 << j)]\r\n print(res[-1])\r\n\r\n class ABC042:\r\n @staticmethod\r\n def a():\r\n a = [int(x) for x in sys.stdin.readline().split()]\r\n c = Counter(a)\r\n print(\"YES\" if c[5] == 2 and c[7] == 1 else \"NO\")\r\n\r\n @staticmethod\r\n def b():\r\n n, l, *s = sys.stdin.read().split()\r\n print(\"\".join(sorted(s)))\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *d 
= sys.stdin.read().split()\r\n l = len(n)\r\n ok = sorted(set(string.digits) - set(d))\r\n cand = [\r\n int(\"\".join(p)) for p in itertools.product(ok, repeat=l)\r\n ] + [int(min(x for x in ok if x > \"0\") + min(ok) * l)]\r\n print(cand[bi_l(cand, int(n))])\r\n\r\n @staticmethod\r\n def d():\r\n h, w, a, b = map(int, sys.stdin.read().split())\r\n combinations = Combinatorics.CombinationsMod(\r\n n=2 * 10**5, mod=MOD\r\n )\r\n i = np.arange(h - a, h)\r\n ng = np.sum(\r\n combinations(i + b - 1, i)\r\n * combinations(h - i + w - b - 2, h - 1 - i)\r\n % MOD\r\n )\r\n print((combinations(h + w - 2, h - 1) - ng) % MOD)\r\n\r\n class ABC043:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n print((1 + n) * n // 2)\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip()\r\n t = \"\"\r\n for c in s:\r\n if c == \"B\":\r\n t = t[:-1]\r\n else:\r\n t += c\r\n print(t)\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n x = np.around(a.sum() / n).astype(int)\r\n print(np.sum((a - x) ** 2))\r\n\r\n @staticmethod\r\n def d():\r\n s = sys.stdin.readline().rstrip()\r\n n = len(s)\r\n for i in range(n - 1):\r\n if s[i] == s[i + 1]:\r\n print(i + 1, i + 2)\r\n return\r\n for i in range(n - 2):\r\n if s[i] == s[i + 2]:\r\n print(i + 1, i + 3)\r\n return\r\n print(-1, -1)\r\n\r\n class ABC044:\r\n @staticmethod\r\n def a():\r\n n, k, x, y = map(int, sys.stdin.read().split())\r\n print(min(n, k) * x + max(0, n - k) * y)\r\n\r\n @staticmethod\r\n def b():\r\n res = set(\r\n c & 1 for c in Counter(sys.stdin.readline().rstrip()).values()\r\n )\r\n print(\"Yes\" if len(res) == 1 and res.pop() == 0 else \"No\")\r\n\r\n @staticmethod\r\n def c():\r\n n, a, *x = map(int, sys.stdin.read().split())\r\n dp = np.zeros((n + 1, 2501), dtype=np.int64)\r\n dp[0, 0] = 1\r\n for v in x:\r\n dp[1:, v:] += dp[:-1, :-v]\r\n i = np.arange(1, n + 1)\r\n print(dp[i, i * a].sum())\r\n\r\n @staticmethod\r\n def c_2():\r\n n, a, *x = map(int, sys.stdin.read().split())\r\n for i in range(n):\r\n x[i] -= a\r\n\r\n s = defaultdict(int)\r\n s[0] = 1\r\n for i in range(n):\r\n ns = s.copy()\r\n for k, v in s.items():\r\n ns[k + x[i]] += v\r\n s = ns\r\n print(s[0] - 1)\r\n\r\n @staticmethod\r\n def d():\r\n pass\r\n\r\n class ABC045:\r\n @staticmethod\r\n def a():\r\n a, b, h = map(int, sys.stdin.read().split())\r\n print((a + b) * h // 2)\r\n\r\n @staticmethod\r\n def b():\r\n a, b, c = sys.stdin.read().split()\r\n d = {\"a\": a[::-1], \"b\": b[::-1], \"c\": c[::-1]}\r\n nx = \"a\"\r\n while 1:\r\n if not d[nx]:\r\n print(nx.upper())\r\n return\r\n d[nx], nx = d[nx][:-1], d[nx][-1]\r\n\r\n @staticmethod\r\n def c():\r\n def c(l):\r\n return pow(2, max(0, l - 1))\r\n\r\n s = sys.stdin.readline().rstrip()\r\n n = len(s)\r\n print(\r\n sum(\r\n int(s[i : j + 1]) * c(i) * c(n - 1 - j)\r\n for i in range(n)\r\n for j in range(i, n)\r\n )\r\n )\r\n\r\n @staticmethod\r\n def d():\r\n h, w, n, *ab = map(int, sys.stdin.read().split())\r\n c = defaultdict(int)\r\n for y, x in zip(*[iter(ab)] * 2):\r\n y -= 1\r\n x -= 1\r\n for dy, dx in itertools.product(range(-1, 2), repeat=2):\r\n i, j = y + dy, x + dx\r\n if not (0 < i < h - 1 and 0 < j < w - 1):\r\n continue\r\n c[(i, j)] += 1\r\n c = Counter(c.values())\r\n c[0] = (h - 2) * (w - 2) - sum(c.values())\r\n for i in range(10):\r\n print(c[i])\r\n\r\n class ABC046:\r\n @staticmethod\r\n def a():\r\n print(len(set(sys.stdin.readline().split())))\r\n\r\n @staticmethod\r\n def b():\r\n n, k = map(int, 
sys.stdin.readline().split())\r\n print(k * pow(k - 1, n - 1))\r\n\r\n @staticmethod\r\n def c():\r\n n, *xy = map(int, sys.stdin.read().split())\r\n a, b = 1, 1\r\n for x, y in zip(*[iter(xy)] * 2):\r\n n = max((a + x - 1) // x, (b + y - 1) // y)\r\n a, b = n * x, n * y\r\n print(a + b)\r\n\r\n @staticmethod\r\n def d():\r\n c = Counter(sys.stdin.readline().rstrip())\r\n print((c[\"g\"] - c[\"p\"]) // 2)\r\n\r\n class ABC047:\r\n @staticmethod\r\n def a():\r\n c = sorted(map(int, sys.stdin.readline().split()))\r\n print(\"Yes\" if c[0] + c[1] == c[2] else \"No\")\r\n\r\n @staticmethod\r\n def b():\r\n w, h, n, *xyf = map(int, sys.stdin.read().split())\r\n l, r, d, u = 0, w, 0, h\r\n for x, y, f in zip(*[iter(xyf)] * 3):\r\n if f == 1:\r\n l = max(l, x)\r\n if f == 2:\r\n r = min(r, x)\r\n if f == 3:\r\n d = max(d, y)\r\n if f == 4:\r\n u = min(u, y)\r\n print(max(0, r - l) * max(0, u - d))\r\n\r\n @staticmethod\r\n def c():\r\n s = sys.stdin.readline().rstrip()\r\n print(sum(s[i] != s[i + 1] for i in range(len(s) - 1)))\r\n\r\n @staticmethod\r\n def d():\r\n mn, mx, c = inf, -1, 0\r\n n, t, *a = map(int, sys.stdin.read().split())\r\n for p in a:\r\n if p - mn == mx:\r\n c += 1\r\n elif p - mn > mx:\r\n mx, c = p - mn, 1\r\n mn = min(mn, p)\r\n print(c)\r\n\r\n class ABC048:\r\n @staticmethod\r\n def a():\r\n def initial(s):\r\n return s[0].upper()\r\n\r\n print(\"\".join(map(initial, sys.stdin.readline().split())))\r\n\r\n @staticmethod\r\n def b():\r\n a, b, x = map(int, sys.stdin.readline().split())\r\n print(\r\n b // x - (a - 1) // x\r\n ) # if a=0, (a-1)/x is rounded down to -1.\r\n\r\n @staticmethod\r\n def c():\r\n n, x, *a = map(int, sys.stdin.read().split())\r\n cnt = prev = 0\r\n for i in range(n):\r\n d = prev + a[i] - x\r\n prev = a[i]\r\n if d <= 0:\r\n continue\r\n cnt += d\r\n prev -= d\r\n print(cnt)\r\n\r\n @staticmethod\r\n def d():\r\n s = sys.stdin.readline().rstrip()\r\n print(\"First\" if len(s) & 1 ^ (s[0] == s[-1]) else \"Second\")\r\n\r\n class ABC049:\r\n @staticmethod\r\n def a():\r\n vowels = set(\"aeiou\")\r\n print(\r\n \"vowel\"\r\n if sys.stdin.readline().rstrip() in vowels\r\n else \"consonant\"\r\n )\r\n\r\n @staticmethod\r\n def b():\r\n h, w, *s = sys.stdin.read().split()\r\n for l in s:\r\n for _ in range(2):\r\n print(l)\r\n\r\n @staticmethod\r\n def c():\r\n t = set(\"dream, dreamer, erase, eraser\".split(\", \"))\r\n\r\n def obtainable(s):\r\n while True:\r\n for i in range(5, 8):\r\n if s[-i:] in t:\r\n s = s[:-i]\r\n if not s:\r\n return True\r\n break\r\n else:\r\n return False\r\n\r\n s = sys.stdin.readline().rstrip()\r\n print(\"YES\" if obtainable(s) else \"NO\")\r\n\r\n @staticmethod\r\n def d():\r\n n, k, l = map(int, sys.stdin.readline().split())\r\n uf1 = GeometryTopology.Graph(n)\r\n uf1.init_dsu()\r\n uf2 = GeometryTopology.Graph(n)\r\n uf2.init_dsu()\r\n\r\n def add_edges(uf, m):\r\n for _ in range(m):\r\n x, y = map(int, sys.stdin.readline().split())\r\n x -= 1\r\n y -= 1\r\n uf.unite(x, y)\r\n\r\n add_edges(uf1, k)\r\n add_edges(uf2, l)\r\n\r\n g = defaultdict(list)\r\n for i in range(n):\r\n g[(uf1.find(i), uf2.find(i))].append(i)\r\n\r\n res = [None] * n\r\n for a in g:\r\n for i in g[a]:\r\n res[i] = len(g[a])\r\n\r\n print(*res, sep=\" \")\r\n\r\n class ABC050:\r\n @staticmethod\r\n def a():\r\n print(eval(sys.stdin.readline().rstrip()))\r\n\r\n @staticmethod\r\n def b():\r\n n = int(sys.stdin.readline().rstrip())\r\n t = np.array(sys.stdin.readline().split(), dtype=np.int64)\r\n m, *px = map(int, sys.stdin.read().split())\r\n p, 
x = np.array(px).reshape(m, 2).T\r\n p -= 1\r\n print(*(t.sum() + x - t[p]), sep=\"\\n\")\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = Counter(a)\r\n if n & 1 and not (\r\n a[0] == 1 and all(a[i] == 2 for i in range(2, n, 2))\r\n ):\r\n print(0)\r\n return\r\n if ~n & 1 and any(a[i] != 2 for i in range(1, n, 2)):\r\n print(0)\r\n return\r\n print(pow(2, n // 2, MOD))\r\n\r\n @staticmethod\r\n def d():\r\n pass\r\n\r\n class ABC051:\r\n @staticmethod\r\n def a():\r\n print(\" \".join(sys.stdin.readline().rstrip().split(\",\")))\r\n\r\n @staticmethod\r\n def b():\r\n k, s = map(int, sys.stdin.readline().split())\r\n tot = 0\r\n for x in range(k + 1):\r\n if s - x < 0:\r\n break\r\n if s - x > 2 * k:\r\n continue\r\n tot += s - x + 1 if s - x <= k else 2 * k - (s - x) + 1\r\n print(tot)\r\n\r\n @staticmethod\r\n def c():\r\n x1, y1, x2, y2 = map(int, sys.stdin.readline().split())\r\n dx, dy = x2 - x1, y2 - y1\r\n print(\r\n \"U\" * dy\r\n + \"R\" * (dx + 1)\r\n + \"D\" * (dy + 1)\r\n + \"L\" * (dx + 1)\r\n + \"U\"\r\n + \"L\"\r\n + \"U\" * (dy + 1)\r\n + \"R\" * (dx + 1)\r\n + \"D\" * (dy + 1)\r\n + \"L\" * dx\r\n )\r\n\r\n @staticmethod\r\n def d():\r\n n, m, *abc = map(int, sys.stdin.read().split())\r\n x = np.arange(n)\r\n a, b, c = np.array(abc).reshape(m, 3).T\r\n a -= 1\r\n b -= 1\r\n d = shortest_path(\r\n csr_matrix((c, (a, b)), shape=(n, n)),\r\n method=\"FW\",\r\n directed=False,\r\n ).astype(np.int64)\r\n print(\r\n m\r\n - np.any(\r\n d[x, a[:, None]] + c[:, None] == d[x, b[:, None]], axis=1\r\n ).sum()\r\n )\r\n\r\n class ABC052:\r\n @staticmethod\r\n def a():\r\n a, b, c, d = map(int, sys.stdin.readline().split())\r\n print(max(a * b, c * d))\r\n\r\n @staticmethod\r\n def b():\r\n n, s = sys.stdin.read().split()\r\n n = int(n)\r\n a = [0] * (n + 1)\r\n for i in range(n):\r\n a[i + 1] = a[i] + (1 if s[i] == \"I\" else -1)\r\n print(max(a))\r\n\r\n @staticmethod\r\n def c():\r\n n = int(sys.stdin.readline().rstrip())\r\n pn = NumberTheory.PrimeNumbers(n)\r\n s = 1\r\n for c in pn.factorize_factorial(n).values():\r\n s = s * (c + 1) % MOD\r\n print(s)\r\n\r\n @staticmethod\r\n def d():\r\n n, a, b, *x = map(int, sys.stdin.read().split())\r\n x = np.array(x)\r\n print(np.minimum((x[1:] - x[:-1]) * a, b).sum())\r\n\r\n class ABC053:\r\n @staticmethod\r\n def a():\r\n print(\r\n \"ABC\" if int(sys.stdin.readline().rstrip()) < 1200 else \"ARC\"\r\n )\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip()\r\n print(len(s) - s.find(\"A\") - s[::-1].find(\"Z\"))\r\n\r\n @staticmethod\r\n def c():\r\n x = int(sys.stdin.readline().rstrip())\r\n q, r = divmod(x, 11)\r\n print(2 * q + (r + 5) // 6)\r\n\r\n @staticmethod\r\n def d():\r\n n, *a = map(int, sys.stdin.read().split())\r\n print(n - ((n - len(set(a)) + 1) // 2 * 2))\r\n\r\n class ABC054:\r\n @staticmethod\r\n def a():\r\n def f(x):\r\n return (x + 11) % 13\r\n\r\n a, b = map(int, sys.stdin.readline().split())\r\n print(\"Alice\" if f(a) > f(b) else \"Bob\" if f(a) < f(b) else \"Draw\")\r\n\r\n @staticmethod\r\n def b():\r\n n, m = map(int, sys.stdin.readline().split())\r\n a = [sys.stdin.readline().rstrip() for _ in range(n)]\r\n b = [sys.stdin.readline().rstrip() for _ in range(m)]\r\n\r\n for i in range(n - m + 1):\r\n for j in range(n - m + 1):\r\n for y in range(m):\r\n for x in range(m):\r\n if a[i + y][j + x] == b[y][x]:\r\n continue\r\n break\r\n else:\r\n continue\r\n break\r\n else:\r\n print(\"Yes\")\r\n return\r\n print(\"No\")\r\n\r\n @staticmethod\r\n def 
c():\r\n n, m, *ab = map(int, sys.stdin.read().split())\r\n g = GeometryTopology.Graph(n)\r\n for a, b in zip(*[iter(ab)] * 2):\r\n a -= 1\r\n b -= 1\r\n g.add_edge(a, b)\r\n g.add_edge(b, a)\r\n\r\n cnt = 0\r\n stack = [(0, 1)]\r\n while stack:\r\n u, s = stack.pop()\r\n if s == (1 << n) - 1:\r\n cnt += 1\r\n continue\r\n for v in g.edges[u]:\r\n if s >> v & 1:\r\n continue\r\n stack.append((v, s | 1 << v))\r\n print(cnt)\r\n\r\n @staticmethod\r\n def d():\r\n n, ma, mb, *abc = map(int, sys.stdin.read().split())\r\n dp = np.full((401, 401), np.inf)\r\n dp[0, 0] = 0\r\n for a, b, c in zip(*[iter(abc)] * 3):\r\n np.minimum(dp[a:, b:], dp[:-a, :-b] + c, out=dp[a:, b:])\r\n i = np.arange(1, 400 // max(ma, mb) + 1)\r\n res = dp[i * ma, i * mb].min()\r\n print(int(res) if res != np.inf else -1)\r\n\r\n class ABC055:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n print(800 * n - 200 * (n // 15))\r\n\r\n @staticmethod\r\n def b():\r\n n = int(sys.stdin.readline().rstrip())\r\n fac, _ = Algebra.generate_fac_ifac(n, MOD)\r\n print(fac[-1])\r\n\r\n @staticmethod\r\n def c():\r\n n, m = map(int, sys.stdin.readline().split())\r\n print(m // 2 if m <= 2 * n else n + (m - 2 * n) // 4)\r\n\r\n @staticmethod\r\n def d():\r\n n, s = sys.stdin.read().split()\r\n n = int(n)\r\n s = [1 if c == \"o\" else 0 for c in s]\r\n\r\n def possible(t):\r\n for i in range(1, n - 1):\r\n t[i + 1] = t[i - 1] ^ t[i] ^ s[i]\r\n return (\r\n (t[0] ^ s[0] ^ t[1] ^ t[-1])\r\n | (t[-1] ^ s[-1] ^ t[-2] ^ t[0])\r\n ) ^ 1\r\n\r\n for fst in [(1, 0), (0, 1), (1, 1), (0, 0)]:\r\n t = [None] * n\r\n t[0], t[1] = fst[0], fst[1]\r\n if possible(t):\r\n print(\"\".join(\"S\" if x == 1 else \"W\" for x in t))\r\n return\r\n print(-1)\r\n\r\n class ABC056:\r\n @staticmethod\r\n def a():\r\n def to_i(c):\r\n return 1 if c == \"H\" else 0\r\n\r\n a, b = map(to_i, sys.stdin.readline().split())\r\n print(\"D\" if a ^ b else \"H\")\r\n\r\n @staticmethod\r\n def b():\r\n w, a, b = map(int, sys.stdin.readline().split())\r\n if a > b:\r\n a, b = b, a\r\n print(max(b - (a + w), 0))\r\n\r\n @staticmethod\r\n def c():\r\n x = int(sys.stdin.readline().rstrip())\r\n print(int(math.ceil(math.sqrt(2 * x + 1 / 4) - 0.5)))\r\n\r\n @staticmethod\r\n def d():\r\n n, k, *a = map(int, sys.stdin.read().split())\r\n a = sorted(min(x, k) for x in a)\r\n\r\n def necessary(i):\r\n dp = np.zeros(k, dtype=np.bool)\r\n dp[0] = True\r\n for j in range(n):\r\n if j == i:\r\n continue\r\n dp[a[j] :] += dp[: -a[j]]\r\n return np.any(dp[k - a[i] :])\r\n\r\n def binary_search():\r\n lo, hi = -1, n\r\n while hi - lo > 1:\r\n i = (lo + hi) // 2\r\n if necessary(i):\r\n hi = i\r\n else:\r\n lo = i\r\n return hi\r\n\r\n print(binary_search())\r\n\r\n class ABC057:\r\n @staticmethod\r\n def a():\r\n a, b = map(int, sys.stdin.readline().split())\r\n print((a + b) % 24)\r\n\r\n @staticmethod\r\n def b():\r\n n, m, *I = map(int, sys.stdin.read().split())\r\n I = np.array(I).reshape(-1, 2)\r\n ab, cd = I[:n], I[n:]\r\n print(\r\n *(\r\n np.argmin(\r\n np.absolute(ab[:, None] - cd).sum(axis=-1), axis=-1\r\n )\r\n + 1\r\n ),\r\n sep=\"\\n\",\r\n )\r\n\r\n @staticmethod\r\n def c():\r\n n = int(sys.stdin.readline().rstrip())\r\n divs = NumberTheory.find_divisors(n)\r\n print(len(str(divs[bi_l(divs, math.sqrt(n))])))\r\n\r\n @staticmethod\r\n def d():\r\n c = Combinatorics.choose\r\n n, a, b, *v = map(int, sys.stdin.read().split())\r\n v.sort()\r\n print(sum(v[-a:]) / a)\r\n l, r = bi_l(v, v[-a]), bi_r(v, v[-a])\r\n print(\r\n sum(\r\n c(r - l, i)\r\n for 
i in range(r - n + a, r - max(l, n - b) + 1)\r\n )\r\n if r == n\r\n else c(r - l, r - n + a)\r\n )\r\n\r\n class ABC058:\r\n @staticmethod\r\n def a():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n print(\"YES\" if c - b == b - a else \"NO\")\r\n\r\n @staticmethod\r\n def b():\r\n s, t = sys.stdin.read().split()\r\n a = \"\"\r\n for i in range(len(t)):\r\n a += s[i] + t[i]\r\n if len(s) > len(t):\r\n a += s[-1]\r\n print(a)\r\n\r\n @staticmethod\r\n def c():\r\n n, *s = sys.stdin.read().split()\r\n res = {c: 100 for c in string.ascii_lowercase}\r\n for counter in map(Counter, s):\r\n for (\r\n c,\r\n x,\r\n ) in res.items():\r\n res[c] = min(x, counter[c])\r\n t = \"\"\r\n for c, x in sorted(res.items()):\r\n t += c * x\r\n print(t)\r\n\r\n @staticmethod\r\n def d():\r\n n, m, *xy = map(int, sys.stdin.read().split())\r\n x, y = np.array(xy[:n]), np.array(xy[n:])\r\n print(\r\n (x * (np.arange(n) + 1) - np.cumsum(x)).sum()\r\n % MOD\r\n * ((y * (np.arange(m) + 1) - np.cumsum(y)).sum() % MOD)\r\n % MOD\r\n )\r\n\r\n class ABC059:\r\n @staticmethod\r\n def a():\r\n def initial(s):\r\n return s[0].upper()\r\n\r\n print(\"\".join(map(initial, sys.stdin.readline().split())))\r\n\r\n @staticmethod\r\n def b():\r\n a, b = sys.stdin.read().split()\r\n la, lb = len(a), len(b)\r\n print(\r\n \"GREATER\"\r\n if la > lb\r\n else \"LESS\"\r\n if la < lb\r\n else \"GREATER\"\r\n if a > b\r\n else \"LESS\"\r\n if a < b\r\n else \"EQUAL\"\r\n )\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n c = s = 0\r\n for i in range(n):\r\n s += a[i]\r\n if i & 1 and s >= 0:\r\n c += s + 1\r\n s = -1\r\n elif i & 1 ^ 1 and s <= 0:\r\n c += 1 - s\r\n s = 1\r\n c1 = c\r\n c = s = 0\r\n for i in range(n):\r\n s += a[i]\r\n if i & 1 and s <= 0:\r\n c += 1 - s\r\n s = 1\r\n elif i & 1 ^ 1 and s >= 0:\r\n c += s + 1\r\n s = -1\r\n c2 = c\r\n print(min(c1, c2))\r\n\r\n @staticmethod\r\n def d():\r\n x, y = map(int, sys.stdin.readline().split())\r\n print(\"Brown\" if abs(x - y) <= 1 else \"Alice\")\r\n\r\n class ABC060:\r\n @staticmethod\r\n def a():\r\n a, b, c = sys.stdin.readline().split()\r\n print(\"YES\" if a[-1] == b[0] and b[-1] == c[0] else \"NO\")\r\n\r\n @staticmethod\r\n def b():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n print(\"NO\" if c % NumberTheory.gcd(a, b) else \"YES\")\r\n\r\n @staticmethod\r\n def c():\r\n n, t, *a = map(int, sys.stdin.read().split())\r\n print(sum(min(a[i + 1] - a[i], t) for i in range(n - 1)) + t)\r\n\r\n @staticmethod\r\n def d():\r\n pass\r\n\r\n class ABC061:\r\n @staticmethod\r\n def a():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n print(\"Yes\" if a <= c <= b else \"No\")\r\n\r\n @staticmethod\r\n def b():\r\n n, m, *ab = map(int, sys.stdin.read().split())\r\n ab = np.array(ab) - 1\r\n g = np.zeros(n, dtype=np.int32)\r\n np.add.at(g, ab, 1)\r\n print(*g, sep=\"\\n\")\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *ab = map(int, sys.stdin.read().split())\r\n ab = np.transpose(np.array(ab).reshape(n, 2))\r\n a, b = ab[:, np.argsort(ab[0])]\r\n print(a[np.cumsum(b) >= k][0])\r\n\r\n @staticmethod\r\n def d():\r\n n, m, *abc = map(int, sys.stdin.read().split())\r\n a, b, c = np.array(abc).reshape(m, 3).T\r\n a -= 1\r\n b -= 1\r\n c *= -1\r\n g = csr_matrix(\r\n ([1] * (m + 1), (np.append(a, n - 1), np.append(b, 0))), (n, n)\r\n )\r\n _, labels = connected_components(g, connection=\"strong\")\r\n bl = (labels[a] == labels[0]) & (labels[b] == labels[0])\r\n g = csr_matrix((c[bl], (a[bl], b[bl])), (n, n))\r\n 
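# Note on the approach used just below (an illustrative aside, not part of the
# original submission): the weights were negated above (c *= -1), so the
# Bellman-Ford call computes the *longest* score path from vertex 0 to n-1
# within the strongly connected component that contains both endpoints; scipy
# raises NegativeCycleError when a reachable negative cycle exists, which
# after negation means the score can grow without bound, hence "inf".
# A minimal toy sketch of the same idea, with a hypothetical 3-node graph:
#   import numpy as np
#   from scipy.sparse import csr_matrix
#   from scipy.sparse.csgraph import shortest_path
#   w = csr_matrix(([-1, -2], ([0, 1], [1, 2])), (3, 3))  # weights already negated
#   print(-shortest_path(w, method="BF", directed=True, indices=0)[-1])  # 3.0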
try:\r\n print(\r\n -shortest_path(g, method=\"BF\", directed=True, indices=0)[\r\n -1\r\n ].astype(int)\r\n )\r\n except:\r\n print(\"inf\")\r\n\r\n @staticmethod\r\n def d_2():\r\n n, m, *abc = map(int, sys.stdin.read().split())\r\n a, b, c = np.array(abc).reshape(m, 3).T\r\n a -= 1\r\n b -= 1\r\n c *= -1\r\n d = np.full(n, np.inf)\r\n d[0] = 0\r\n for _ in range(n - 1):\r\n np.minimum.at(d, b, d[a] + c)\r\n neg_cycle = np.zeros(n, dtype=np.bool)\r\n for _ in range(n):\r\n np.logical_or.at(neg_cycle, b, d[a] + c < d[b])\r\n np.minimum.at(d, b, d[a] + c)\r\n print(inf if neg_cycle[-1] else -d[-1].astype(int))\r\n\r\n class ABC062:\r\n @staticmethod\r\n def a():\r\n g = [0, 2, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0]\r\n x, y = map(int, sys.stdin.readline().split())\r\n print(\"Yes\" if g[x - 1] == g[y - 1] else \"No\")\r\n\r\n @staticmethod\r\n def b():\r\n h, w = map(int, sys.stdin.readline().split())\r\n a = np.array(\r\n [list(s) for s in sys.stdin.read().split()], dtype=\"U1\"\r\n )\r\n a = np.pad(a, pad_width=1, constant_values=\"#\")\r\n for s in a:\r\n print(\"\".join(s))\r\n\r\n @staticmethod\r\n def c():\r\n h, w = map(int, sys.stdin.readline().split())\r\n if h * w % 3 == 0:\r\n print(0)\r\n return\r\n\r\n def minimize(h, w):\r\n return min(\r\n h,\r\n *(\r\n s[-1] - s[0]\r\n for x in range(w // 3, w // 3 + 2)\r\n for s in (\r\n sorted(\r\n [\r\n h * x,\r\n h // 2 * (w - x),\r\n (h + 1) // 2 * (w - x),\r\n ]\r\n ),\r\n )\r\n ),\r\n )\r\n\r\n print(min(minimize(h, w), minimize(w, h)))\r\n\r\n @staticmethod\r\n def d():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n\r\n def optimize(a):\r\n a = list(a)\r\n l, r = a[:n], a[n:]\r\n heapify(l)\r\n s = [None] * (n + 1)\r\n s[0] = sum(l)\r\n for i in range(n):\r\n x = heappop(l)\r\n heappush(l, max(x, r[i]))\r\n s[i + 1] = s[i] + max(0, r[i] - x)\r\n return np.array(s)\r\n\r\n print(\r\n (\r\n optimize(a[: 2 * n]) + optimize(-a[-1 : n - 1 : -1])[::-1]\r\n ).max()\r\n )\r\n\r\n class ABC063:\r\n @staticmethod\r\n def a():\r\n a = sum(map(int, sys.stdin.readline().split()))\r\n print(\"error\" if a >= 10 else a)\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip()\r\n print(\"yes\" if len(set(s)) == len(s) else \"no\")\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n s = a.sum()\r\n if s % 10:\r\n print(s)\r\n elif not np.count_nonzero(a % 10):\r\n print(0)\r\n else:\r\n print(s - a[a % 10 != 0].min())\r\n\r\n @staticmethod\r\n def d():\r\n n, a, b, *h = map(int, sys.stdin.read().split())\r\n h = np.array(h)\r\n d = a - b\r\n\r\n def possible(c):\r\n hh = h.copy()\r\n np.maximum(hh - b * c, 0, out=hh)\r\n return ((hh + d - 1) // d).sum() <= c\r\n\r\n def binary_search():\r\n lo, hi = 0, 10**9\r\n while hi - lo > 1:\r\n c = (lo + hi) // 2\r\n if possible(c):\r\n hi = c\r\n else:\r\n lo = c\r\n return hi\r\n\r\n print(binary_search())\r\n\r\n class ABC064:\r\n @staticmethod\r\n def a():\r\n r, g, b = map(int, sys.stdin.readline().split())\r\n print(\"NO\" if (10 * g + b) % 4 else \"YES\")\r\n\r\n @staticmethod\r\n def b():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a.sort()\r\n print(a[-1] - a[0])\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = np.bincount(np.minimum(np.array(a) // 400, 8), minlength=9)\r\n mx = np.count_nonzero(a[:-1]) + a[-1]\r\n mn = max(mx - a[-1], 1)\r\n print(mn, mx)\r\n\r\n @staticmethod\r\n def d():\r\n n, s = sys.stdin.read().split()\r\n l = r = 0\r\n for c in s:\r\n if c == 
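The `d_2` variant above runs Bellman-Ford with numpy and marks vertices whose distance is still improvable, i.e. vertices reachable through a negative cycle. A pure-Python sketch of the same idea (function name and the toy edge list are illustrative assumptions):

def bellman_ford_with_neg_cycle(n, edges, src):
    # edges: list of directed (u, v, w); returns (dist, tainted) where
    # tainted[v] is True when v's distance is unbounded below because a
    # negative cycle lies on some path from src to v.
    INF = float("inf")
    dist = [INF] * n
    dist[src] = 0
    for _ in range(n - 1):
        for u, v, w in edges:
            if dist[u] != INF and dist[u] + w < dist[v]:
                dist[v] = dist[u] + w
    tainted = [False] * n
    for _ in range(n):  # n more rounds so the flag can propagate n hops
        for u, v, w in edges:
            if dist[u] == INF:
                continue
            if dist[u] + w < dist[v]:
                dist[v] = dist[u] + w
                tainted[v] = True
            if tainted[u]:
                tainted[v] = True
    return dist, tainted

# tiny example: the negative cycle 1 -> 2 -> 1 can reach vertex 3
edges = [(0, 1, 1), (1, 2, -2), (2, 1, 1), (2, 3, 1), (0, 4, 5)]
dist, tainted = bellman_ford_with_neg_cycle(5, edges, 0)
print(tainted)  # [False, True, True, True, False]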
\"(\":\r\n r += 1\r\n else:\r\n if r == 0:\r\n l += 1\r\n else:\r\n r -= 1\r\n print(\"(\" * l + s + \")\" * r)\r\n\r\n class ABC065:\r\n @staticmethod\r\n def a():\r\n x, a, b = map(int, sys.stdin.readline().split())\r\n y = -a + b\r\n print(\"delicious\" if y <= 0 else \"safe\" if y <= x else \"dangerous\")\r\n\r\n @staticmethod\r\n def b():\r\n n, *a = [int(x) - 1 for x in sys.stdin.read().split()]\r\n i = 0\r\n for c in range(n):\r\n i = a[i]\r\n if i == 1:\r\n print(c + 1)\r\n return\r\n print(-1)\r\n\r\n @staticmethod\r\n def c():\r\n n, m = map(int, sys.stdin.readline().split())\r\n d = abs(n - m)\r\n if d >= 2:\r\n print(0)\r\n return\r\n fac, _ = Algebra.generate_fac_ifac(10**5)\r\n print(fac[n] * fac[m] * (1 if d else 2) % MOD)\r\n\r\n @staticmethod\r\n def d():\r\n n, *xy = map(int, sys.stdin.read().split())\r\n x, y = np.array(xy).reshape(n, 2).T\r\n i = np.argsort(x)\r\n ax, bx, cx = (\r\n i[:-1],\r\n i[1:],\r\n x[\r\n i[1:],\r\n ]\r\n - x[i[:-1]],\r\n )\r\n i = np.argsort(y)\r\n ay, by, cy = (\r\n i[:-1],\r\n i[1:],\r\n y[\r\n i[1:],\r\n ]\r\n - y[i[:-1]],\r\n )\r\n e = np.vstack(\r\n [np.hstack([ax, ay]), np.hstack([bx, by]), np.hstack([cx, cy])]\r\n )\r\n e = e[:, np.argsort(e[-1])]\r\n _, i = np.unique(e[:-1], return_index=True, axis=1)\r\n a, b, c = e[:, i]\r\n print(\r\n minimum_spanning_tree(csr_matrix((c, (a, b)), (n, n)))\r\n .astype(np.int64)\r\n .sum()\r\n )\r\n\r\n @staticmethod\r\n def d_2():\r\n n, *xy = map(int, sys.stdin.read().split())\r\n x, y = xy[::2], xy[1::2]\r\n g = GeometryTopology.Graph(n)\r\n\r\n def make(a):\r\n b = sorted(enumerate(a), key=lambda x: x[1])\r\n for i in range(n - 1):\r\n u, v, w = b[i][0], b[i + 1][0], b[i + 1][1] - b[i][1]\r\n for u, v in [(v, u), (u, v)]:\r\n if not v in g.edges[u]:\r\n g.add_edge(u, v, weight=w)\r\n else:\r\n g.edges[u][v].weight = min(g.edges[u][v].weight, w)\r\n\r\n make(x)\r\n make(y)\r\n _, d = g.kruskal()\r\n # _, d = g.prim()\r\n # _, d = g.boruvka()\r\n print(d)\r\n\r\n class ABC066:\r\n @staticmethod\r\n def a():\r\n print(sum(sorted(map(int, sys.stdin.readline().split()))[:-1]))\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip()\r\n\r\n def f(s):\r\n n = len(s) // 2\r\n return s[:n] == s[n:]\r\n\r\n for i in range(len(s) - 2, 0, -2):\r\n if f(s[:i]):\r\n print(i)\r\n return\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n b = deque()\r\n for i in range(n):\r\n if i & 1:\r\n b.appendleft(a[i])\r\n else:\r\n b.append(a[i])\r\n if n & 1:\r\n b.reverse()\r\n print(*b)\r\n\r\n @staticmethod\r\n def d():\r\n n, *a = map(int, sys.stdin.read().split())\r\n tmp = [None] * (n + 1)\r\n for i in range(n + 1):\r\n if tmp[a[i]] is not None:\r\n d = tmp[a[i]] + n - i\r\n break\r\n tmp[a[i]] = i\r\n k = np.arange(1, n + 2)\r\n c = Combinatorics.CombinationsMod(n + 1, MOD)\r\n print(*((c(n + 1, k) - c(d, k - 1)) % MOD), sep=\"\\n\")\r\n\r\n class ABC067:\r\n @staticmethod\r\n def a():\r\n pass\r\n\r\n @staticmethod\r\n def b():\r\n pass\r\n\r\n @staticmethod\r\n def c():\r\n pass\r\n\r\n @staticmethod\r\n def d():\r\n n, *ab = map(int, sys.stdin.read().split())\r\n g = GeometryTopology.Graph(n)\r\n for a, b in zip(*[iter(ab)] * 2):\r\n a -= 1\r\n b -= 1\r\n g.add_edge(a, b)\r\n g.add_edge(b, a)\r\n d1, d2 = g.bfs(0), g.bfs(n - 1)\r\n print(\r\n \"Fennec\"\r\n if sum(d1[i] <= d2[i] for i in range(n)) > n // 2\r\n else \"Snuke\"\r\n )\r\n\r\n class ABC068:\r\n @staticmethod\r\n def d():\r\n k = int(sys.stdin.readline().rstrip())\r\n q, r = divmod(k, 50)\r\n n = 
50\r\n a = np.arange(n) + q\r\n a[-r:] += 1\r\n print(n)\r\n print(*a)\r\n\r\n class ABC069:\r\n pass\r\n\r\n class ABC070:\r\n pass\r\n\r\n class ABC071:\r\n pass\r\n\r\n class ABC072:\r\n pass\r\n\r\n class ABC073:\r\n pass\r\n\r\n class ABC074:\r\n pass\r\n\r\n class ABC075:\r\n pass\r\n\r\n class ABC076:\r\n pass\r\n\r\n class ABC077:\r\n pass\r\n\r\n class ABC078:\r\n pass\r\n\r\n class ABC079:\r\n pass\r\n\r\n class ABC080:\r\n pass\r\n\r\n class ABC081:\r\n pass\r\n\r\n class ABC082:\r\n pass\r\n\r\n class ABC083:\r\n pass\r\n\r\n class ABC084:\r\n pass\r\n\r\n class ABC085:\r\n pass\r\n\r\n class ABC086:\r\n pass\r\n\r\n class ABC087:\r\n pass\r\n\r\n class ABC088:\r\n pass\r\n\r\n class ABC089:\r\n pass\r\n\r\n class ABC090:\r\n pass\r\n\r\n class ABC091:\r\n pass\r\n\r\n class ABC092:\r\n pass\r\n\r\n class ABC093:\r\n pass\r\n\r\n class ABC094:\r\n pass\r\n\r\n class ABC095:\r\n pass\r\n\r\n class ABC096:\r\n pass\r\n\r\n class ABC097:\r\n pass\r\n\r\n class ABC098:\r\n pass\r\n\r\n class ABC099:\r\n pass\r\n\r\n class ABC100:\r\n pass\r\n\r\n class ABC101:\r\n pass\r\n\r\n class ABC102:\r\n pass\r\n\r\n class ABC103:\r\n pass\r\n\r\n class ABC104:\r\n pass\r\n\r\n class ABC105:\r\n pass\r\n\r\n class ABC106:\r\n pass\r\n\r\n class ABC107:\r\n pass\r\n\r\n class ABC108:\r\n pass\r\n\r\n class ABC109:\r\n pass\r\n\r\n class ABC110:\r\n pass\r\n\r\n class ABC111:\r\n pass\r\n\r\n class ABC112:\r\n pass\r\n\r\n class ABC113:\r\n pass\r\n\r\n class ABC114:\r\n pass\r\n\r\n class ABC115:\r\n pass\r\n\r\n class ABC116:\r\n pass\r\n\r\n class ABC117:\r\n pass\r\n\r\n class ABC118:\r\n pass\r\n\r\n class ABC119:\r\n pass\r\n\r\n class ABC120:\r\n pass\r\n\r\n class ABC121:\r\n pass\r\n\r\n class ABC122:\r\n pass\r\n\r\n class ABC123:\r\n pass\r\n\r\n class ABC124:\r\n pass\r\n\r\n class ABC125:\r\n pass\r\n\r\n class ABC126:\r\n pass\r\n\r\n class ABC127:\r\n pass\r\n\r\n class ABC128:\r\n pass\r\n\r\n class ABC129:\r\n pass\r\n\r\n class ABC130:\r\n pass\r\n\r\n class ABC131:\r\n pass\r\n\r\n class ABC132:\r\n pass\r\n\r\n class ABC133:\r\n pass\r\n\r\n class ABC134:\r\n pass\r\n\r\n class ABC135:\r\n pass\r\n\r\n class ABC136:\r\n pass\r\n\r\n class ABC137:\r\n pass\r\n\r\n class ABC138:\r\n pass\r\n\r\n class ABC139:\r\n pass\r\n\r\n class ABC140:\r\n pass\r\n\r\n class ABC141:\r\n pass\r\n\r\n class ABC142:\r\n pass\r\n\r\n class ABC143:\r\n pass\r\n\r\n class ABC144:\r\n pass\r\n\r\n class ABC145:\r\n pass\r\n\r\n class ABC146:\r\n pass\r\n\r\n class ABC147:\r\n pass\r\n\r\n class ABC148:\r\n pass\r\n\r\n class ABC149:\r\n pass\r\n\r\n class ABC150:\r\n pass\r\n\r\n class ABC151:\r\n pass\r\n\r\n class ABC152:\r\n pass\r\n\r\n class ABC153:\r\n pass\r\n\r\n class ABC154:\r\n pass\r\n\r\n class ABC155:\r\n pass\r\n\r\n class ABC156:\r\n pass\r\n\r\n class ABC157:\r\n pass\r\n\r\n class ABC158:\r\n pass\r\n\r\n class ABC159:\r\n pass\r\n\r\n class ABC160:\r\n pass\r\n\r\n class ABC161:\r\n pass\r\n\r\n class ABC162:\r\n pass\r\n\r\n class ABC163:\r\n pass\r\n\r\n class ABC164:\r\n pass\r\n\r\n class ABC165:\r\n pass\r\n\r\n class ABC166:\r\n pass\r\n\r\n class ABC167:\r\n pass\r\n\r\n class ABC168:\r\n pass\r\n\r\n class ABC169:\r\n pass\r\n\r\n class ABC170:\r\n @staticmethod\r\n def a():\r\n x = [int(x) for x in sys.stdin.readline().split()]\r\n for i in range(5):\r\n if x[i] != i + 1:\r\n print(i + 1)\r\n break\r\n\r\n @staticmethod\r\n def b():\r\n x, y = map(int, sys.stdin.readline().split())\r\n print(\"Yes\" if 2 * x <= y <= 4 * x and y % 2 == 0 else \"No\")\r\n\r\n 
@staticmethod\r\n def c():\r\n x, n, *p = map(int, sys.stdin.read().split())\r\n a = list(set(range(102)) - set(p))\r\n a = [(abs(y - x), y) for y in a]\r\n print(sorted(a)[0][1])\r\n\r\n @staticmethod\r\n def d():\r\n n, *a = map(int, sys.stdin.read().split())\r\n cand = set(a)\r\n cnt = 0\r\n for x, c in sorted(Counter(a).items()):\r\n cnt += c == 1 and x in cand\r\n cand -= set(range(x * 2, 10**6 + 1, x))\r\n print(cnt)\r\n\r\n @staticmethod\r\n def e():\r\n n, q = map(int, sys.stdin.readline().split())\r\n queue = []\r\n m = 2 * 10**5\r\n infants = [[] for _ in range(m)]\r\n highest_rate = [None] * m\r\n where = [None] * n\r\n rate = [None] * n\r\n\r\n def entry(i, k):\r\n where[i] = k\r\n while infants[k]:\r\n r, j = heappop(infants[k])\r\n if where[j] != k or j == i:\r\n continue\r\n if rate[i] >= -r:\r\n highest_rate[k] = rate[i]\r\n heappush(queue, (rate[i], k, i))\r\n heappush(infants[k], (r, j))\r\n break\r\n else:\r\n highest_rate[k] = rate[i]\r\n heappush(queue, (rate[i], k, i))\r\n heappush(infants[k], (-rate[i], i))\r\n\r\n def transfer(i, k):\r\n now = where[i]\r\n while infants[now]:\r\n r, j = heappop(infants[now])\r\n if where[j] != now or j == i:\r\n continue\r\n if highest_rate[now] != -r:\r\n highest_rate[now] = -r\r\n heappush(queue, (-r, now, j))\r\n heappush(infants[now], (r, j))\r\n break\r\n else:\r\n highest_rate[now] = None\r\n entry(i, k)\r\n\r\n def inquire():\r\n while True:\r\n r, k, i = heappop(queue)\r\n if where[i] != k or r != highest_rate[k]:\r\n continue\r\n heappush(queue, (r, k, i))\r\n return r\r\n\r\n for i in range(n):\r\n a, b = map(int, sys.stdin.readline().split())\r\n rate[i] = a\r\n entry(i, b - 1)\r\n for _ in range(q):\r\n c, d = map(int, sys.stdin.readline().split())\r\n transfer(c - 1, d - 1)\r\n print(inquire())\r\n\r\n class ABC171:\r\n @staticmethod\r\n def a():\r\n c = sys.stdin.readline().rstrip()\r\n print(\"A\" if c < \"a\" else \"a\")\r\n\r\n @staticmethod\r\n def b():\r\n n, k, *p = map(int, sys.stdin.read().split())\r\n print(sum(sorted(p)[:k]))\r\n\r\n @staticmethod\r\n def c():\r\n n = int(sys.stdin.readline().rstrip())\r\n n -= 1\r\n l = 1\r\n while True:\r\n if n < pow(26, l):\r\n break\r\n n -= pow(26, l)\r\n l += 1\r\n res = \"\".join(\r\n [chr(ord(\"a\") + d) for d in NumberTheory.base_convert(n, 26)][\r\n ::-1\r\n ]\r\n )\r\n res = \"a\" * (l - len(res)) + res\r\n print(res)\r\n\r\n @staticmethod\r\n def d():\r\n n = int(sys.stdin.readline().rstrip())\r\n a = [int(x) for x in sys.stdin.readline().split()]\r\n s = sum(a)\r\n cnt = Counter(a)\r\n q = int(sys.stdin.readline().rstrip())\r\n for _ in range(q):\r\n b, c = map(int, sys.stdin.readline().split())\r\n s += (c - b) * cnt[b]\r\n print(s)\r\n cnt[c] += cnt[b]\r\n cnt[b] = 0\r\n\r\n @staticmethod\r\n def e():\r\n n, *a = map(int, sys.stdin.read().split())\r\n s = 0\r\n for x in a:\r\n s ^= x\r\n b = map(lambda x: x ^ s, a)\r\n print(*b, sep=\" \")\r\n\r\n class ABC172:\r\n @staticmethod\r\n def a():\r\n a = int(sys.stdin.readline().rstrip())\r\n print(a * (1 + a + a**2))\r\n\r\n @staticmethod\r\n def b():\r\n s, t = sys.stdin.read().split()\r\n print(sum(s[i] != t[i] for i in range(len(s))))\r\n\r\n @staticmethod\r\n def c():\r\n n, m, k = map(int, sys.stdin.readline().split())\r\n a = [0] + [int(x) for x in sys.stdin.readline().split()]\r\n b = [int(x) for x in sys.stdin.readline().split()]\r\n (*sa,) = itertools.accumulate(a)\r\n (*sb,) = itertools.accumulate(b)\r\n res = 0\r\n for i in range(n + 1):\r\n r = k - sa[i]\r\n if r < 0:\r\n break\r\n res = max(res, i + 
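The ABC171-C solution above converts a counter to a spreadsheet-style lowercase name by peeling off powers of 26 and then base-converting. The same mapping can be written as a single bijective base-26 loop; a small sketch (function name is mine):

def number_to_name(n):
    # 1 -> 'a', 26 -> 'z', 27 -> 'aa', 703 -> 'aaa', ...
    s = []
    while n:
        n -= 1                      # shift to 0-based digit before dividing
        n, r = divmod(n, 26)
        s.append(chr(ord('a') + r))
    return ''.join(reversed(s))

for k in (1, 26, 27, 703):
    print(k, number_to_name(k))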
bi_r(sb, r))\r\n print(res)\r\n\r\n @staticmethod\r\n def d():\r\n n = int(sys.stdin.readline().rstrip())\r\n f = np.zeros(n + 1, dtype=np.int64)\r\n for i in range(1, n + 1):\r\n f[i::i] += 1\r\n print((np.arange(1, n + 1) * f[1:]).sum())\r\n\r\n class ABC173:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n charge = (n + 999) // 1000 * 1000 - n\r\n print(charge)\r\n\r\n @staticmethod\r\n def b():\r\n n, *s = sys.stdin.read().split()\r\n c = Counter(s)\r\n for v in \"AC, WA, TLE, RE\".split(\", \"):\r\n print(f\"{v} x {c[v]}\")\r\n\r\n @staticmethod\r\n def c():\r\n h, w, k = map(int, sys.stdin.readline().split())\r\n c = [sys.stdin.readline().rstrip() for _ in range(h)]\r\n tot = 0\r\n for i in range(1 << h):\r\n for j in range(1 << w):\r\n cnt = 0\r\n for y in range(h):\r\n for x in range(w):\r\n if i >> y & 1 or j >> x & 1:\r\n continue\r\n cnt += c[y][x] == \"#\"\r\n tot += cnt == k\r\n print(tot)\r\n\r\n @staticmethod\r\n def d():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a.sort(reverse=True)\r\n res = (\r\n a[0]\r\n + sum(a[1 : 1 + (n - 2) // 2]) * 2\r\n + a[1 + (n - 2) // 2] * (n & 1)\r\n )\r\n print(res)\r\n\r\n @staticmethod\r\n def e():\r\n MOD = 10**9 + 7\r\n n, k, *a = map(int, sys.stdin.read().split())\r\n minus = [x for x in a if x < 0]\r\n plus = [x for x in a if x > 0]\r\n if len(plus) + len(minus) // 2 * 2 >= k: # plus\r\n (*minus,) = map(abs, minus)\r\n minus.sort(reverse=True)\r\n plus.sort(reverse=True)\r\n cand = []\r\n if len(minus) & 1:\r\n minus = minus[:-1]\r\n for i in range(0, len(minus) - 1, 2):\r\n cand.append(minus[i] * minus[i + 1] % MOD)\r\n if k & 1:\r\n res = plus[0]\r\n plus = plus[1:]\r\n else:\r\n res = 1\r\n if len(plus) & 1:\r\n plus = plus[:-1]\r\n for i in range(0, len(plus) - 1, 2):\r\n cand.append(plus[i] * plus[i + 1] % MOD)\r\n cand.sort(reverse=True)\r\n for x in cand[: k // 2]:\r\n res *= x\r\n res %= MOD\r\n print(res)\r\n elif 0 in a:\r\n print(0)\r\n else:\r\n cand = sorted(map(abs, a))\r\n res = 1\r\n for i in range(k):\r\n res *= cand[i]\r\n res %= MOD\r\n res = MOD - res\r\n print(res)\r\n pass\r\n\r\n class ABC174:\r\n @staticmethod\r\n def a():\r\n print(\"Yes\" if int(sys.stdin.readline().rstrip()) >= 30 else \"No\")\r\n\r\n class ABC178:\r\n @staticmethod\r\n def a():\r\n pass\r\n\r\n @staticmethod\r\n def b():\r\n pass\r\n\r\n @staticmethod\r\n def c():\r\n pass\r\n\r\n @staticmethod\r\n def d():\r\n s = int(sys.stdin.readline().rstrip())\r\n if s == 0:\r\n print(1)\r\n return\r\n elif s == 1:\r\n print(0)\r\n return\r\n c = np.eye(3, k=-1, dtype=np.int64)\r\n c[0, 0] = c[0, 2] = 1\r\n a = np.array([0, 0, 1])\r\n print(Algebra.dot(Algebra.matrix_pow(c, s - 2), a)[0])\r\n\r\n class ABC179:\r\n @staticmethod\r\n def a():\r\n s = sys.stdin.readline().rstrip()\r\n print(s + \"s\" if s[-1] != \"s\" else s + \"es\")\r\n\r\n @staticmethod\r\n def b():\r\n n, *d = map(int, sys.stdin.read().split())\r\n d = np.array(d).reshape(n, 2).T\r\n d = np.equal(d[0], d[1]).astype(int)\r\n dd = d.copy()\r\n dd[1:] += d[:-1]\r\n dd[:-1] += d[1:]\r\n print(\"Yes\" if (dd >= 3).any() else \"No\")\r\n\r\n @staticmethod\r\n def c():\r\n n = int(sys.stdin.readline().rstrip())\r\n res = (n // np.arange(1, n + 1)).sum() - len(\r\n NumberTheory.find_divisors(n)\r\n )\r\n print(res)\r\n\r\n @staticmethod\r\n def d():\r\n mod = 998244353\r\n n, k, *lr = map(int, sys.stdin.read().split())\r\n l, r = np.array(lr).reshape(k, -1).T\r\n\r\n @njit((i8, i8[:], i8[:]), cache=True)\r\n def solve(n, l, r):\r\n res = np.zeros(n * 2, 
dtype=np.int64)\r\n res[0], res[1] = 1, -1\r\n for i in range(n - 1):\r\n res[i + 1] = (res[i + 1] + res[i]) % mod\r\n res[i + l] = (res[i + l] + res[i]) % mod\r\n res[i + r + 1] = (res[i + r + 1] - res[i]) % mod\r\n print(res[n - 1])\r\n\r\n solve(n, l, r)\r\n\r\n @staticmethod\r\n def e():\r\n n, x, m = map(int, sys.stdin.readline().split())\r\n res = [-1 for _ in range(m)]\r\n s = 0\r\n loop = np.zeros(m, dtype=np.int64)\r\n for i in range(m + 1):\r\n if i == n:\r\n print(s)\r\n return\r\n if res[x] != -1:\r\n l, loop = i - res[x], loop[res[x] : i]\r\n q, r = divmod(n - i, l)\r\n print(s + q * loop.sum() + loop[:r].sum())\r\n return\r\n res[x], loop[i] = i, x\r\n s += x\r\n x = x**2 % m\r\n\r\n class ABC180:\r\n @staticmethod\r\n def a():\r\n n, a, b = map(int, sys.stdin.readline().split())\r\n print(n - a + b)\r\n\r\n @staticmethod\r\n def b():\r\n n, *x = map(int, sys.stdin.read().split())\r\n x = np.absolute(np.array(x))\r\n print(x.sum())\r\n print(np.sqrt((x**2).sum()))\r\n print(x.max())\r\n\r\n @staticmethod\r\n def c():\r\n n = int(sys.stdin.readline().rstrip())\r\n div = NumberTheory.find_divisors(n)\r\n print(*div, sep=\"\\n\")\r\n\r\n @staticmethod\r\n def d():\r\n x, y, a, b = map(int, sys.stdin.readline().split())\r\n cnt = 0\r\n while x * a <= x + b:\r\n x *= a\r\n if x >= y:\r\n print(cnt)\r\n return\r\n cnt += 1\r\n cnt += (y - x - 1) // b\r\n print(cnt)\r\n\r\n @staticmethod\r\n def e():\r\n n, *xyz = map(int, sys.stdin.read().split())\r\n\r\n xyz = list(zip(*[iter(xyz)] * 3))\r\n dist = [[0] * n for _ in range(n)]\r\n for i in range(n):\r\n a, b, c = xyz[i]\r\n for j in range(n):\r\n p, q, r = xyz[j]\r\n dist[i][j] = abs(p - a) + abs(q - b) + max(0, r - c)\r\n\r\n dp = [[inf] * n for _ in range(1 << n)]\r\n dp[0][0] = 0\r\n for s in range(1 << n):\r\n for i in range(n):\r\n t = s | (1 << i)\r\n for j in range(n):\r\n dp[t][i] = min(dp[t][i], dp[s][j] + dist[j][i])\r\n print(dp[-1][0])\r\n\r\n @staticmethod\r\n def f(): # rewrite with jit compiling later.\r\n n, m, l = map(int, sys.stdin.readline().split())\r\n c = Combinatorics.CombinationsMod(n, MOD)\r\n path = np.zeros(n + 1, dtype=np.int64)\r\n path[1] = path[2] = 1\r\n for i in range(3, n + 1):\r\n path[i] = path[i - 1] * i % MOD\r\n cycle = np.zeros(n + 1, dtype=np.int64)\r\n cycle[1:] = path[:-1]\r\n dp = np.zeros((n + 1, m + 1), dtype=np.int64)\r\n\r\n def f(l):\r\n dp[:, :] = 0\r\n dp[0, 0] = 1\r\n for i in range(n):\r\n for j in range(m + 1):\r\n k = np.arange(1, min(l, n - i, m - j + 1) + 1)\r\n dp[i + k, j + k - 1] += (\r\n dp[i, j]\r\n * c(n - i - 1, k - 1)\r\n % MOD\r\n * path[k]\r\n % MOD\r\n )\r\n dp[i + k, j + k - 1] %= MOD\r\n k = np.arange(2, min(l, n - i, m - j) + 1)\r\n dp[i + k, j + k] += (\r\n dp[i, j]\r\n * c(n - i - 1, k - 1)\r\n % MOD\r\n * cycle[k]\r\n % MOD\r\n )\r\n dp[i + k, j + k] %= MOD\r\n return dp[n, m]\r\n\r\n print((f(l) - f(l - 1)) % MOD)\r\n\r\n @staticmethod\r\n def f_2(): # PyPy\r\n n, m, l = map(int, sys.stdin.readline().split())\r\n c = Combinatorics.CombinationsMod(n, MOD)\r\n path = [0] * (n + 1)\r\n path[1] = path[2] = 1\r\n for i in range(3, n + 1):\r\n path[i] = path[i - 1] * i % MOD\r\n cycle = [0] + path[:-1]\r\n\r\n def f(l):\r\n dp = [[0] * (m + 1) for _ in range(n + 1)]\r\n dp[0][0] = 1\r\n for i in range(n):\r\n for j in range(m + 1):\r\n for k in range(1, min(l, n - i, m - j + 1) + 1):\r\n dp[i + k][j + k - 1] += (\r\n dp[i][j]\r\n * c(n - i - 1, k - 1)\r\n % MOD\r\n * path[k]\r\n % MOD\r\n )\r\n dp[i + k][j + k - 1] %= MOD\r\n for k in range(1, min(l, n - i, m - j) 
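The ABC180-E solution above is a bitmask DP over visited subsets (Held-Karp style). Here is the general pattern as a standalone sketch; the 4-city distance matrix is a textbook example I supply for illustration, not data from the problem:

def shortest_closed_tour(dist):
    # dist[i][j]: cost from city i to city j; dp[s][i] is the cheapest way to
    # start at city 0, visit exactly the cities in bitmask s, and end at i.
    n = len(dist)
    INF = float("inf")
    dp = [[INF] * n for _ in range(1 << n)]
    dp[1][0] = 0
    for s in range(1 << n):
        for i in range(n):
            if dp[s][i] == INF or not s >> i & 1:
                continue
            for j in range(n):
                if s >> j & 1:
                    continue
                t = s | 1 << j
                if dp[s][i] + dist[i][j] < dp[t][j]:
                    dp[t][j] = dp[s][i] + dist[i][j]
    full = (1 << n) - 1
    return min(dp[full][i] + dist[i][0] for i in range(n))

dist = [
    [0, 10, 15, 20],
    [10, 0, 35, 25],
    [15, 35, 0, 30],
    [20, 25, 30, 0],
]
print(shortest_closed_tour(dist))  # 80 for this classic instance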
+ 1):\r\n dp[i + k][j + k] += (\r\n dp[i][j]\r\n * c(n - i - 1, k - 1)\r\n % MOD\r\n * cycle[k]\r\n % MOD\r\n )\r\n dp[i + k][j + k] %= MOD\r\n\r\n return dp[n][m]\r\n\r\n print((f(l) - f(l - 1)) % MOD)\r\n\r\n class ACL001:\r\n @staticmethod\r\n def a():\r\n n, *xy = map(int, sys.stdin.read().split())\r\n (*xy,) = zip(*[iter(xy)] * 2)\r\n print(xy)\r\n pass\r\n\r\n class TDPC:\r\n @staticmethod\r\n def t():\r\n pass\r\n\r\n class MSolutions2020:\r\n @staticmethod\r\n def a():\r\n x = int(sys.stdin.readline().rstrip())\r\n x -= 400\r\n print(8 - x // 200)\r\n\r\n @staticmethod\r\n def b():\r\n r, g, b, k = map(int, sys.stdin.read().split())\r\n while k and g <= r:\r\n g *= 2\r\n k -= 1\r\n while k and b <= g:\r\n b *= 2\r\n k -= 1\r\n print(\"Yes\" if r < g < b else \"No\")\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *a = map(int, sys.stdin.read().split())\r\n for i in range(k, n):\r\n print(\"Yes\" if a[i] > a[i - k] else \"No\")\r\n\r\n @staticmethod\r\n def d():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a += [-1]\r\n m = 1000\r\n s = 0\r\n for i in range(n):\r\n if a[i + 1] == a[i]:\r\n continue\r\n elif a[i + 1] > a[i]:\r\n cnt = m // a[i]\r\n m -= a[i] * cnt\r\n s += cnt\r\n else:\r\n m += a[i] * s\r\n s = 0\r\n print(m)\r\n\r\n\r\nclass Codeforces:\r\n class CR676div2:\r\n @staticmethod\r\n def a():\r\n t = int(sys.stdin.readline().rstrip())\r\n for _ in range(t):\r\n a, b = map(int, sys.stdin.readline().split())\r\n print(a ^ b)\r\n\r\n @staticmethod\r\n def b():\r\n t = int(sys.stdin.readline().rstrip())\r\n for _ in range(t):\r\n n = int(sys.stdin.readline().rstrip())\r\n s = [list(sys.stdin.readline().rstrip()) for _ in range(n)]\r\n s[0][0] = s[-1][-1] = \"0\"\r\n for i in range(n):\r\n for j in range(n):\r\n s[i][j] = int(s[i][j])\r\n\r\n def can_goal(g, c=0):\r\n visited = [0] * n\r\n stack = [(0, 0)]\r\n visited[0] |= 1 << 0\r\n while stack:\r\n y, x = stack.pop()\r\n for dy, dx in [(-1, 0), (0, -1), (1, 0), (0, 1)]:\r\n i, j = y + dy, x + dx\r\n if i < 0 or i >= n or j < 0 or j >= n:\r\n continue\r\n if i == j == n - 1:\r\n return True\r\n if visited[i] >> j & 1:\r\n continue\r\n visited[i] |= 1 << j\r\n if g[i][j] != c:\r\n continue\r\n stack.append((i, j))\r\n return False\r\n\r\n if not (can_goal(s, 0) or can_goal(s, 1)):\r\n print(0)\r\n continue\r\n\r\n flg = 0\r\n for i in range(n):\r\n for j in range(n):\r\n if i == j == 0 or i == j == n - 1:\r\n continue\r\n s[i][j] ^= 1\r\n if not (can_goal(s, 0) or can_goal(s, 1)):\r\n print(1)\r\n print(i + 1, j + 1)\r\n flg = 1\r\n break\r\n s[i][j] ^= 1\r\n if flg:\r\n break\r\n if flg:\r\n continue\r\n\r\n print(2)\r\n if s[0][1] == s[1][0]:\r\n print(n, n - 1)\r\n print(n - 1, n)\r\n continue\r\n\r\n if s[0][1] == s[-1][-2]:\r\n print(1, 2)\r\n print(n - 1, n)\r\n else:\r\n print(1, 2)\r\n print(n, n - 1)\r\n\r\n @staticmethod\r\n def c():\r\n pass\r\n\r\n\r\nclass ProjectEuler:\r\n @staticmethod\r\n def p1():\r\n def f(n, x):\r\n return (x + n // x * x) * (n // x) // 2\r\n\r\n n = 1000\r\n ans = f(n - 1, 3) + f(n - 1, 5) - f(n - 1, 15)\r\n print(ans)\r\n\r\n @staticmethod\r\n def p2():\r\n fib = [1, 2]\r\n while fib[-1] < 4 * 10**6:\r\n fib.append(fib[-1] + fib[-2])\r\n print(sum(fib[1:-1:3]))\r\n\r\n @staticmethod\r\n def p3():\r\n pn = NumberTheory.PrimeNumbers()\r\n res = pn.factorize(600851475143)\r\n print(max(res.keys()))\r\n\r\n @staticmethod\r\n def p4():\r\n def is_palindrome(n):\r\n n = str(n)\r\n return n == n[::-1]\r\n\r\n cand = []\r\n for a in range(100, 1000):\r\n for b in range(a, 1000):\r\n n = a * 
b\r\n if is_palindrome(n):\r\n cand.append(n)\r\n print(max(cand))\r\n\r\n @staticmethod\r\n def p5():\r\n pn = NumberTheory.PrimeNumbers()\r\n res = defaultdict(int)\r\n for i in range(1, 21):\r\n for p, c in pn.factorize(i).items():\r\n res[p] = max(res[p], c)\r\n ans = 1\r\n for p, c in res.items():\r\n ans *= pow(p, c)\r\n print(ans)\r\n\r\n @staticmethod\r\n def p6():\r\n a = np.arange(101)\r\n b = np.cumsum(a**2)\r\n a = a.cumsum()\r\n print(a[100] ** 2 - b[100])\r\n\r\n @staticmethod\r\n def p7():\r\n nt = NumberTheory.PrimeNumbers()\r\n print(sorted(nt)[10000])\r\n\r\n @staticmethod\r\n def p8():\r\n n = \"7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450\"\r\n n = [int(d) for d in list(n)]\r\n res = 0\r\n for i in range(988):\r\n x = 1\r\n for j in range(13):\r\n x *= n[i + j]\r\n res = max(res, x)\r\n print(res)\r\n\r\n @staticmethod\r\n def p9():\r\n for a in range(1, 997):\r\n for b in range(a, 998 - a):\r\n c = 1000 - a - b\r\n if a**2 + b**2 == c**2:\r\n print(a * b * c)\r\n return\r\n\r\n @staticmethod\r\n def p10():\r\n pn = NumberTheory.PrimeNumbers(2 * 10**6 + 1)\r\n print(sum(pn))\r\n\r\n @staticmethod\r\n def p11():\r\n grid = \"08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08 49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00 81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65 52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91 22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80 24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50 32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70 67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21 24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72 21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95 78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92 16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57 86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58 19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40 04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66 88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69 04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36 20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16 20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54 01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48\"\r\n print(grid)\r\n\r\n pass\r\n\r\n\r\nclass Yukicoder:\r\n def __init__(self):\r\n pass\r\n\r\n def __call__(self):\r\n print(1)\r\n\r\n\r\nclass AOJ:\r\n @staticmethod\r\n def ALDS1_12_A():\r\n n, *a = map(int, 
sys.stdin.read().split())\r\n g = GeometryTopology.Graph(n)\r\n for i in range(n - 1):\r\n for j in range(i + 1, n):\r\n if a[i * n + j] == -1:\r\n continue\r\n g.add_edge(i, j, weight=a[i * n + j])\r\n g.add_edge(j, i, weight=a[i * n + j])\r\n _, d = g.kruskal()\r\n # _, d = g.prim()\r\n # _, d = g.boruvka()\r\n print(d)\r\n\r\n @staticmethod\r\n def GRL_3_C(): # strongly connected components\r\n n, m = map(int, sys.stdin.readline().split())\r\n g = GeometryTopology.Graph(n)\r\n for _ in range(m):\r\n g.add_edge(*map(int, sys.stdin.readline().split()))\r\n r = g.scc()\r\n q, *uv = map(int, sys.stdin.read().split())\r\n for u, v in zip(*[iter(uv)] * 2):\r\n print(int(r[u] == r[v]))\r\n\r\n\r\nclass YosupoJudge:\r\n @staticmethod\r\n def Directed_MST():\r\n n, m, s, *abc = map(int, sys.stdin.read().split())\r\n g = GeometryTopology.Graph(n)\r\n for a, b, c in zip(*[iter(abc)] * 3):\r\n g.add_edge(a, b, weight=c)\r\n _, d, p = g.prim(src=s, return_parent=True)\r\n print(d)\r\n print(*p)\r\n\r\n @staticmethod\r\n def Manhattan_MST():\r\n n, *xy = map(int, sys.stdin.read().split())\r\n g = GeometryTopology.Graph(n)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n # AtCoder.ABC179.f()\r\n # AtCoder.ABC060.d()\r\n AtCoder.ABC068.d()\r\n # YosupoJudge.Directed_MST()\r\n pass\r\n", "import sys\r\n\r\nimport numpy as np\r\n\r\nh, w = map(int, sys.stdin.readline().split())\r\ns = np.array([list(sys.stdin.readline().rstrip()) for _ in range(h)], dtype='U')\r\ns = np.pad(s, 1)\r\n\r\ndef main():\r\n l = np.zeros((h+2, w+2), dtype=np.int64)\r\n r = np.zeros((h+2, w+2), dtype=np.int64)\r\n u = np.zeros((h+2, w+2), dtype=np.int64)\r\n d = np.zeros((h+2, w+2), dtype=np.int64)\r\n\r\n for i in range(1, w+1):\r\n bl = s[:, i] == '#'\r\n l[~bl, i] = l[~bl, i-1] + 1\r\n i = w+1-i\r\n bl = s[:, i] == '#'\r\n r[~bl, i] = r[~bl, i+1] + 1\r\n\r\n for i in range(1, h+1):\r\n bl = s[i, :] == '#'\r\n u[i, ~bl] = u[i-1, ~bl] + 1\r\n i = h+1-i\r\n bl = s[i, :] == '#'\r\n d[i, ~bl] = d[i+1, ~bl] + 1\r\n\r\n res = l + r + u + d - 3\r\n return np.amax(res[1:h+1, 1:w+1])\r\n\r\nif __name__ == '__main__':\r\n ans = main()\r\n print(ans)\n", "import sys\r\n\r\nimport numpy as np\r\n\r\nI = np.array(sys.stdin.read().split(), dtype=np.int64)\r\nR, C, K, n = I[:4]\r\nr, c = I[4:].reshape(-1, 2).T - 1\r\n\r\n\r\ndef main():\r\n y_cnt = np.bincount(r, minlength=R)\r\n x_cnt = np.bincount(c, minlength=C)\r\n cnt_y_cnt = np.bincount(y_cnt, minlength=K + 1)\r\n cnt_x_cnt = np.bincount(x_cnt, minlength=K + 1)\r\n\r\n res = np.sum(cnt_y_cnt[: K + 1] * cnt_x_cnt[K::-1])\r\n real_cnt = y_cnt[r] + x_cnt[c] - 1 # (grid[r][c]からみた本当の飴の数)\r\n cnt_real_cnt = np.bincount(real_cnt, minlength=K + 1)\r\n ans = res + cnt_real_cnt[K] - cnt_real_cnt[K - 1]\r\n return ans\r\n\r\n\r\nif __name__ == \"__main__\":\r\n ans = main()\r\n print(ans)\r\n", "import sys\r\nimport typing\r\n\r\nimport numba as nb\r\nimport numpy as np\r\n\r\n\r\[email protected]\r\ndef uf_build(n: int) -> np.ndarray:\r\n return np.full(n, -1, np.int64)\r\n\r\n\r\[email protected]\r\ndef uf_find(\r\n uf: np.ndarray,\r\n u: int,\r\n) -> int:\r\n if uf[u] < 0: return u\r\n uf[u] = uf_find(uf, uf[u])\r\n return uf[u]\r\n\r\n\r\[email protected]\r\ndef uf_unite(\r\n uf: np.ndarray,\r\n u: int,\r\n v: int,\r\n) -> typing.NoReturn:\r\n u = uf_find(uf, u)\r\n v = uf_find(uf, v)\r\n if u == v: return\r\n if uf[u] > uf[v]: u, v = v, u\r\n uf[u] += uf[v]\r\n uf[v] = u\r\n\r\n\r\n\r\[email protected]((nb.i8, nb.i8[:, :]), cache=True)\r\ndef solve(\r\n n: int,\r\n abc: np.ndarray,\r\n) -> 
typing.NoReturn:\r\n sort_idx = np.argsort(abc[:, 2], kind='mergesort')\r\n g = abc[sort_idx]\r\n m = len(g)\r\n added_edge_indices = np.zeros(m, np.int64)\r\n idx_to_add = 0\r\n def add_edge(i):\r\n nonlocal idx_to_add\r\n added_edge_indices[idx_to_add] = i\r\n idx_to_add += 1\r\n\r\n uf = uf_build(n)\r\n for i in range(m):\r\n u, v, w = g[i]\r\n if w >= 0 and uf_find(uf, u) == uf_find(uf, v):\r\n continue\r\n uf_unite(uf, u, v)\r\n add_edge(i)\r\n\r\n mst = g[added_edge_indices[:idx_to_add]]\r\n print(g[:, 2].sum() - mst[:, 2].sum())\r\n\r\n\r\n\r\n\r\ndef main() -> typing.NoReturn:\r\n n, m = map(int, input().split())\r\n abc = np.array(\r\n sys.stdin.read().split(),\r\n dtype=np.int64,\r\n ).reshape(m, 3)\r\n abc[:, :2] -= 1\r\n solve(n, abc)\r\n\r\n\r\nmain()\n", "import sys\r\n\r\nimport numpy as np\r\nfrom scipy.sparse import csr_matrix\r\nfrom scipy.sparse.csgraph import floyd_warshall\r\n\r\nn = int(sys.stdin.readline().rstrip())\r\nA = np.array(sys.stdin.read().split(), dtype=np.int64).reshape(n, n)\r\n\r\n\r\ndef main():\r\n B = floyd_warshall(csr_matrix(A), directed=False)\r\n if np.any(A - B > 0):\r\n return -1\r\n\r\n will_subtract = []\r\n for v in range(n - 1):\r\n for u in range(v + 1, n):\r\n d = B[v, u]\r\n for w in range(v):\r\n if B[v, w] + B[w, u] == d:\r\n will_subtract.append(d)\r\n break\r\n else:\r\n for w in range(v + 1, u):\r\n if B[v, w] + B[w, u] == d:\r\n will_subtract.append(d)\r\n break\r\n else:\r\n for w in range(u + 1, n):\r\n if B[v, w] + B[w, u] == d:\r\n will_subtract.append(d)\r\n break\r\n\r\n ans = np.sum(B) // 2 - sum(will_subtract)\r\n return int(ans)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n ans = main()\r\n print(ans)\r\n", "import itertools\r\nimport math\r\nimport string\r\nimport sys\r\nfrom bisect import bisect_left as bi_l\r\nfrom bisect import bisect_right as bi_r\r\nfrom collections import Counter, defaultdict, deque\r\nfrom functools import lru_cache, reduce\r\nfrom heapq import heapify, heappop, heappush\r\nfrom operator import or_, xor\r\n\r\nsys.setrecursionlimit(10**8)\r\ninf = float('inf')\r\nMOD = 10**9+7\r\n# MOD = 998244353\r\n\r\n\r\nusing_numpy = 1\r\nimport networkx as nx\r\nimport numpy as np\r\nfrom numba import i8, njit\r\nfrom scipy import optimize\r\nfrom scipy.ndimage import distance_transform_cdt\r\nfrom scipy.sparse import csr_matrix\r\nfrom scipy.sparse.csgraph import (\r\n connected_components,\r\n csgraph_to_dense,\r\n maximum_flow,\r\n minimum_spanning_tree,\r\n shortest_path,\r\n)\r\nfrom scipy.spatial import ConvexHull\r\nfrom scipy.special import comb\r\n\r\n\r\nclass Algebra:\r\n class Modular(int):\r\n def __init__(self, n, mod=MOD):\r\n self.value = n\r\n self.mod = mod\r\n\r\n def __str__(self): return f'{self.value}'\r\n\r\n def __add__(self, other):\r\n return self.__class__((self.value + other.value) % self.mod)\r\n def __sub__(self, x): return self.__class__((self.value - x.value) % self.mod)\r\n def __mul__(self, x): return self.__class__((self.value * x.value) % self.mod)\r\n def __pow__(self, x): return self.__class__(pow(self.value, x.value, self.mod))\r\n\r\n def __lt__(self, x): return self.value < x.value\r\n def __le__(self, x): return self.value <= x.value\r\n def __eq__(self, x): return self.value == x.value\r\n def __ne__(self, x): return self.value != x.value\r\n def __gt__(self, x): return self.value > x.value\r\n def __ge__(self, x): return self.value >= x.value\r\n\r\n\r\n\r\n class SemiGroup:\r\n pass\r\n class Monoid:\r\n pass\r\n class Group:\r\n pass\r\n class SemiRing:\r\n pass\r\n 
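The numba helpers above (`uf_build` / `uf_find` / `uf_unite`) store union-find as a single array where a root holds the negated component size. A pure-Python mirror of that exact layout, with a tiny usage example:

def uf_build(n):
    return [-1] * n          # every vertex starts as a root of size 1

def uf_find(uf, u):
    if uf[u] < 0:
        return u
    uf[u] = uf_find(uf, uf[u])   # path compression
    return uf[u]

def uf_unite(uf, u, v):
    u, v = uf_find(uf, u), uf_find(uf, v)
    if u == v:
        return
    if uf[u] > uf[v]:        # uf[root] = -size, so "greater" means smaller tree
        u, v = v, u
    uf[u] += uf[v]           # merge sizes into the larger root
    uf[v] = u

uf = uf_build(5)
uf_unite(uf, 0, 1)
uf_unite(uf, 3, 4)
print(uf_find(uf, 1) == uf_find(uf, 0))  # True
print(uf_find(uf, 2) == uf_find(uf, 0))  # False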
class Ring:\r\n pass\r\n\r\n\r\n @staticmethod\r\n def identity(n):\r\n if using_numpy:\r\n return np.identity(n, dtype=np.int64)\r\n else:\r\n a = [[0]*n for _ in range(n)]\r\n for i in range(n): a[i][i] = 1\r\n return a\r\n\r\n @staticmethod\r\n def dot(a, b):\r\n if using_numpy:\r\n return np.dot(a, b)\r\n else:\r\n h, w, l = len(a), len(b[0]), len(b)\r\n assert len(a[0]) == l\r\n c = [[0]*w for _ in range(h)]\r\n for i in range(h):\r\n for j in range(w):\r\n for k in range(l):\r\n c[i][j] += a[i][k]*b[k][j]\r\n return c\r\n\r\n @classmethod\r\n def matrix_pow(cls, a, n, mod=10**9+7):\r\n m = len(a)\r\n b = cls.identity(m)\r\n while n:\r\n if n&1: b = cls.dot(b, a)\r\n n >>= 1; a = cls.dot(a, a)\r\n if using_numpy:\r\n a %= mod; b %= mod\r\n else:\r\n for i in range(m):\r\n for j in range(m):\r\n a[i][j] %= mod\r\n b[i][j] %= mod\r\n return b\r\n\r\n @staticmethod\r\n def bitwise_dot(a, b):\r\n if using_numpy:\r\n return np.bitwise_xor.reduce(a[:,None,:] & b.T[None,:,:], axis=-1)\r\n else:\r\n h, w, l = len(a), len(b[0]), len(b)\r\n assert len(a[0]) == l\r\n c = [[0]*w for _ in range(h)]\r\n for i in range(h):\r\n for j in range(w):\r\n for k in range(l):\r\n c[i][j] ^= a[i][k]&b[k][j]\r\n return c\r\n\r\n @classmethod\r\n def bitwise_mat_pow(cls, a, n):\r\n if n==0: return np.eye(len(a), dtype=np.uint32)*((1<<32)-1)\r\n res = cls.bitwise_mat_pow(a, n//2)\r\n res = cls.bitwise_dot(res, res)\r\n return cls.bitwise_dot(res, a) if n&1 else res\r\n\r\n\r\n @staticmethod\r\n def cumprod(a, mod):\r\n l = len(a); sql = int(np.sqrt(l)+1)\r\n a = np.resize(a, sql**2).reshape(sql, sql)\r\n for i in range(sql-1): a[:, i+1] *= a[:, i]; a[:, i+1] %= mod\r\n for i in range(sql-1): a[i+1] *= a[i, -1]; a[i+1] %= mod\r\n return np.ravel(a)[:l]\r\n\r\n @classmethod\r\n def generate_fac_ifac(cls, n, p=MOD):\r\n if using_numpy:\r\n fac = np.arange(n+1); fac[0] = 1; fac = cls.cumprod(fac, p)\r\n ifac = np.arange(n+1, 0, -1); ifac[0] = pow(int(fac[-1]), p-2, p)\r\n ifac = cls.cumprod(ifac, p)[n::-1]\r\n else:\r\n fac = [None]*(n+1); fac[0] = 1\r\n for i in range(n): fac[i+1] = fac[i]*(i+1)%p\r\n ifac = [None]*(n+1); ifac[n] = pow(fac[n], p-2, p)\r\n for i in range(n, 0, -1): ifac[i-1] = ifac[i]*i%p\r\n return fac, ifac\r\n\r\n class Kitamasa:\r\n pass\r\n\r\n\r\nmint = Algebra.Modular\r\n\r\n\r\nclass NumberTheory:\r\n class PrimeNumbers: # pn\r\n def __init__(self, n=2*10**6):\r\n self.is_prime, self.prime_nums = self.find(n)\r\n\r\n def __call__(self, n): return self.is_prime[n]\r\n def __iter__(self): return iter(self.prime_nums)\r\n def __getitem__(self, key): return self.prime_nums[key]\r\n\r\n @staticmethod\r\n def find(n): # Sieve of eratosthenes\r\n if using_numpy:\r\n is_prime = np.ones(n+1, dtype=np.bool); is_prime[:2] = 0\r\n for i in range(2, int(n**.5)+1):\r\n if is_prime[i]: is_prime[i*2::i] = 0\r\n prime_nums = np.flatnonzero(is_prime)\r\n else:\r\n is_prime = [True]*(n+1); is_prime[0] = is_prime[1] = 0\r\n for i in range(2, int(n**.5)+1):\r\n if not is_prime[i]: continue\r\n for j in range(i*2, n+1, i): is_prime[j] = 0\r\n prime_nums = [i for i in range(2, n+1) if is_prime[i]]\r\n return is_prime, prime_nums\r\n\r\n @lru_cache(maxsize=None)\r\n def factorize(self, n):\r\n res = defaultdict(int)\r\n if n < 2: return res\r\n for p in self:\r\n if p*p > n: break\r\n while n%p == 0: res[p] += 1; n //= p\r\n if n == 1: return res\r\n res[n] = 1; return res\r\n\r\n def factorize_factorial(self, n):\r\n res = defaultdict(int)\r\n for i in range(2, n+1):\r\n for p, c in self.factorize(i).items(): 
res[p] += c\r\n return res\r\n\r\n @classmethod\r\n @lru_cache(maxsize=None)\r\n def gcd(cls, a, b): return cls.gcd(b, a%b) if b else abs(a)\r\n @classmethod\r\n def lcm(cls, a, b): return abs(a // cls.gcd(a, b) * b)\r\n\r\n @staticmethod\r\n def find_divisors(n):\r\n divisors = []\r\n for i in range(1, int(n**.5)+1):\r\n if n%i: continue\r\n divisors.append(i)\r\n j = n // i\r\n if j != i: divisors.append(j)\r\n return sorted(divisors)\r\n\r\n @staticmethod\r\n def base_convert(n, b):\r\n if not n: return [0]\r\n res = []\r\n while n:\r\n n, r = divmod(n, b)\r\n if r < 0: n += 1; r -= b\r\n res.append(r)\r\n return res\r\n\r\n\r\n\r\nclass Combinatorics:\r\n @classmethod\r\n @lru_cache(maxsize=None)\r\n def choose(cls, n, r, mod=None):\r\n if r > n or r < 0: return 0\r\n if r == 0: return 1\r\n res = cls.choose(n-1,r,mod) + cls.choose(n-1,r-1,mod)\r\n if mod: res %= mod\r\n return res\r\n\r\n class CombinationsMod:\r\n def __init__(self, n=2*10**6, mod=MOD):\r\n self.__mod = mod\r\n self.fac, self.ifac = Algebra.generate_fac_ifac(n, mod)\r\n\r\n def __call__(self, n, r): return self.__choose(n, r)\r\n\r\n def __choose(self, n, r):\r\n bl = (0<=r) & (r<=n)\r\n p = self.__mod\r\n return bl * self.fac[n] * self.ifac[r] % p * self.ifac[n-r] % p\r\n\r\n def make_nchoose_table(self, n):\r\n p = self.__mod\r\n r = len(self.__fac)-1\r\n if using_numpy:\r\n n_choose = np.arange(n+1, n-r, -1); n_choose[0] = 1\r\n n_choose = Algebra.cumprod(n_choose, p)*self.ifac%p\r\n else:\r\n n_choose = [None]*(r+1); n_choose[0] = 1\r\n for i in range(r): n_choose[i+1] = n_choose[i]*(n-i)%p\r\n for i in range(1,r+1): n_choose[i] = n_choose[i]*self.ifac[i]%p\r\n return n_choose\r\n\r\n @classmethod\r\n def permutations(cls, a, r=None, i=0):\r\n a = list(a); n = len(a)\r\n if r is None: r = n\r\n res = []\r\n if r > n or i > r: return res\r\n if i == r: return [tuple(a[:r])]\r\n for j in range(i, n): a[i],a[j] = a[j],a[i]; res += cls.permutations(a, r, i+1)\r\n return res\r\n\r\n @staticmethod\r\n def combinations(a, r):\r\n a = tuple(a)\r\n n = len(a)\r\n if r > n: return\r\n indices = list(range(r))\r\n yield a[:r]\r\n while True:\r\n for i in range(r-1, -1, -1):\r\n if indices[i] != i+n-r: break\r\n else: return\r\n indices[i] += 1\r\n for j in range(i+1, r): indices[j] = indices[j-1]+1\r\n yield tuple(a[i] for i in indices)\r\n\r\n\r\n\r\nclass DP:\r\n @staticmethod\r\n def LIS(a):\r\n res = [inf] * len(a)\r\n for x in a: res[bi_l(res, x)] = x\r\n return res\r\n\r\n\r\nclass String:\r\n @staticmethod\r\n def z_algorithm(s):\r\n n = len(s)\r\n a = [0] * n; a[0] = n\r\n l = r = -1\r\n for i in range(1, n):\r\n if r >= i: a[i] = min(a[i-l], r-i)\r\n while i + a[i] < n and s[i+a[i]] == s[a[i]]: a[i] += 1\r\n if i+a[i] >= r: l, r = i, i+a[i]\r\n return a\r\n\r\n\r\nclass GeometryTopology:\r\n class Graph:\r\n class __Edge:\r\n def __init__(self, weight=1, capacity=1, **args):\r\n self.weight = weight\r\n self.capacity = capacity\r\n\r\n def __str__(self):\r\n return f'weight: {self.weight}, cap: {self.capacity}'\r\n\r\n class __Node:\r\n def __init__(self, **args):\r\n pass\r\n\r\n def __init__(self, n=0):\r\n self.__N = n\r\n self.nodes = [None] * n\r\n self.edges = [{} for _ in range(n)]\r\n\r\n def add_node_info(self, v, **args): self.nodes[v] = self.__Node(**args)\r\n\r\n def add_edge(self, u, v, update=False, **args):\r\n if not update and v in self.edges[u]: return\r\n self.edges[u][v] = self.__Edge(**args)\r\n\r\n def get_size(self): return self.__N\r\n\r\n def bfs(self, src=0):\r\n n = self.__N\r\n 
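Many solutions in this file go through `Algebra.generate_fac_ifac` and `Combinatorics.CombinationsMod` to compute binomial coefficients modulo a prime. A self-contained sketch of that factorial/inverse-factorial table approach (helper names here are mine):

def make_fact_tables(n, p):
    # factorials and inverse factorials mod prime p
    fac = [1] * (n + 1)
    for i in range(1, n + 1):
        fac[i] = fac[i - 1] * i % p
    ifac = [1] * (n + 1)
    ifac[n] = pow(fac[n], p - 2, p)      # Fermat's little theorem
    for i in range(n, 0, -1):
        ifac[i - 1] = ifac[i] * i % p
    return fac, ifac

def choose(n, r, fac, ifac, p):
    if r < 0 or r > n:
        return 0
    return fac[n] * ifac[r] % p * ifac[n - r] % p

MOD = 10**9 + 7
fac, ifac = make_fact_tables(100, MOD)
print(choose(10, 3, fac, ifac, MOD))     # 120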
self.depth = self.lv = lv = [None]*n; lv[src] = 0 # depth in tree, or level in general graph.\r\n self.dist = dist = [inf]*n; dist[src] = 0 # dist for only tree.\r\n self.parent = par = [None]*n; par[src] = src\r\n q = deque([src])\r\n while q:\r\n u = q.popleft()\r\n for v, e in self.edges[u].items():\r\n if e.capacity == 0 or lv[v] is not None: continue\r\n lv[v], dist[v], par[v] = lv[u]+1, dist[u]+e.weight, u\r\n q.append(v)\r\n return dist\r\n\r\n def dinic(self, src, sink):\r\n def flow_to_sink(u, flow_in):\r\n if u == sink: return flow_in\r\n flow = 0\r\n for v, e in self.edges[u].items():\r\n if e.capacity == 0 or self.lv[v] <= self.lv[u]: continue\r\n f = flow_to_sink(v, min(flow_in, e.capacity))\r\n if not f: continue\r\n self.edges[u][v].capacity -= f\r\n if u in self.edges[v]: self.edges[v][u].capacity += f\r\n else: self.add_edge(v, u, capacity=f)\r\n flow_in -= f\r\n flow += f\r\n return flow\r\n\r\n flow = 0\r\n while True:\r\n self.bfs(src)\r\n if self.lv[sink] is None: return flow\r\n flow += flow_to_sink(src, inf)\r\n\r\n def ford_fulkerson(self):\r\n pass\r\n\r\n def push_relabel(self):\r\n pass\r\n\r\n def floyd_warshall(self):\r\n n = self.__N\r\n d = [[inf]*n for _ in range(n)]\r\n for u in range(n):\r\n d[u][u] = 0\r\n for v, e in self.edges[u].items(): d[u][v] = e.weight\r\n for w in range(n):\r\n for u in range(n):\r\n for v in range(n):\r\n d[u][v] = min(d[u][v], d[u][w]+d[w][v])\r\n return d\r\n\r\n def dijkstra(self, src, paths_cnt=False, mod=None):\r\n dist = [inf] * self.__N; dist[src] = 0\r\n visited = [False] * self.__N\r\n paths = [0] * self.__N; paths[src] = 1\r\n q = [(0, src)]\r\n while q:\r\n d, u = heappop(q)\r\n if visited[u]: continue\r\n visited[u] = True\r\n for v, e in self.edges[u].items():\r\n dv = d + e.weight\r\n if dv > dist[v]: continue\r\n elif dv == dist[v]:\r\n paths[v] += paths[u]\r\n if mod: paths[v] %= mod\r\n continue\r\n paths[v], dist[v] = paths[u], dv\r\n heappush(q, (dv, v))\r\n if paths_cnt: return dist, paths\r\n else: return dist\r\n\r\n def astar(self, src, tgt, heuristic_func):\r\n cost = [inf] * self.__N\r\n q = [(heuristic_func(src, tgt), 0, src)]\r\n while q:\r\n _, c, u = heappop(q)\r\n if u == tgt: return c\r\n if cost[u] != inf: continue\r\n cost[u] = c\r\n for v, e in self.edges[u].items():\r\n if cost[v] != inf: continue\r\n h = heuristic_func(v, tgt)\r\n nc = c + e.weight\r\n heappush(q, (h+nc, nc, v))\r\n return inf\r\n\r\n def bellman_ford(self, src):\r\n n = self.__N\r\n d = [inf] * n; d[src] = 0\r\n for _ in range(n-1):\r\n for u in range(n):\r\n for v, e in self.edges[u].items(): d[v] = min(d[v], d[u]+e.weight)\r\n for u in range(n):\r\n for v, e in self.edges[u].items():\r\n if d[u]+e.weight < d[v]: raise Exception('found negative cycle.')\r\n return d\r\n\r\n def bfs01(self, src=0):\r\n d = [inf]*self.__N; d[src] = 0\r\n q = deque([src])\r\n while q:\r\n u = q.popleft()\r\n for v, e in self.edges[u].items():\r\n dv = d[u] + e.weight\r\n if d[v] <= dv: continue\r\n d[v] = dv\r\n if e.weight: q.append(v)\r\n else: q.appendleft(v)\r\n return d\r\n\r\n\r\n def find_ancestors(self): # tree doubling.\r\n self.__ancestors = ancestors = [self.parent]\r\n for _ in range(max(self.depth).bit_length()):\r\n ancestors.append([ancestors[-1][u] for u in ancestors[-1]])\r\n\r\n\r\n def find_dist(self, u, v):\r\n return self.dist[u]+self.dist[v]-2*self.dist[self.__find_lca(u, v)]\r\n\r\n\r\n def __find_lca(self, u, v):\r\n du, dv = self.depth[u], self.depth[v]\r\n if du > dv:\r\n u, v = v, u\r\n du, dv = dv, du\r\n\r\n d = dv - 
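The `Graph.dijkstra` method above uses a heap with lazy deletion of stale entries. The same pattern as a standalone function over a plain adjacency list (the small example graph is an assumption for illustration):

import heapq

def dijkstra(n, adj, src):
    # adj[u] is a list of (v, w) pairs with non-negative weights
    INF = float("inf")
    dist = [INF] * n
    dist[src] = 0
    q = [(0, src)]
    while q:
        d, u = heapq.heappop(q)
        if d > dist[u]:
            continue          # stale queue entry, already improved
        for v, w in adj[u]:
            nd = d + w
            if nd < dist[v]:
                dist[v] = nd
                heapq.heappush(q, (nd, v))
    return dist

adj = [[(1, 2), (2, 5)], [(2, 1)], []]
print(dijkstra(3, adj, 0))    # [0, 2, 3]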
du\r\n for i in range(d.bit_length()): # up-stream\r\n if d>>i&1: v = self.__ancestors[i][v]\r\n if v == u: return v\r\n\r\n for i in range(du.bit_length()-1, -1, -1): # find direct child of LCA.\r\n nu, nv = self.__ancestors[i][u], self.__ancestors[i][v]\r\n if nu == nv: continue\r\n u, v = nu, nv\r\n\r\n return self.__ancestors[0][u]\r\n\r\n def init_dsu(self): # disjoint set union (union-find)\r\n n = self.__N\r\n self.parent = list(range(n))\r\n self.rank = [0] * n\r\n self.size = [1] * n\r\n\r\n def find(self, u):\r\n if self.parent[u] == u: return u\r\n self.parent[u] = self.find(self.parent[u])\r\n return self.parent[u]\r\n\r\n def unite(self, u, v):\r\n u, v = self.find(u), self.find(v)\r\n if u == v: return\r\n if self.rank[u] < self.rank[v]: u,v = v,u\r\n self.parent[v] = u\r\n self.size[u] += self.size[v]\r\n self.rank[u] = max(self.rank[u], self.rank[v]+1)\r\n\r\n def same(self, u, v): return self.find(u)==self.find(v)\r\n\r\n def groups(self, empty=True):\r\n n = self.__N\r\n groups = [[] for _ in range(n)]\r\n for u in range(n): groups[self.find(u)].append(u)\r\n return groups if empty else [g for g in groups if g]\r\n\r\n\r\n def scc(self): # strongly connected components\r\n n = self.__N\r\n visited, q, root, r = [False]*n, [], [None]*n, 0\r\n gg = self.__class__(n)\r\n for u in range(n):\r\n for v in self.edges[u]: gg.add_edge(v, u)\r\n\r\n def dfs(u):\r\n if visited[u]: return\r\n visited[u] = True\r\n for v in self.edges[u]: dfs(v)\r\n q.append(u)\r\n\r\n def rev_dfs(u, r):\r\n if root[u] is not None: return\r\n root[u] = r\r\n for v in gg.edges[u]: rev_dfs(v, r)\r\n\r\n for u in range(n): dfs(u)\r\n for u in q[::-1]: rev_dfs(u, r); r += 1\r\n return root\r\n\r\n\r\n def kruskal(self): # minimum spanning tree\r\n n = self.__N\r\n uf = self.__class__(n); uf.init_dsu()\r\n edges = sorted([(u,v,e.weight) for u in range(n) for v,e in self.edges[u].items()], key=lambda x: x[2])\r\n g = self.__class__(n)\r\n d = 0\r\n for u, v, w in edges:\r\n if uf.same(u,v): continue\r\n uf.unite(u, v); g.add_edge(u, v, weight=w); d += w\r\n return g, d\r\n\r\n def prim(self, src=0, return_parent=False): # minimum spanning tree\r\n n = self.__N\r\n g = self.__class__(n)\r\n parent, visited, dist = [None]*n, [False]*n, 0\r\n q = [(0, (src, src))]\r\n while q:\r\n d, (w, u) = heappop(q)\r\n if visited[u]: continue\r\n visited[u], parent[u] = True, w; dist += d; g.add_edge(w,u, weight=d)\r\n for v, e in self.edges[u].items():\r\n if not visited[v]: heappush(q, (e.weight, (u,v)))\r\n if return_parent: return g, dist, parent\r\n return g, dist\r\n\r\n def boruvka(self): # minimum spanning tree\r\n n = self.__N\r\n uf = self.__class__(n); uf.init_dsu()\r\n g = self.__class__(n)\r\n d = 0\r\n\r\n def dfs(u):\r\n if visited[u]: return (inf, (None, None))\r\n visited[u] = True\r\n cand = []\r\n for v, e in self.edges[u].items():\r\n if uf.same(u,v): cand.append(dfs(v)); continue\r\n cand.append((e.weight, (u,v)))\r\n return sorted(cand)[0]\r\n\r\n while len(set(uf.parent))!=1:\r\n edges, visited = [], [False]*n\r\n for u in range(n):\r\n if visited[u]: continue\r\n edges.append(dfs(u))\r\n for w, (u, v) in edges:\r\n if uf.same(u,v): continue\r\n g.add_edge(u,v, weight=w); uf.unite(u,v); d += w\r\n for u in range(n): uf.find(u)\r\n\r\n return g, d\r\n\r\n def tsp(self): # traveling salesperson problem\r\n pass\r\n\r\n class FenwickTree: # BIT (Binary Indexed Tree)\r\n def __init__(self, n):\r\n self.__N = n\r\n self.data = [0]*(n+1)\r\n\r\n def add(self, i, x):\r\n while i <= self.__N: self.data[i] 
+= x; i += i&-i\r\n\r\n def __sum(self, i):\r\n s = 0\r\n while i > 0: s += self.data[i]; i -= i&-i\r\n return s\r\n\r\n def sum(self, l, r): return self.__sum(r) - self.__sum(l-1)\r\n\r\n @staticmethod\r\n def triangle_area(p0, p1, p2, signed=False):\r\n x1, y1, x2, y2 = p1[0]-p0[0], p1[1]-p0[1], p2[0]-p0[0], p2[1]-p0[1]\r\n return (x1*y2 - x2*y1)/2 if signed else abs(x1*y2 - x2*y1)/2\r\n\r\n @classmethod\r\n def intersect(cls, seg1, seg2):\r\n (p1, p2), (p3, p4) = seg1, seg2\r\n t1 = cls.triangle_area(p1, p2, p3, signed=True)\r\n t2 = cls.triangle_area(p1, p2, p4, signed=True)\r\n t3 = cls.triangle_area(p3, p4, p1, signed=True)\r\n t4 = cls.triangle_area(p3, p4, p2, signed=True)\r\n return (t1*t2<0) & (t3*t4<0)\r\n\r\n\r\ndef cumxor(a): return reduce(xor, a, 0)\r\ndef cumor(a): return reduce(or_, a, 0)\r\n\r\ndef bit_count(n):\r\n cnt = 0\r\n while n: cnt += n&1; n >>= 1\r\n return cnt\r\n\r\n\r\nclass AtCoder:\r\n class ABC001:\r\n @staticmethod\r\n def a():\r\n h1, h2 = map(int, sys.stdin.read().split()); print(h1-h2)\r\n\r\n @staticmethod\r\n def d():\r\n def to_minuites(x):\r\n q, r = divmod(x, 100)\r\n return 60*q + r\r\n\r\n def to_hmform(x):\r\n q, r = divmod(x, 60)\r\n return 100*q + r\r\n\r\n n = int(sys.stdin.readline().rstrip())\r\n term = [0] * 2001\r\n for _ in range(n):\r\n s, e = map(to_minuites, map(int, sys.stdin.readline().rstrip().split('-')))\r\n s = s//5 * 5\r\n e = (e+4)//5 * 5\r\n term[s] += 1\r\n term[e+1] -= 1\r\n for i in range(2000):\r\n term[i+1] += term[i]\r\n\r\n res = []\r\n raining = False\r\n for i in range(2001):\r\n if term[i]:\r\n if not raining:\r\n s = i\r\n raining = True\r\n elif raining:\r\n res.append((s, i-1))\r\n raining = False\r\n for s, e in res:\r\n print(f'{to_hmform(s):04}-{to_hmform(e):04}')\r\n\r\n\r\n\r\n\r\n class ABC002:\r\n @staticmethod\r\n def a():\r\n print(max(map(int, sys.stdin.readline().split())))\r\n\r\n @staticmethod\r\n def b():\r\n vowels = set('aeiou')\r\n print(''.join([c for c in sys.stdin.readline().rstrip() if c not in vowels]))\r\n\r\n @staticmethod\r\n def c():\r\n print(GeometryTopology.triangle_area(*map(int, sys.stdin.readline().split())))\r\n\r\n @staticmethod\r\n def d():\r\n n, m = map(int, sys.stdin.readline().split())\r\n edges = set((x-1, y-1) for x, y in zip(*[map(int, sys.stdin.read().split())]*2))\r\n print(max(len(s) for i in range(1, 1<<n) for s in [[j for j in range(n) if i>>j&1]] if all((x, y) in edges for x, y in itertools.combinations(s, 2))))\r\n\r\n @staticmethod\r\n def d_2():\r\n n, m = map(int, sys.stdin.readline().split())\r\n relations = [1<<i for i in range(n)]\r\n for x, y in zip(*[map(int, sys.stdin.read().split())]*2):\r\n relations[x] |= 1<<(y-1); relations[y] |= 1<<(x-1)\r\n res = 0\r\n for i in range(1<<n):\r\n s, cnt = (1<<n)-1, 0\r\n for j in range(n):\r\n if i>>j & 1: t &= relations[j] | 1<<j; cnt += 1\r\n if s&i == i: res = max(res, cnt)\r\n print(res)\r\n\r\n class ABC003:\r\n @staticmethod\r\n def a():\r\n print((int(sys.stdin.readline().rstrip())+1)*5000)\r\n @staticmethod\r\n def b():\r\n atcoder = set('atcoder')\r\n s, t = sys.stdin.read().split()\r\n print(all(s[i]==t[i] or s[i]=='@' and t[i] in atcoder or t[i]=='@' and s[i] in atcoder for i in range(len(s))) and 'You can win' or 'You will lose')\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *r = map(int, sys.stdin.read().split()); print(reduce(lambda x, y: (x+y)/2, sorted(r)[-k:], 0))\r\n\r\n class ABC004:\r\n @staticmethod\r\n def a():\r\n print(int(sys.stdin.readline().rstrip())*2)\r\n @staticmethod\r\n def b():\r\n for l 
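As a usage note for the 1-indexed `FenwickTree` above (point update via `add`, range query via `sum(l, r)`), here is an equivalent self-contained sketch with a worked call; the class name and sample values are mine:

class BIT:
    # 1-indexed Fenwick tree, same interface idea as FenwickTree above
    def __init__(self, n):
        self.n = n
        self.data = [0] * (n + 1)

    def add(self, i, x):
        while i <= self.n:
            self.data[i] += x
            i += i & -i

    def prefix(self, i):          # sum of positions 1..i
        s = 0
        while i > 0:
            s += self.data[i]
            i -= i & -i
        return s

    def range_sum(self, l, r):    # sum of positions l..r inclusive
        return self.prefix(r) - self.prefix(l - 1)

bit = BIT(5)
for i, x in enumerate([3, 1, 4, 1, 5], start=1):
    bit.add(i, x)
print(bit.range_sum(2, 4))        # 1 + 4 + 1 = 6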
in [sys.stdin.readline().rstrip() for _ in range(4)][::-1]: print(l[::-1])\r\n @staticmethod\r\n def c():\r\n n = int(sys.stdin.readline().rstrip())%30\r\n res = list(range(1, 7))\r\n for i in range(n): i %= 5; res[i], res[i+1] = res[i+1], res[i]\r\n print(*res, sep='')\r\n\r\n\r\n\r\n class ABC005:\r\n @staticmethod\r\n def a():\r\n x, y = map(int, sys.stdin.readline().split())\r\n print(y//x)\r\n @staticmethod\r\n def b():\r\n n, *t = map(int, sys.stdin.read().split())\r\n print(min(t))\r\n @staticmethod\r\n def c():\r\n t = int(sys.stdin.readline().rstrip())\r\n n = int(sys.stdin.readline().rstrip())\r\n a = [int(x) for x in sys.stdin.readline().split()]\r\n m = int(sys.stdin.readline().rstrip())\r\n b = [int(x) for x in sys.stdin.readline().split()]\r\n i = 0\r\n for p in b:\r\n if i == n: print('no'); return\r\n while p-a[i] > t:\r\n i += 1\r\n if i == n: print('no'); return\r\n if a[i] > p: print('no'); return\r\n i += 1\r\n print('yes')\r\n @staticmethod\r\n def d():\r\n n = int(sys.stdin.readline().rstrip())\r\n d = np.array([sys.stdin.readline().split() for _ in range(n)], np.int64)\r\n s = d.cumsum(axis=0).cumsum(axis=1)\r\n s = np.pad(s, 1)\r\n max_del = np.zeros((n+1, n+1), dtype=np.int64)\r\n for y in range(1, n+1):\r\n for x in range(1, n+1):\r\n max_del[y, x] = np.amax(s[y:n+1, x:n+1] - s[0:n-y+1, x:n+1] - s[y:n+1, 0:n-x+1] + s[0:n-y+1, 0:n-x+1])\r\n res = np.arange(n**2+1)[:, None]\r\n i = np.arange(1, n+1)\r\n res = max_del[i, np.minimum(res//i, n)].max(axis=1)\r\n q = int(sys.stdin.readline().rstrip())\r\n p = np.array(sys.stdin.read().split(), dtype=np.int64)\r\n print(*res[p], sep='\\n')\r\n\r\n\r\n class ABC006:\r\n @staticmethod\r\n def a():\r\n n = sys.stdin.readline().rstrip()\r\n if '3' in n: print('YES')\r\n elif int(n)%3 == 0: print('YES')\r\n else: print('NO')\r\n\r\n @staticmethod\r\n def b():\r\n mod = 10007\r\n a = np.eye(N=3, k=-1, dtype=np.int64); a[0] = 1\r\n n = int(sys.stdin.readline().rstrip())\r\n a = Algebra.matrix_pow(a, n-1, mod)\r\n print(a[2][0])\r\n\r\n\r\n @staticmethod\r\n def c():\r\n n, m = map(int, sys.stdin.readline().split())\r\n cnt = [0, 0, 0]\r\n if m == 1: cnt = [-1, -1, -1]\r\n else:\r\n if m & 1: m -= 3; cnt[1] += 1; n -= 1\r\n cnt[2] = m//2 - n\r\n cnt[0] = n - cnt[2]\r\n if cnt[0]<0 or cnt[1]<0 or cnt[2]<0: print(-1, -1, -1)\r\n else: print(*cnt, sep=' ')\r\n\r\n @staticmethod\r\n def d():\r\n n, *c = map(int, sys.stdin.read().split())\r\n lis = [inf]*n\r\n for x in c: lis[bi_l(lis, x)] = x\r\n print(n - bi_l(lis, inf))\r\n\r\n class ABC007:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n print(n-1)\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip()\r\n if s == 'a': print(-1)\r\n else: print('a')\r\n @staticmethod\r\n def c():\r\n r, c = map(int, sys.stdin.readline().split())\r\n sy, sx = map(int, sys.stdin.readline().split())\r\n gy, gx = map(int, sys.stdin.readline().split())\r\n sy -= 1; sx -=1; gy -= 1; gx -= 1\r\n maze = [sys.stdin.readline().rstrip() for _ in range(r)]\r\n queue = deque([(sy, sx)])\r\n dist = np.full((r, c), np.inf); dist[sy, sx] = 0\r\n while queue:\r\n y, x = queue.popleft()\r\n for i, j in [(-1, 0), (1, 0), (0, -1), (0, 1)]:\r\n i += y; j += x\r\n if maze[i][j] == '#' or dist[i, j] != np.inf: continue\r\n dist[i, j] = dist[y, x] + 1\r\n queue.append((i, j))\r\n print(int(dist[gy, gx]))\r\n @staticmethod\r\n def d():\r\n ng = set([4, 9])\r\n def count(d):\r\n return d if d<=4 else d-1\r\n def f(n):\r\n x = [int(d) for d in str(n)]\r\n flg = True\r\n dp = 0\r\n 
for d in x:\r\n dp = dp*8 + flg*count(d)\r\n if d in ng: flg = False\r\n return n-(dp+flg)\r\n a, b = map(int, sys.stdin.readline().split())\r\n print(f(b) - f(a-1))\r\n\r\n class ABC008:\r\n @staticmethod\r\n def a():\r\n s, t = map(int, sys.stdin.readline().split())\r\n print(t-s+1)\r\n @staticmethod\r\n def b():\r\n n, *s = sys.stdin.read().split()\r\n res = defaultdict(int)\r\n for name in s: res[name] += 1\r\n print(sorted(res.items(), key=lambda x: x[1])[-1][0])\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n c = n - np.count_nonzero(a[:, None]%a, axis=1)\r\n print(np.sum((c+1)//2/c))\r\n\r\n @staticmethod\r\n def d():\r\n w, h, n, *xy = map(int, sys.stdin.read().split())\r\n *xy, = zip(*([iter(xy)]*2))\r\n\r\n @lru_cache(maxsize=None)\r\n def count(x1, y1, x2, y2):\r\n res = 0\r\n for x, y in xy:\r\n if not (x1 <= x <= x2 and y1 <= y <= y2): continue\r\n cnt = (x2-x1) + (y2-y1) + 1\r\n cnt += count(x1, y1, x-1, y-1)\r\n cnt += count(x1, y+1, x-1, y2)\r\n cnt += count(x+1, y1, x2, y-1)\r\n cnt += count(x+1, y+1, x2, y2)\r\n res = max(res, cnt)\r\n return res\r\n print(count(1, 1, w, h))\r\n\r\n\r\n class ABC009:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n print((n+1)//2)\r\n @staticmethod\r\n def b():\r\n n, *a = map(int, sys.stdin.read().split())\r\n print(sorted(set(a))[-2])\r\n @staticmethod\r\n def c():\r\n n, k = map(int, sys.stdin.readline().split())\r\n s = list(sys.stdin.readline().rstrip())\r\n cost = [1]*n\r\n r = k\r\n for i in range(n-1):\r\n q = []\r\n for j in range(i+1, n):\r\n if s[j] < s[i] and cost[i]+cost[j] <= r:\r\n heappush(q, (s[j], cost[i]+cost[j], -j))\r\n if not q: continue\r\n _, c, j = heappop(q); j = -j\r\n s[i], s[j] = s[j], s[i]\r\n r -= c\r\n cost[i] = cost[j] = 0\r\n print(''.join(s))\r\n\r\n @staticmethod\r\n def d():\r\n k, m = map(int, sys.stdin.readline().split())\r\n a = np.array([int(x) for x in sys.stdin.readline().split()])\r\n c = np.array([int(x) for x in sys.stdin.readline().split()])\r\n mask = (1<<32) - 1\r\n d = np.eye(k, k, -1, dtype=np.uint32) * mask; d[0] = c\r\n if m <= k: print(a[m-1]); return\r\n # print(Algebra.bitwise_mat_pow(d, m-k))\r\n # print(Algebra.bitwise_dot(Algebra.bitwise_mat_pow(d, m-k), a[::-1].reshape(-1, 1))[0].item())\r\n print(Algebra.bitwise_dot(Algebra.bitwise_mat_pow(d, m-k), a[::-1].reshape(-1, 1))[0][0])\r\n\r\n\r\n\r\n class ABC010:\r\n @staticmethod\r\n def a():\r\n print(sys.stdin.readline().rstrip()+'pp')\r\n @staticmethod\r\n def b():\r\n n, *a = map(int, sys.stdin.read().split())\r\n tot = 0\r\n for x in a:\r\n c = 0\r\n while x%2==0 or x%3==2:\r\n x -= 1\r\n c += 1\r\n tot += c\r\n print(tot)\r\n @staticmethod\r\n def c():\r\n sx, sy, gx, gy, t, v, n, *xy = map(int, sys.stdin.read().split())\r\n x, y = np.array(xy).reshape(-1, 2).T\r\n def dist(x1, y1, x2, y2):\r\n return np.sqrt((x2-x1)**2 + (y2-y1)**2)\r\n ans = 'YES' if (dist(sx, sy, x, y)+dist(x, y, gx, gy) <= v*t).any() else 'NO'\r\n print(ans)\r\n\r\n @staticmethod\r\n def d():\r\n n, g, e = map(int, sys.stdin.readline().split())\r\n p = [int(x) for x in sys.stdin.readline().split()]\r\n x, y = [], []\r\n for _ in range(e):\r\n a, b = map(int, sys.stdin.readline().split())\r\n x.append(a); y.append(b)\r\n x.append(b); y.append(a)\r\n for a in p:\r\n x.append(a)\r\n y.append(n)\r\n if not x:\r\n print(0)\r\n return\r\n c = [1] * len(x)\r\n min_cut = maximum_flow(csr_matrix((c, (x, y)), (n+1, n+1)), source=0, sink=n).flow_value\r\n print(min_cut)\r\n\r\n 
@staticmethod\r\n def d_2():\r\n n, g, e = map(int, sys.stdin.readline().split())\r\n graph = nx.DiGraph()\r\n graph.add_nodes_from(range(n+1))\r\n for p in [int(x) for x in sys.stdin.readline().split()]:\r\n graph.add_edge(p, n, capacity=1)\r\n for _ in range(e):\r\n a, b = map(int, sys.stdin.readline().split())\r\n graph.add_edge(a, b, capacity=1)\r\n graph.add_edge(b, a, capacity=1)\r\n print(nx.minimum_cut_value(graph, 0, n))\r\n\r\n @staticmethod\r\n def d_3():\r\n n, q, m = map(int, sys.stdin.readline().split())\r\n g = GeometryTopology.Graph(n+1)\r\n # for i in range(n+1): g.add_node(i)\r\n for p in [int(x) for x in sys.stdin.readline().split()]:\r\n g.add_edge(p, n, capacity=1)\r\n for a, b in zip(*[map(int, sys.stdin.read().split())]*2):\r\n g.add_edge(a, b, capacity=1)\r\n g.add_edge(b, a, capacity=1)\r\n print(g.dinic(0, n))\r\n\r\n\r\n\r\n class ABC011:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n print(n%12+1)\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip()\r\n print(s[0].upper()+s[1:].lower())\r\n @staticmethod\r\n def c():\r\n n, *ng = map(int, sys.stdin.read().split())\r\n ng = set(ng)\r\n if n in ng: print('NO')\r\n else:\r\n r = 100\r\n while n > 0:\r\n if r == 0: print('NO'); return\r\n for i in range(3, 0, -1):\r\n if (n-i) in ng: continue\r\n n -= i\r\n r -= 1\r\n break\r\n else: print('NO'); return\r\n print('YES')\r\n\r\n @staticmethod\r\n def d():\r\n n, d, x, y = map(int, sys.stdin.read().split())\r\n x, y = abs(x), abs(y)\r\n if x%d or y%d: print(0); return\r\n x, y = x//d, y//d\r\n r = n - (x+y)\r\n if r < 0 or r&1: print(0); return\r\n\r\n res = 0\r\n half_p = pow(1/2, n)\r\n for d in range(r//2 + 1): # 0 <= d <= r//2, south\r\n south, north = d, y+d\r\n west = (r - 2*d)//2\r\n res += half_p * comb(n, south, exact=True) * comb(n-south, north, exact=True)\\\r\n * comb(n-south-north, west, exact=True) * half_p\r\n print(res)\r\n\r\n\r\n class ABC012:\r\n @staticmethod\r\n def a():\r\n a, b = map(int, sys.stdin.readline().split())\r\n print(b, a)\r\n\r\n @staticmethod\r\n def b():\r\n n = int(sys.stdin.readline().rstrip())\r\n h, n = divmod(n, 3600)\r\n m, s = divmod(n, 60)\r\n print(f'{h:02}:{m:02}:{s:02}')\r\n\r\n @staticmethod\r\n def c():\r\n n = 2025 - int(sys.stdin.readline().rstrip())\r\n res = []\r\n for i in range(1, 10):\r\n if n%i != 0 or n//i > 9: continue\r\n res.append(f'{i} x {n//i}')\r\n print(*sorted(res), sep='\\n')\r\n\r\n @staticmethod\r\n def d():\r\n n, m, *abt = map(int, sys.stdin.read().split())\r\n a, b, t = np.array(abt).reshape(m, 3).T\r\n res = shortest_path(csr_matrix((t, (a-1, b-1)), (n, n)), method='FW', directed=False)\r\n print(res.max(axis=-1).min().astype(np.int64))\r\n\r\n @staticmethod\r\n def d_2():\r\n n, m, *abt = map(int, sys.stdin.read().split())\r\n g = GeometryTopology.Graph(n)\r\n for a, b, t in zip(*[iter(abt)]*3):\r\n a -= 1; b -= 1\r\n g.add_edge(a, b, weight=t)\r\n g.add_edge(b, a, weight=t)\r\n\r\n print(min(max(d) for d in g.floyd_warshall()))\r\n\r\n\r\n\r\n class ABC013:\r\n @staticmethod\r\n def a():\r\n print(ord(sys.stdin.readline().rstrip()) - ord('A') + 1)\r\n\r\n @staticmethod\r\n def b():\r\n a, b = map(int, sys.stdin.read().split())\r\n d = abs(a - b)\r\n print(min(d, 10-d))\r\n\r\n @staticmethod\r\n def c():\r\n n, h, a, b, c, d, e = map(int, sys.stdin.read().split())\r\n y = np.arange(n+1)\r\n x = (n*e-h-(d+e)*y)//(b+e) + 1\r\n np.maximum(x, 0, out=x)\r\n np.minimum(x, n-y, out=x)\r\n print(np.amin(a*x + c*y))\r\n\r\n @staticmethod\r\n def d():\r\n 
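# ABC013 D: compress the whole swap sequence into a single permutation of the n positions, then apply that permutation d times by binary exponentiation (repeatedly composing it with itself via a = a[a]).\r\n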
n, m, d, *a = map(int, sys.stdin.read().split())\r\n res = list(range(n))\r\n def swap(i, j): res[i], res[j] = res[j], res[i]\r\n for i in a[::-1]: swap(i-1, i)\r\n res = np.array(res)\r\n def binary_method(a, p):\r\n b = np.arange(n)\r\n while p:\r\n if p&1: b = a[b]\r\n p >>= 1\r\n a = a[a]\r\n return b\r\n print(*(binary_method(res, d)+1), sep='\\n')\r\n\r\n class ABC014:\r\n @staticmethod\r\n def a():\r\n a, b = map(int, sys.stdin.read().split())\r\n print((a+b-1)//b * b - a)\r\n\r\n @staticmethod\r\n def b():\r\n n, x, *a = map(int, sys.stdin.read().split())\r\n print(sum(a[i] for i in range(n) if x>>i&1))\r\n\r\n @staticmethod\r\n def c():\r\n n, *ab = map(int, sys.stdin.read().split())\r\n a, b = np.array(ab).reshape(n, 2).T\r\n res = np.zeros(10**6+2, dtype=np.int64)\r\n np.add.at(res, a, 1)\r\n np.subtract.at(res, b+1, 1)\r\n np.cumsum(res, out=res)\r\n print(res.max())\r\n\r\n @staticmethod\r\n def d():\r\n n = int(sys.stdin.readline().rstrip())\r\n g = GeometryTopology.Graph(n)\r\n for _ in range(n-1):\r\n x, y = map(int, sys.stdin.readline().split())\r\n x -= 1; y -= 1\r\n g.add_edge(x, y, weight=1)\r\n g.add_edge(y, x, weight=1)\r\n\r\n g.bfs(0)\r\n g.find_ancestors()\r\n\r\n q, *ab = map(int, sys.stdin.read().split())\r\n for a, b in zip(*[iter(ab)]*2):\r\n a -= 1; b -= 1\r\n print(g.find_dist(a, b) + 1)\r\n\r\n class ABC015:\r\n @staticmethod\r\n def a():\r\n a, b = sys.stdin.read().split()\r\n print(a if len(a) > len(b) else b)\r\n\r\n @staticmethod\r\n def b():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n print(np.ceil(a[np.nonzero(a)[0]].sum() / np.count_nonzero(a)).astype(np.int8))\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *t = map(int, sys.stdin.read().split())\r\n t = np.array(t).reshape(n, k)\r\n x = np.zeros((1, 1), dtype=np.int8)\r\n for i in range(n):\r\n x = x.reshape(-1, 1) ^ t[i]\r\n print('Found' if np.count_nonzero(x==0) > 0 else 'Nothing')\r\n\r\n @staticmethod\r\n def d():\r\n w, n, k, *ab = map(int, sys.stdin.read().split())\r\n dp = np.zeros((k+1, w+1), dtype=np.int32)\r\n for a, b in zip(*[iter(ab)]*2): np.maximum(dp[1:,a:], dp[:-1,:-a]+b, out=dp[1:,a:])\r\n print(dp[k][w])\r\n\r\n\r\n class ABC016:\r\n @staticmethod\r\n def a():\r\n m, d = map(int, sys.stdin.readline().split())\r\n print('YES' if m%d == 0 else 'NO')\r\n\r\n @staticmethod\r\n def b():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n f1, f2 = a+b==c, a-b==c\r\n if f1 & f2: print('?')\r\n elif f1 & (~f2): print('+')\r\n elif (~f1) & f2: print('-')\r\n else: print('!')\r\n\r\n @staticmethod\r\n def c():\r\n n, _, *ab = map(int, sys.stdin.read().split())\r\n f = [0] * n\r\n for a, b in zip(*[iter(ab)]*2):\r\n a -= 1; b -= 1\r\n f[a] |= 1<<b\r\n f[b] |= 1<<a\r\n res = [bit_count(cumor(f[j] for j in range(n) if f[i]>>j&1) & ~(f[i] | 1<<i)) for i in range(n)]\r\n print(*res, sep='\\n')\r\n\r\n @staticmethod\r\n def d():\r\n sx, sy, gx, gy = map(int, sys.stdin.readline().split())\r\n seg1 = ((sx, sy), (gx, gy))\r\n n = int(sys.stdin.readline().rstrip())\r\n p1 = np.array(sys.stdin.read().split(), dtype=np.int64).reshape(n, 2).T\r\n p2 = np.hstack((p1[:, 1:], p1[:, :1]))\r\n seg2 = (p1, p2)\r\n print(np.count_nonzero(GeometryTopology.intersect(seg1, seg2))//2 + 1)\r\n\r\n class ABC017:\r\n @staticmethod\r\n def a():\r\n s, e = np.array(sys.stdin.read().split(), dtype=np.int16).reshape(3, 2).T\r\n print((s // 10 * e).sum())\r\n\r\n @staticmethod\r\n def b():\r\n choku_tail = set('ch, o, k, u'.split(', '))\r\n def is_choku(s):\r\n if s == '': return True\r\n if 
len(s)>=1 and (s[-1] in choku_tail) and is_choku(s[:-1]): return True\r\n if len(s)>=2 and (s[-2:] in choku_tail) and is_choku(s[:-2]): return True\r\n return False\r\n print('YES' if is_choku(sys.stdin.readline().rstrip()) else 'NO')\r\n\r\n @staticmethod\r\n def c():\r\n n, m, *lrs = map(int, sys.stdin.read().split())\r\n l, r, s = np.array(lrs).reshape(n, 3).T\r\n score = np.zeros((m+1, ), dtype=np.int32)\r\n np.add.at(score, l-1, s)\r\n np.subtract.at(score, r, s)\r\n np.cumsum(score, out=score)\r\n print(s.sum() - score[:m].min())\r\n\r\n @staticmethod\r\n def d():\r\n n, m, *f = map(int, sys.stdin.read().split())\r\n prev = [0] * (n+1)\r\n tmp = defaultdict(int)\r\n for i in range(n):\r\n prev[i+1] = tmp[f[i]]\r\n tmp[f[i]] = i+1\r\n\r\n dp = [0] * (n+1); dp[0] = 1\r\n l, s = 0, dp[0]\r\n for i in range(1, n+1):\r\n while l < prev[i]:\r\n s = (s - dp[l]) % MOD\r\n l += 1\r\n dp[i] = s\r\n s = (s + dp[i]) % MOD\r\n print(dp[n])\r\n\r\n class ABC018:\r\n @staticmethod\r\n def a():\r\n *a, = map(int, sys.stdin.read().split())\r\n a = sorted(enumerate(a), key=lambda x: -x[1])\r\n res = [None] * 3\r\n for i in range(3):\r\n res[a[i][0]] = i+1\r\n print(*res, sep='\\n')\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip()\r\n n, *lr = map(int, sys.stdin.read().split())\r\n for l, r in zip(*[iter(lr)]*2):\r\n l -= 1; r -= 1\r\n s = s[:l] + s[l:r+1][::-1] + s[r+1:]\r\n print(s)\r\n\r\n @staticmethod\r\n def c():\r\n r, c, k = map(int, sys.stdin.readline().split())\r\n s = np.array([list(s) for s in sys.stdin.read().split()])\r\n s = np.pad(s, 1, constant_values='x')\r\n\r\n a = np.zeros_like(s, dtype=np.float64)\r\n a[s=='o'] = np.inf\r\n for i in range(1, r+1): np.minimum(a[i-1,:]+1, a[i,:], out=a[i,:])\r\n for i in range(r, 0, -1): np.minimum(a[i+1,:]+1, a[i,:], out=a[i,:])\r\n for j in range(1, c+1): np.minimum(a[:,j-1]+1, a[:,j], out=a[:,j])\r\n for j in range(c, 0, -1): np.minimum(a[:,j+1]+1, a[:,j], out=a[:,j])\r\n print(np.count_nonzero(a>=k))\r\n\r\n @staticmethod\r\n def c_2():\r\n r, c, k = map(int, sys.stdin.readline().split())\r\n s = np.array([list(s) for s in sys.stdin.read().split()])\r\n s = np.pad(s, 1, constant_values='x')\r\n a = (s=='o').astype(np.int16)\r\n a = distance_transform_cdt(a, metric='taxicab')\r\n print(np.count_nonzero(a>=k))\r\n\r\n @staticmethod\r\n def d():\r\n n, m, p, q, r, *xyz = map(int, sys.stdin.read().split())\r\n x, y, z = np.array(xyz).reshape(r, 3).T\r\n h = np.zeros((n, m), dtype=np.int32); h[x-1, y-1] = z\r\n g = np.array([*itertools.combinations(range(n), p)])\r\n print(np.sort(h[g].sum(axis=1), axis=1)[:,-q:].sum(axis=1).max())\r\n\r\n\r\n class ABC019:\r\n @staticmethod\r\n def a():\r\n *a, = map(int, sys.stdin.readline().split())\r\n print(sorted(a)[1])\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip() + '$'\r\n cnt = 0\r\n prev = '$'\r\n t = ''\r\n for c in s:\r\n if c == prev: cnt += 1; continue\r\n t += prev+str(cnt)\r\n prev = c; cnt = 1\r\n print(t[2:])\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n res = set()\r\n for x in a:\r\n while not x&1:\r\n x >>= 1\r\n res.add(x)\r\n print(len(res))\r\n\r\n @staticmethod\r\n def d():\r\n def inquire(u, v):\r\n print(f'? {u} {v}'.format(u, v), flush=True)\r\n return int(sys.stdin.readline().rstrip())\r\n\r\n n = int(sys.stdin.readline().rstrip())\r\n u = sorted([(inquire(1, v), v) for v in range(2, n+1)])[-1][1]\r\n d = max((inquire(u, v)) for v in range(1, n+1) if u!=v)\r\n print(f'! 
{d}')\r\n\r\n class ABC020:\r\n @staticmethod\r\n def a():\r\n print('ABC' if int(sys.stdin.readline().rstrip())==1 else 'chokudai')\r\n\r\n @staticmethod\r\n def b():\r\n a, b = sys.stdin.readline().split()\r\n print(int(a+b) * 2)\r\n\r\n @staticmethod\r\n def c():\r\n h, w, t = map(int, sys.stdin.readline().split())\r\n s = [list(s) for s in sys.stdin.read().split()]\r\n for i in range(h):\r\n for j in range(w):\r\n if s[i][j] == 'S': sy, sx = i, j\r\n if s[i][j] == 'G': gy, gx = i, j\r\n s[sy][sx] = s[gy][gx] = '.'\r\n source, target = sy*w+sx, gy*w+gx\r\n\r\n def heuristic_function(u, v=target):\r\n uy, ux = divmod(u, w)\r\n vy, vx = divmod(v, w)\r\n return abs(vy-uy) + abs(ux-vx)\r\n\r\n def min_time(x):\r\n g = GeometryTopology.Graph(h*w)\r\n # g = nx.DiGraph()\r\n\r\n for i in range(h):\r\n for j in range(w):\r\n u = i*w + j\r\n if i > 0: g.add_edge(u, (i-1)*w+j, weight=(1 if s[i-1][j]=='.' else x))\r\n if i < h-1: g.add_edge(u, (i+1)*w+j, weight=(1 if s[i+1][j]=='.' else x))\r\n if j > 0: g.add_edge(u, i*w+j-1, weight=(1 if s[i][j-1]=='.' else x))\r\n if j < w-1: g.add_edge(u, i*w+j+1, weight=(1 if s[i][j+1]=='.' else x))\r\n\r\n return g.dijkstra(source)[target]\r\n return g.astar(source, target, heuristic_function)\r\n # return nx.dijkstra_path_length(g, source, target)\r\n # return nx.astar_path_length(g, source, target, heuristic_function)\r\n\r\n def binary_search():\r\n lo, hi = 1, t+1\r\n while lo+1 < hi:\r\n x = (lo+hi)//2\r\n if min_time(x) > t:\r\n hi = x\r\n else:\r\n lo = x\r\n return lo\r\n\r\n print(binary_search())\r\n\r\n @staticmethod\r\n def d():\r\n n, k = map(int, sys.stdin.readline().split())\r\n div = sorted(NumberTheory.find_divisors(k))\r\n l = len(div)\r\n s = [0] * l\r\n for i, d in enumerate(div): s[i] = (1+n//d)*(n//d)//2 * d % MOD\r\n for i in range(l-1, -1, -1):\r\n for j in range(i+1, l):\r\n if div[j]%div[i]: continue\r\n s[i] = (s[i]-s[j])%MOD\r\n\r\n print(sum(s[i]*k//div[i]%MOD for i in range(l))%MOD) # ans is LCM.\r\n\r\n class ABC021:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n s = [1<<i for i in range(5) if n>>i&1]\r\n print(len(s), *s, sep='\\n')\r\n\r\n @staticmethod\r\n def b():\r\n n, a, b, k, *p = map(int, sys.stdin.read().split())\r\n print('YES' if len(set(p)|set([a, b])) == k+2 else 'NO')\r\n\r\n @staticmethod\r\n def c():\r\n n, a, b, m, *xy = map(int, sys.stdin.read().split())\r\n x, y = np.array(xy).reshape(m, 2).T - 1\r\n a -= 1; b -= 1\r\n g = csgraph_to_dense(csr_matrix((np.ones(m), (x, y)), (n, n), dtype=np.int8))\r\n g = np.logical_or(g, g.T)\r\n paths = np.zeros(n, dtype=np.int64).reshape(-1, 1)\r\n paths[a, 0] = 1\r\n while not paths[b, 0]:\r\n paths = np.dot(g, paths) % MOD\r\n print(paths[b, 0])\r\n\r\n @staticmethod\r\n def c_2():\r\n n, a, b, m, *xy = map(int, sys.stdin.read().split())\r\n a -= 1; b -= 1\r\n g = GeometryTopology.Graph()\r\n\r\n for x, y in zip(*[iter(xy)]*2):\r\n x -= 1; y -= 1\r\n g.add_edge(x, y, weight=1)\r\n g.add_edge(y, x, weight=1)\r\n\r\n dist, paths = g.dijkstra(a, paths_cnt=True, mod=MOD)\r\n print(paths[b])\r\n\r\n @staticmethod\r\n def d():\r\n n, k = map(int, sys.stdin.read().split())\r\n cn = Combinatorics.CombinationsMod()\r\n print(cn(n+k-1, k))\r\n\r\n\r\n class ABC022:\r\n @staticmethod\r\n def a():\r\n n, s, t, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n np.cumsum(a, out=a)\r\n print(((s<=a) & (a<=t)).sum())\r\n\r\n @staticmethod\r\n def b():\r\n n, *a = map(int, sys.stdin.read().split())\r\n c = Counter(a)\r\n 
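# duplicates only: the answer is the total number of values minus the number of distinct values (each value's first occurrence is excluded).\r\n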
print(sum(c.values())-len(c))\r\n\r\n @staticmethod\r\n def c():\r\n n, m, *uvl = map(int, sys.stdin.read().split())\r\n u, v, l = np.array(uvl).reshape(m, 3).T\r\n u -= 1; v -= 1\r\n g = csgraph_to_dense(csr_matrix((l, (u,v)), (n,n)))\r\n g += g.T\r\n g[g==0] = np.inf\r\n dist0 = g[0].copy()\r\n g[0] = 0; g[:, 0] = 0\r\n dist = shortest_path(g, method='FW', directed=False)\r\n u, v = np.array([*itertools.combinations(range(1,n), 2)]).T\r\n res = (dist0[u]+dist[u,v]+dist0[v]).min()\r\n print(-1 if res==np.inf else int(res))\r\n\r\n @staticmethod\r\n def d():\r\n n, *ab = map(int, sys.stdin.read().split())\r\n c = np.array(ab).reshape(2,n,2)\r\n g = c.mean(axis=1)\r\n d = np.sqrt(((c-g[:,None,:])**2).sum(axis=-1)).sum(axis=1)\r\n print(d[1]/d[0])\r\n\r\n\r\n class ABC023:\r\n @staticmethod\r\n def a():\r\n print(sum(divmod(int(sys.stdin.readline().rstrip()), 10)))\r\n\r\n @staticmethod\r\n def b():\r\n n, s = sys.stdin.read().split()\r\n n = int(n)\r\n t = 'b'\r\n for i in range(n//2):\r\n if i%3==0: t = 'a'+t+'c'\r\n elif i%3==1: t = 'c'+t+'a'\r\n else: t = 'b'+t+'b'\r\n print(n//2 if t==s else -1)\r\n\r\n @staticmethod\r\n def b_2():\r\n n, s = sys.stdin.read().split()\r\n n = int(n)\r\n if n&1^1: print(-1); return\r\n a = list('abc')\r\n i = (1-n//2)%3\r\n for c in s:\r\n if c != a[i]:\r\n print(-1); return\r\n i = (i+1) % 3\r\n print(n//2)\r\n\r\n @staticmethod\r\n def c():\r\n h, w, k, n, *rc = map(int, sys.stdin.read().split())\r\n r, c = np.array(rc).reshape(n,2).T - 1\r\n rb = np.bincount(r, minlength=h)\r\n cb = np.bincount(c, minlength=w)\r\n rbb = np.bincount(rb, minlength=k+1)\r\n cbb = np.bincount(cb, minlength=k+1)\r\n tot = (rbb[:k+1]*cbb[k::-1]).sum()\r\n real = np.bincount(rb[r]+cb[c]-1, minlength=k+1)\r\n print(tot-real[k-1]+real[k])\r\n\r\n @staticmethod\r\n def d():\r\n n, *hs = map(int, sys.stdin.read().split())\r\n h, s = np.array(hs).reshape(n,2).T\r\n\r\n t = np.arange(n)\r\n def is_ok(x): return np.all(np.sort((x-h)//s) >= t)\r\n def binary_search():\r\n lo, hi = 0, 10**14\r\n while lo+1 < hi:\r\n x = (lo+hi)//2\r\n if is_ok(x): hi = x\r\n else: lo = x\r\n return hi\r\n\r\n print(binary_search())\r\n\r\n class ABC024:\r\n @staticmethod\r\n def a():\r\n a, b, c, k, s, t = map(int, sys.stdin.read().split())\r\n print(a*s + b*t - c*(s+t)*(s+t>=k))\r\n\r\n @staticmethod\r\n def b():\r\n n, t, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n print(np.minimum(a[1:]-a[:-1], t).sum() + t)\r\n\r\n @staticmethod\r\n def c():\r\n n, d, k, *lrst = map(int, sys.stdin.read().split())\r\n lrst = np.array(lrst)\r\n lr = lrst[:2*d].reshape(d,2)\r\n s, t = lrst[2*d:].reshape(k,2).T\r\n day = np.zeros((k,),dtype=np.int32)\r\n for i in range(d):\r\n l, r = lr[i]\r\n move = (l<=s)&(s<=r)&(s!=t)\r\n reach = move&(l<=t)&(t<=r)\r\n s[move&(s<t)] = r\r\n s[move&(s>t)] = l\r\n s[reach] = t[reach]; day[reach] = i+1\r\n print(*day, sep='\\n')\r\n\r\n @staticmethod\r\n def d():\r\n a, b, c = map(int, sys.stdin.read().split())\r\n p = MOD\r\n denom = pow(a*b%p - b*c%p + c*a%p, p-2, p)\r\n w = (b*c-a*b)%p*denom%p\r\n h = (b*c-a*c)%p*denom%p\r\n print(h,w)\r\n\r\n class ABC025:\r\n @staticmethod\r\n def a():\r\n s, n = sys.stdin.read().split()\r\n n = int(n)\r\n i, j = divmod(n-1, 5)\r\n print(s[i]+s[j])\r\n\r\n @staticmethod\r\n def b():\r\n n, a, b = map(int, sys.stdin.readline().split())\r\n res = defaultdict(int)\r\n for _ in range(n):\r\n s, d = sys.stdin.readline().split()\r\n d = int(d)\r\n res[s] += min(max(d,a),b)\r\n res = res['East'] - res['West']\r\n if res == 0: ans = 
0\r\n elif res > 0: ans = f'East {res}'\r\n else: ans = f'West {-res}'\r\n print(ans)\r\n\r\n @staticmethod\r\n def c():\r\n b = [0] * 6\r\n for i in range(2):\r\n *row, = map(int, sys.stdin.readline().split())\r\n for j in range(3):\r\n b[i*3+j] = row[j]\r\n c = [0] * 8\r\n for i in range(3):\r\n *row, = map(int, sys.stdin.readline().split())\r\n for j in range(2):\r\n c[i*3+j] = row[j]\r\n tot = sum(b) + sum(c)\r\n\r\n @lru_cache(maxsize=None)\r\n def f(s=tuple(0 for _ in range(9))):\r\n if all(s):\r\n res = 0\r\n for i in range(6): res += (s[i]==s[i+3])*b[i]\r\n for i in range(8): res += (s[i]==s[i+1])*c[i]\r\n return res\r\n cand = [i for i in range(9) if not s[i]]\r\n flg = len(cand)&1\r\n s = list(s)\r\n res = []\r\n for i in cand:\r\n s[i] = (flg^1)+1\r\n res.append(f(tuple(s)))\r\n s[i] = 0\r\n return sorted(res, reverse=flg)[0]\r\n\r\n a = f(); b = tot-a\r\n print(a)\r\n print(b)\r\n\r\n\r\n\r\n class ABC026:\r\n @staticmethod\r\n def a():\r\n a = int(sys.stdin.readline().rstrip())\r\n print(a//2 * (a-a//2))\r\n\r\n @staticmethod\r\n def b():\r\n n, *r = map(int, sys.stdin.read().split())\r\n s = np.pi * np.array([0]+r)**2; s.sort()\r\n res = s[n::-2].sum() - s[n-1::-2].sum()\r\n print(res)\r\n\r\n @staticmethod\r\n def c():\r\n n, *b = map(int, sys.stdin.read().split())\r\n g = GeometryTopology.Graph()\r\n for i in range(1, n): g.add_edge(b[i-1]-1, i, weight=1)\r\n\r\n def f(u=0):\r\n if not g.edges[u]: return 1\r\n s = [f(v) for v in g.edges[u]]\r\n return max(s) + min(s) + 1\r\n\r\n print(f())\r\n\r\n @staticmethod\r\n def d():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n def f(t): return a*t + b*np.sin(c*t*np.pi) - 100\r\n print(optimize.brenth(f, 0, 200))\r\n\r\n\r\n class ABC027:\r\n @staticmethod\r\n def a():\r\n l = [int(l) for l in sys.stdin.readline().split()]\r\n l.sort()\r\n print(l[2] if l[0]==l[1] else l[0])\r\n\r\n @staticmethod\r\n def b():\r\n n, *a = map(int, sys.stdin.read().split())\r\n m, r = divmod(sum(a), n)\r\n if r: print(-1); return\r\n population = 0\r\n towns = 0\r\n cnt = 0\r\n for x in a:\r\n population += x\r\n towns += 1\r\n if population/towns != m: cnt+=1; continue\r\n population, towns = 0, 0\r\n print(cnt)\r\n\r\n @staticmethod\r\n def c():\r\n n = int(sys.stdin.readline().rstrip())\r\n flg = n.bit_length()&1^1\r\n t = 0\r\n x = 1\r\n while x <= n:\r\n t += 1\r\n x = 2*x+1 if t&1^flg else 2*x\r\n print('Aoki' if t&1 else 'Takahashi')\r\n\r\n\r\n class ABC028:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n print('Bad' if n<60 else 'Good' if n<90 else 'Great' if n<100 else 'Perfect')\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip()\r\n cnt = Counter(s)\r\n print(*[cnt.get(c, 0) for c in 'ABCDEF'])\r\n\r\n @staticmethod\r\n def c():\r\n a, b, c, d, e = map(int, sys.stdin.readline().split())\r\n print(max(b+c+e, a+d+e))\r\n\r\n @staticmethod\r\n def d():\r\n n, k = map(int, sys.stdin.readline().split())\r\n c = 3*2*(n-k)*(k-1) + 3*(n-1) + 1\r\n print(c/n**3)\r\n\r\n\r\n class ABC029:\r\n @staticmethod\r\n def a():\r\n print(sys.stdin.readline().rstrip()+'s')\r\n\r\n @staticmethod\r\n def b():\r\n print(sum('r' in s for s in sys.stdin.read().split()))\r\n\r\n @staticmethod\r\n def c():\r\n print(*[''.join(s) for s in itertools.product('abc', repeat=int(sys.stdin.readline().rstrip()))], sep='\\n')\r\n\r\n @staticmethod\r\n def d():\r\n n = int(sys.stdin.readline().rstrip())\r\n print(sum(n//10**(i+1)*10**i + min(max((n%10**(i+1)-10**i+1), 0), 10**i) for i in range(9)))\r\n\r\n class 
ABC030:\r\n @staticmethod\r\n def a():\r\n a, b, c, d = map(int, sys.stdin.readline().split())\r\n e, f = b*c, d*a\r\n print('TAKAHASHI' if e>f else 'AOKI' if f>e else 'DRAW')\r\n\r\n @staticmethod\r\n def b():\r\n n, m = map(int, sys.stdin.readline().split())\r\n n = (n%12 + m/60)*30; m *= 6\r\n d = abs(n-m)\r\n print(min(d, 360-d))\r\n\r\n @staticmethod\r\n def c():\r\n n, m = map(int, sys.stdin.readline().split())\r\n x, y = map(int, sys.stdin.readline().split())\r\n a = [int(x) for x in sys.stdin.readline().split()]\r\n b = [int(x) for x in sys.stdin.readline().split()]\r\n\r\n t = 0\r\n p = 1\r\n cnt = 0\r\n while True:\r\n if p:\r\n i = bi_l(a, t)\r\n if i == n: break\r\n t = a[i] + x\r\n else:\r\n i = bi_l(b, t)\r\n if i == m: break\r\n t = b[i] + y\r\n cnt += 1\r\n p ^= 1\r\n print(cnt)\r\n\r\n @staticmethod\r\n def d():\r\n n, a = map(int , sys.stdin.readline().split()); a -= 1\r\n k = sys.stdin.readline().rstrip()\r\n b = [int(x)-1 for x in sys.stdin.readline().split()]\r\n\r\n c = [None] * n\r\n for i in range(n+1):\r\n if str(i)==k: print(a+1);return\r\n if c[a] is not None: l, d = i-c[a], c[a];break\r\n c[a] = i; a = b[a]\r\n\r\n r = [None] * len(k); r[0] = 1\r\n for i in range(len(k)-1): r[i+1] = r[i]*10%l\r\n k = [int(c) for c in k][::-1]\r\n d = (sum(r[i]*k[i] for i in range(len(k)))-d) % l\r\n for _ in range(d): a = b[a]\r\n print(a+1)\r\n\r\n @staticmethod\r\n def d_2():\r\n n, a, k, *b = map(int, sys.stdin.read().split())\r\n a -= 1; b = [x-1 for x in b]\r\n c = [None]*n\r\n for i in range(n+1):\r\n if i==k: print(a+1); return\r\n if c[a] is not None:\r\n for _ in range((k-c[a])%(i-c[a])): a = b[a]\r\n print(a+1); return\r\n c[a] = i; a = b[a]\r\n\r\n\r\n class ABC031:\r\n @staticmethod\r\n def a():\r\n a, d = map(int, sys.stdin.readline().split())\r\n if a > d: a,d = d,a\r\n print((a+1)*d)\r\n\r\n @staticmethod\r\n def b():\r\n l, h, n, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n res = np.maximum(l-a, 0)\r\n res[a>h] = -1\r\n print(*res, sep='\\n')\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n np.cumsum(a[::2], out=a[::2])\r\n np.cumsum(a[1::2], out=a[1::2])\r\n a = list(a) + [0]*2\r\n\r\n def score(i, j):\r\n if i > j: i, j = j, i\r\n if (j-i)&1: x, y = a[j-1]-a[i-2], a[j]-a[i-1]\r\n else: x, y = a[j]-a[i-2], a[j-1]-a[i-1]\r\n return x, y\r\n\r\n res = -inf\r\n for i in range(n):\r\n s = -inf\r\n for j in range(n):\r\n if i==j: continue\r\n x, y = score(i, j)\r\n if y>s: s,t = y,x\r\n res = max(res, t)\r\n print(res)\r\n\r\n @staticmethod\r\n def d():\r\n k, m = map(int, sys.stdin.readline().split())\r\n *vw, = zip(*[iter(sys.stdin.read().split())]*2)\r\n for l in itertools.product((1,2,3), repeat=k):\r\n s = dict()\r\n for v, w in vw:\r\n i = 0\r\n for d in v:\r\n d = int(d)-1\r\n j = i+l[d]\r\n if j > len(w): break\r\n t = w[i:j]\r\n if d in s and s[d] != t: break\r\n s[d] = t\r\n i = j\r\n else:\r\n if i == len(w): continue\r\n break\r\n else:\r\n for i in range(k): print(s[i])\r\n return\r\n\r\n\r\n class ABC032:\r\n @staticmethod\r\n def a():\r\n a, b, n = map(int, sys.stdin.read().split())\r\n l = NumberTheory.lcm(a, b)\r\n print((n+l-1)//l*l)\r\n\r\n @staticmethod\r\n def b():\r\n s, k = sys.stdin.read().split()\r\n k = int(k)\r\n res = set()\r\n for i in range(len(s)-k+1):\r\n res.add(s[i:i+k])\r\n print(len(res))\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *s = map(int, sys.stdin.read().split())\r\n if 0 in s: print(n); return\r\n if k == 0: print(0); return\r\n res, tmp, l = 0, 
1, 0\r\n for r in range(n):\r\n tmp *= s[r]\r\n while tmp > k: tmp //= s[l]; l+=1\r\n res = max(res, r-l+1)\r\n\r\n print(res)\r\n\r\n class ABC033:\r\n @staticmethod\r\n def a():\r\n print('SAME' if len(set(sys.stdin.readline().rstrip()))==1 else 'DIFFERENT')\r\n\r\n @staticmethod\r\n def b():\r\n n = int(sys.stdin.readline().rstrip())\r\n res = dict()\r\n for _ in range(n):\r\n s, p = sys.stdin.readline().split()\r\n res[s] = int(p)\r\n tot = sum(res.values())\r\n for s, p in res.items():\r\n if p > tot/2: print(s); return\r\n print('atcoder')\r\n\r\n @staticmethod\r\n def c():\r\n s = sys.stdin.readline().rstrip()\r\n print(sum(not '0' in f for f in s.split('+')))\r\n\r\n\r\n class ABC034:\r\n @staticmethod\r\n def a():\r\n x, y = map(int, sys.stdin.readline().split())\r\n print('Better' if y>x else 'Worse')\r\n\r\n @staticmethod\r\n def b():\r\n n = int(sys.stdin.readline().rstrip())\r\n print(n+1 if n&1 else n-1)\r\n\r\n @staticmethod\r\n def c():\r\n h, w = map(int, sys.stdin.read().split())\r\n choose = Combinatorics.CombinationsMod()\r\n print(choose(h+w-2, h-1))\r\n\r\n @staticmethod\r\n def d():\r\n n, k, *wp = map(int, sys.stdin.read().split())\r\n w, p = np.array(wp).reshape(-1, 2).T\r\n def f(x):\r\n return np.sort(w*(p-x))[-k:].sum()\r\n print(optimize.bisect(f, 0, 100))\r\n\r\n class ABC035:\r\n @staticmethod\r\n def a():\r\n w, h = map(int, sys.stdin.readline().split())\r\n print('4:3' if 4*h==3*w else '16:9')\r\n\r\n @staticmethod\r\n def b():\r\n s, t = sys.stdin.read().split()\r\n y = x = z = 0\r\n for c in s:\r\n if c == '?': z += 1\r\n elif c == 'L': x -= 1\r\n elif c == 'R': x += 1\r\n elif c == 'D': y -= 1\r\n elif c == 'U': y += 1\r\n d = abs(y)+abs(x)\r\n print(d+z if t=='1' else max(d-z, (d-z)&1))\r\n\r\n @staticmethod\r\n def c():\r\n n, q, *lr = map(int, sys.stdin.read().split())\r\n l, r = np.array(lr).reshape(q, 2).T\r\n res = np.zeros(n+1, dtype=int)\r\n np.add.at(res, l-1, 1)\r\n np.subtract.at(res, r, 1)\r\n np.cumsum(res, out=res)\r\n res = res&1\r\n print(''.join(map(str, res[:-1])))\r\n\r\n @staticmethod\r\n def d():\r\n n, m, t = map(int, sys.stdin.readline().split())\r\n point = np.array(sys.stdin.readline().split(), dtype=int)\r\n a, b, c = np.array(sys.stdin.read().split(), dtype=np.int64).reshape(m, 3).T\r\n a -= 1; b -= 1\r\n d_1 = shortest_path(csr_matrix((c, (a, b)), (n, n)), method='D', directed=True, indices=0)\r\n d_2 = shortest_path(csr_matrix((c, (b, a)), (n, n)), method='D', directed=True, indices=0)\r\n print(int(np.amax((t-(d_1+d_2))*point)))\r\n\r\n class ABC036:\r\n @staticmethod\r\n def a():\r\n a, b = map(int, sys.stdin.readline().split())\r\n print((b+a-1)//a)\r\n\r\n @staticmethod\r\n def b():\r\n n, *s = sys.stdin.read().split()\r\n n = int(n)\r\n for j in range(n):\r\n row = ''\r\n for i in range(n-1, -1, -1):\r\n row += s[i][j]\r\n print(row)\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n b = [None]*n\r\n prev = None\r\n j = -1\r\n for i, x in sorted(enumerate(a), key=lambda x: x[1]):\r\n if x != prev: j += 1\r\n b[i] = j\r\n prev = x\r\n print(*b, sep='\\n')\r\n\r\n @staticmethod\r\n def d():\r\n n, *ab = map(int, sys.stdin.read().split())\r\n edges = [[] for _ in range(n)]\r\n for a, b in zip(*[iter(ab)]*2):\r\n a -= 1; b -= 1\r\n edges[a].append(b)\r\n edges[b].append(a)\r\n parent = [None]*n\r\n def count(u):\r\n black, white = 1, 1\r\n for v in edges[u]:\r\n if v == parent[u]: continue\r\n parent[v] = u\r\n b, w = count(v)\r\n black *= w; black %= MOD\r\n white *= (b+w)%MOD; white %= 
MOD\r\n return black, white\r\n print(sum(count(0))%MOD)\r\n\r\n class ABC037:\r\n @staticmethod\r\n def a():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n print(c//min(a, b))\r\n\r\n @staticmethod\r\n def b():\r\n n, q, *lrt = map(int, sys.stdin.read().split())\r\n a = np.zeros(n, dtype=int)\r\n for l, r, t in zip(*[iter(lrt)]*3):\r\n a[l-1:r] = t\r\n print(*a, sep='\\n')\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *a = map(int, sys.stdin.read().split())\r\n a = np.array([0]+a)\r\n np.cumsum(a, out=a)\r\n s = (a[k:] - a[:-k]).sum()\r\n print(s)\r\n\r\n @staticmethod\r\n def d():\r\n h, w, *a = map(int, sys.stdin.read().split())\r\n p = [None]*(h*w)\r\n def paths(k):\r\n if p[k]: return p[k]\r\n p[k] = 1\r\n i, j = divmod(k,w)\r\n if j>0 and a[k]>a[k-1]: p[k] += paths(k-1)\r\n if j<w-1 and a[k]>a[k+1]: p[k] += paths(k+1)\r\n if i>0 and a[k]>a[k-w]: p[k] += paths(k-w)\r\n if i<h-1 and a[k]>a[k+w]: p[k] += paths(k+w)\r\n p[k] %= MOD; return p[k]\r\n print(sum(paths(i) for i in range(h*w))%MOD)\r\n\r\n\r\n class ABC038:\r\n @staticmethod\r\n def a():\r\n s = sys.stdin.readline().rstrip()\r\n print('YES' if s[-1]=='T' else 'NO')\r\n\r\n @staticmethod\r\n def b():\r\n a, b, c, d = map(int, sys.stdin.read().split())\r\n print('YES' if a==c or b==c or a==d or b==d else 'NO')\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a += [-1]\r\n cnt = n\r\n tmp = 1\r\n for i in range(n):\r\n if a[i+1] > a[i]:\r\n tmp += 1\r\n else:\r\n cnt += tmp*(tmp-1)//2\r\n tmp = 1\r\n print(cnt)\r\n\r\n @staticmethod\r\n def d():\r\n n, *wh = map(int, sys.stdin.read().split())\r\n a = [x[1] for x in sorted(zip(*[iter(wh)]*2), key=lambda x: (x[0], -x[1]))]\r\n print(bi_l(DP.LIS(a), inf))\r\n\r\n class ABC039:\r\n @staticmethod\r\n def a():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n print((a*b+b*c+c*a)*2)\r\n\r\n @staticmethod\r\n def b():\r\n x = int(sys.stdin.readline().rstrip())\r\n for n in range(1, int(x**0.5)+1):\r\n if pow(n, 4)==x:\r\n print(n); return\r\n\r\n\r\n @staticmethod\r\n def c():\r\n board = 'WBWBWWBWBWBW' * 3\r\n convert = 'Do, *, Re, *, Mi, Fa, *, So, *, La, *, Si'.split(', ')\r\n s = sys.stdin.readline().rstrip()\r\n print(convert[board.index(s)])\r\n\r\n\r\n @staticmethod\r\n def d():\r\n h, w = map(int, sys.stdin.readline().split())\r\n s = ''.join(sys.stdin.read().split())\r\n white = set()\r\n for i in range(h*w):\r\n if s[i]=='#': continue\r\n l = 0 if i%w==0 else -1\r\n r = 0 if (i+1)%w==0 else 1\r\n white |= {i+dy+dx for dy in range(-w, w+1, w) for dx in range(l,r+1)}\r\n black_before = set(range(h*w)) - white\r\n black_after = set()\r\n for i in black_before:\r\n l = 0 if i%w==0 else -1\r\n r = 0 if (i+1)%w==0 else 1\r\n black_after |= {i+dy+dx for dy in range(-w, w+1, w) for dx in range(l,r+1)}\r\n black_after &= set(range(h*w))\r\n for i in range(h*w):\r\n if s[i]=='#' and not i in black_after: print('impossible'); return\r\n print('possible')\r\n for i in range(h):\r\n print(''.join(['#' if i*w+j in black_before else '.' 
for j in range(w)]))\r\n\r\n\r\n\r\n\r\n class ABC040:\r\n @staticmethod\r\n def a():\r\n n, x = map(int, sys.stdin.readline().split())\r\n print(min(x-1, n-x))\r\n\r\n @staticmethod\r\n def b():\r\n n = int(sys.stdin.readline().rstrip())\r\n res = inf\r\n for i in range(1, int(n**.5)+1):\r\n res = min(res, n//i-i+n%i)\r\n print(res)\r\n\r\n @staticmethod\r\n def c():\r\n n, *h = map(int, sys.stdin.read().split())\r\n h = [h[0]]+h\r\n cost = [None] * (n+1); cost[0] = cost[1] = 0\r\n for i in range(2, n+1):\r\n cost[i] = min(\r\n cost[i-2] + abs(h[i]-h[i-2]),\r\n cost[i-1] + abs(h[i]-h[i-1])\r\n )\r\n print(cost[n])\r\n\r\n @staticmethod\r\n def d():\r\n n, m = map(int, sys.stdin.readline().split())\r\n uf = GeometryTopology.Graph(n); uf.init_dsu()\r\n queue = []\r\n for _ in range(m):\r\n a, b, y = map(int, sys.stdin.readline().split())\r\n heappush(queue, (-(2*y), a-1, b-1))\r\n q = int(sys.stdin.readline().rstrip())\r\n for i in range(q):\r\n v, y = map(int, sys.stdin.readline().split())\r\n heappush(queue, (-(2*y+1), v-1, i))\r\n res = [None] * q\r\n while queue:\r\n y, i, j = heappop(queue)\r\n if y&1:\r\n res[j] = uf.size[uf.find(i)]\r\n else:\r\n uf.unite(i, j)\r\n print(*res, sep='\\n')\r\n\r\n class ABC041:\r\n @staticmethod\r\n def a():\r\n s, i = sys.stdin.read().split()\r\n i = int(i)\r\n print(s[i-1])\r\n\r\n @staticmethod\r\n def b():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n ans = a * b % MOD * c % MOD\r\n print(ans)\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n for i, h in sorted(enumerate(a), key=lambda x: -x[1]):\r\n print(i+1)\r\n\r\n @staticmethod\r\n def d():\r\n n, _, *xy = map(int, sys.stdin.read().split())\r\n g = [0]*n\r\n for x, y in zip(*[iter(xy)]*2): g[x-1] |= 1<<(y-1)\r\n res = [0]*(1<<n); res[0] = 1\r\n for i in range(1<<n):\r\n for j in range(n):\r\n if i>>j&1^1: continue\r\n if not(g[j]&i): res[i] += res[i&~(1<<j)]\r\n print(res[-1])\r\n\r\n\r\n\r\n class ABC042:\r\n @staticmethod\r\n def a():\r\n a = [int(x) for x in sys.stdin.readline().split()]\r\n c = Counter(a)\r\n print('YES' if c[5]==2 and c[7]==1 else 'NO')\r\n\r\n @staticmethod\r\n def b():\r\n n, l, *s = sys.stdin.read().split()\r\n print(''.join(sorted(s)))\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *d = sys.stdin.read().split()\r\n l = len(n)\r\n ok = sorted(set(string.digits)-set(d))\r\n cand = [int(''.join(p)) for p in itertools.product(ok, repeat=l)] + [int(min(x for x in ok if x > '0')+min(ok)*l)]\r\n print(cand[bi_l(cand, int(n))])\r\n\r\n @staticmethod\r\n def d():\r\n h, w, a, b = map(int, sys.stdin.read().split())\r\n combinations = Combinatorics.CombinationsMod(n=2*10**5, mod=MOD)\r\n i = np.arange(h-a, h)\r\n ng = np.sum(combinations(i+b-1, i) * combinations(h-i+w-b-2, h-1-i) % MOD)\r\n print((combinations(h+w-2, h-1)-ng)%MOD)\r\n\r\n\r\n class ABC043:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n print((1+n)*n//2)\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip()\r\n t = ''\r\n for c in s:\r\n if c == 'B': t = t[:-1]\r\n else: t += c\r\n print(t)\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n x = np.around(a.sum()/n).astype(int)\r\n print(np.sum((a-x)**2))\r\n\r\n @staticmethod\r\n def d():\r\n s = sys.stdin.readline().rstrip()\r\n n = len(s)\r\n for i in range(n-1):\r\n if s[i] == s[i+1]: print(i+1, i+2); return\r\n for i in range(n-2):\r\n if s[i] == s[i+2]: print(i+1, i+3); return\r\n print(-1, -1)\r\n\r\n class 
ABC044:\r\n @staticmethod\r\n def a():\r\n n, k, x, y = map(int, sys.stdin.read().split())\r\n print(min(n,k)*x + max(0,n-k)*y)\r\n\r\n @staticmethod\r\n def b():\r\n res = set(c&1 for c in Counter(sys.stdin.readline().rstrip()).values())\r\n print('Yes' if len(res)==1 and res.pop()==0 else 'No')\r\n\r\n @staticmethod\r\n def c():\r\n n, a, *x = map(int, sys.stdin.read().split())\r\n dp = np.zeros((n+1, 2501), dtype=np.int64); dp[0,0] = 1\r\n for v in x: dp[1:,v:] += dp[:-1,:-v]\r\n i = np.arange(1, n+1)\r\n print(dp[i, i*a].sum())\r\n\r\n @staticmethod\r\n def c_2():\r\n n, a, *x = map(int, sys.stdin.read().split())\r\n for i in range(n): x[i] -= a\r\n\r\n s = defaultdict(int); s[0] = 1\r\n for i in range(n):\r\n ns = s.copy()\r\n for k, v in s.items(): ns[k+x[i]] += v\r\n s = ns\r\n print(s[0]-1)\r\n\r\n\r\n\r\n @staticmethod\r\n def d():\r\n pass\r\n\r\n class ABC045:\r\n @staticmethod\r\n def a():\r\n a, b, h = map(int, sys.stdin.read().split())\r\n print((a+b)*h//2)\r\n\r\n @staticmethod\r\n def b():\r\n a, b, c = sys.stdin.read().split()\r\n d = {'a': a[::-1], 'b': b[::-1], 'c': c[::-1]}\r\n nx = 'a'\r\n while 1:\r\n if not d[nx]: print(nx.upper()); return\r\n d[nx], nx = d[nx][:-1], d[nx][-1]\r\n\r\n @staticmethod\r\n def c():\r\n def c(l): return pow(2, max(0,l-1))\r\n s = sys.stdin.readline().rstrip()\r\n n = len(s)\r\n print(sum(int(s[i:j+1])*c(i)*c(n-1-j) for i in range(n) for j in range(i, n)))\r\n\r\n @staticmethod\r\n def d():\r\n h, w, n, *ab = map(int, sys.stdin.read().split())\r\n c = defaultdict(int)\r\n for y, x in zip(*[iter(ab)] * 2):\r\n y -= 1; x -= 1\r\n for dy, dx in itertools.product(range(-1, 2), repeat=2):\r\n i, j = y+dy, x+dx\r\n if not(0<i<h-1 and 0<j<w-1): continue\r\n c[(i,j)] += 1\r\n c = Counter(c.values())\r\n c[0] = (h-2)*(w-2)-sum(c.values())\r\n for i in range(10): print(c[i])\r\n\r\n\r\n class ABC046:\r\n @staticmethod\r\n def a():\r\n print(len(set(sys.stdin.readline().split())))\r\n\r\n @staticmethod\r\n def b():\r\n n, k = map(int, sys.stdin.readline().split())\r\n print(k*pow(k-1, n-1))\r\n\r\n @staticmethod\r\n def c():\r\n n, *xy = map(int, sys.stdin.read().split())\r\n a, b = 1, 1\r\n for x, y in zip(*[iter(xy)]*2):\r\n n = max((a+x-1)//x, (b+y-1)//y)\r\n a, b = n*x, n*y\r\n print(a+b)\r\n\r\n @staticmethod\r\n def d():\r\n c = Counter(sys.stdin.readline().rstrip())\r\n print((c['g']-c['p'])//2)\r\n\r\n\r\n\r\n class ABC047:\r\n @staticmethod\r\n def a():\r\n c = sorted(map(int, sys.stdin.readline().split()))\r\n print('Yes' if c[0]+c[1]==c[2] else 'No')\r\n\r\n @staticmethod\r\n def b():\r\n w, h, n, *xyf = map(int, sys.stdin.read().split())\r\n l, r, d, u = 0, w, 0, h\r\n for x, y, f in zip(*[iter(xyf)]*3):\r\n if f == 1: l = max(l, x)\r\n if f == 2: r = min(r, x)\r\n if f == 3: d = max(d, y)\r\n if f == 4: u = min(u, y)\r\n print(max(0, r-l)*max(0, u-d))\r\n\r\n @staticmethod\r\n def c():\r\n s = sys.stdin.readline().rstrip()\r\n print(sum(s[i]!=s[i+1] for i in range(len(s)-1)))\r\n\r\n @staticmethod\r\n def d():\r\n mn, mx, c = inf, -1, 0\r\n n, t, *a = map(int, sys.stdin.read().split())\r\n for p in a:\r\n if p-mn == mx: c += 1\r\n elif p-mn>mx: mx, c = p-mn, 1\r\n mn = min(mn, p)\r\n print(c)\r\n\r\n class ABC048:\r\n @staticmethod\r\n def a():\r\n def initial(s): return s[0].upper()\r\n print(''.join(map(initial, sys.stdin.readline().split())))\r\n\r\n @staticmethod\r\n def b():\r\n a, b, x = map(int, sys.stdin.readline().split())\r\n print(b//x - (a-1)//x) # if a=0, (a-1)/x is rounded down to -1.\r\n\r\n @staticmethod\r\n def c():\r\n 
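# ABC048 C, greedy left-to-right: whenever an adjacent pair of boxes exceeds x, eat the excess from the right box; reducing the right box can only help the next pair, so the scan is optimal.\r\n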
n, x, *a = map(int, sys.stdin.read().split())\r\n cnt = prev = 0\r\n for i in range(n):\r\n d = prev+a[i] - x\r\n prev = a[i]\r\n if d <= 0: continue\r\n cnt += d; prev -= d\r\n print(cnt)\r\n\r\n @staticmethod\r\n def d():\r\n s = sys.stdin.readline().rstrip()\r\n print('First' if len(s)&1^(s[0]==s[-1]) else 'Second')\r\n\r\n\r\n class ABC049:\r\n @staticmethod\r\n def a():\r\n vowels = set('aeiou')\r\n print('vowel' if sys.stdin.readline().rstrip() in vowels else 'consonant')\r\n\r\n @staticmethod\r\n def b():\r\n h, w, *s = sys.stdin.read().split()\r\n for l in s:\r\n for _ in range(2): print(l)\r\n\r\n @staticmethod\r\n def c():\r\n t = set('dream, dreamer, erase, eraser'.split(', '))\r\n def obtainable(s):\r\n while True:\r\n for i in range(5, 8):\r\n if s[-i:] in t:\r\n s = s[:-i]\r\n if not s: return True\r\n break\r\n else: return False\r\n\r\n s = sys.stdin.readline().rstrip()\r\n print('YES' if obtainable(s) else 'NO')\r\n\r\n @staticmethod\r\n def d():\r\n n, k, l = map(int, sys.stdin.readline().split())\r\n uf1 = GeometryTopology.Graph(n); uf1.init_dsu()\r\n uf2 = GeometryTopology.Graph(n); uf2.init_dsu()\r\n\r\n def add_edges(uf, m):\r\n for _ in range(m):\r\n x, y = map(int, sys.stdin.readline().split())\r\n x -= 1; y -= 1\r\n uf.unite(x, y)\r\n\r\n add_edges(uf1, k); add_edges(uf2, l)\r\n\r\n g = defaultdict(list)\r\n for i in range(n): g[(uf1.find(i), uf2.find(i))].append(i)\r\n\r\n res = [None] * n\r\n for a in g:\r\n for i in g[a]: res[i] = len(g[a])\r\n\r\n print(*res, sep=' ')\r\n\r\n\r\n class ABC050:\r\n @staticmethod\r\n def a():\r\n print(eval(sys.stdin.readline().rstrip()))\r\n\r\n @staticmethod\r\n def b():\r\n n = int(sys.stdin.readline().rstrip())\r\n t = np.array(sys.stdin.readline().split(), dtype=np.int64)\r\n m, *px = map(int, sys.stdin.read().split())\r\n p, x = np.array(px).reshape(m, 2).T; p -= 1\r\n print(*(t.sum()+x-t[p]), sep='\\n')\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = Counter(a)\r\n if n&1 and not(a[0]==1 and all(a[i]==2 for i in range(2, n, 2))):\r\n print(0); return\r\n if ~n&1 and any(a[i]!= 2 for i in range(1, n, 2)):\r\n print(0); return\r\n print(pow(2, n//2, MOD))\r\n\r\n @staticmethod\r\n def d(): pass\r\n\r\n\r\n class ABC051:\r\n @staticmethod\r\n def a():\r\n print(' '.join(sys.stdin.readline().rstrip().split(',')))\r\n\r\n @staticmethod\r\n def b():\r\n k, s = map(int, sys.stdin.readline().split())\r\n tot = 0\r\n for x in range(k+1):\r\n if s-x < 0: break\r\n if s-x > 2*k: continue\r\n tot += s-x+1 if s-x<=k else 2*k-(s-x)+1\r\n print(tot)\r\n\r\n @staticmethod\r\n def c():\r\n x1, y1, x2, y2 = map(int, sys.stdin.readline().split())\r\n dx, dy = x2-x1, y2-y1\r\n print('U'*dy+'R'*(dx+1)+'D'*(dy+1)+'L'*(dx+1)+'U'+'L'+'U'*(dy+1)+'R'*(dx+1)+'D'*(dy+1)+'L'*dx)\r\n\r\n @staticmethod\r\n def d():\r\n n, m, *abc = map(int, sys.stdin.read().split())\r\n x = np.arange(n)\r\n a, b, c = np.array(abc).reshape(m, 3).T; a -= 1; b -= 1\r\n d = shortest_path(csr_matrix((c, (a, b)), shape=(n, n)), method='FW', directed=False).astype(np.int64)\r\n print(m-np.any(d[x,a[:,None]]+c[:,None]==d[x,b[:,None]], axis=1).sum())\r\n\r\n\r\n class ABC052:\r\n @staticmethod\r\n def a():\r\n a, b, c, d = map(int, sys.stdin.readline().split())\r\n print(max(a*b, c*d))\r\n\r\n @staticmethod\r\n def b():\r\n n, s = sys.stdin.read().split()\r\n n = int(n)\r\n a = [0] * (n+1)\r\n for i in range(n):\r\n a[i+1] = a[i] + (1 if s[i]=='I' else -1)\r\n print(max(a))\r\n\r\n @staticmethod\r\n def c():\r\n n = 
int(sys.stdin.readline().rstrip())\r\n pn = NumberTheory.PrimeNumbers(n)\r\n s = 1\r\n for c in pn.factorize_factorial(n).values():\r\n s = s*(c+1)%MOD\r\n print(s)\r\n\r\n @staticmethod\r\n def d():\r\n n, a, b, *x = map(int, sys.stdin.read().split())\r\n x = np.array(x)\r\n print(np.minimum((x[1:]-x[:-1])*a, b).sum())\r\n\r\n class ABC053:\r\n @staticmethod\r\n def a():\r\n print('ABC' if int(sys.stdin.readline().rstrip())<1200 else 'ARC')\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip()\r\n print(len(s)-s.find('A')-s[::-1].find('Z'))\r\n\r\n @staticmethod\r\n def c():\r\n x = int(sys.stdin.readline().rstrip())\r\n q, r = divmod(x, 11)\r\n print(2*q + (r+5)//6)\r\n\r\n @staticmethod\r\n def d():\r\n n, *a = map(int, sys.stdin.read().split())\r\n print(n-((n-len(set(a))+1)//2*2))\r\n\r\n class ABC054:\r\n @staticmethod\r\n def a():\r\n def f(x):\r\n return (x+11)%13\r\n a, b = map(int, sys.stdin.readline().split())\r\n print('Alice' if f(a)>f(b) else 'Bob' if f(a)<f(b) else 'Draw')\r\n\r\n @staticmethod\r\n def b():\r\n n, m = map(int, sys.stdin.readline().split())\r\n a = [sys.stdin.readline().rstrip() for _ in range(n)]\r\n b = [sys.stdin.readline().rstrip() for _ in range(m)]\r\n\r\n for i in range(n-m+1):\r\n for j in range(n-m+1):\r\n for y in range(m):\r\n for x in range(m):\r\n if a[i+y][j+x]==b[y][x]: continue\r\n break\r\n else: continue\r\n break\r\n else: print('Yes'); return\r\n print('No')\r\n\r\n @staticmethod\r\n def c():\r\n n, m, *ab = map(int, sys.stdin.read().split())\r\n g = GeometryTopology.Graph(n)\r\n for a, b in zip(*[iter(ab)]*2):\r\n a -= 1; b -= 1\r\n g.add_edge(a,b)\r\n g.add_edge(b,a)\r\n\r\n cnt = 0\r\n stack = [(0, 1)]\r\n while stack:\r\n u, s = stack.pop()\r\n if s==(1<<n)-1: cnt+=1; continue\r\n for v in g.edges[u]:\r\n if s>>v&1: continue\r\n stack.append((v, s|1<<v))\r\n print(cnt)\r\n\r\n @staticmethod\r\n def d():\r\n n, ma, mb, *abc = map(int, sys.stdin.read().split())\r\n dp = np.full((401, 401), np.inf); dp[0,0] = 0\r\n for a, b, c in zip(*[iter(abc)]*3):\r\n np.minimum(dp[a:, b:], dp[:-a, :-b]+c, out=dp[a:, b:])\r\n i = np.arange(1, 400//max(ma,mb)+1)\r\n res = dp[i*ma, i*mb].min()\r\n print(int(res) if res != np.inf else -1)\r\n\r\n\r\n class ABC055:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n print(800*n - 200*(n//15))\r\n\r\n @staticmethod\r\n def b():\r\n n = int(sys.stdin.readline().rstrip())\r\n fac, _ = Algebra.generate_fac_ifac(n, MOD)\r\n print(fac[-1])\r\n\r\n @staticmethod\r\n def c():\r\n n, m = map(int, sys.stdin.readline().split())\r\n print(m//2 if m<=2*n else n+(m-2*n)//4)\r\n\r\n @staticmethod\r\n def d():\r\n n, s = sys.stdin.read().split(); n = int(n)\r\n s = [1 if c=='o' else 0 for c in s]\r\n def possible(t):\r\n for i in range(1, n-1): t[i+1] = t[i-1]^t[i]^s[i]\r\n return ((t[0]^s[0]^t[1]^t[-1])|(t[-1]^s[-1]^t[-2]^t[0]))^1\r\n\r\n for fst in [(1,0), (0,1), (1,1), (0,0)]:\r\n t = [None]*n; t[0], t[1] = fst[0], fst[1]\r\n if possible(t): print(''.join('S' if x==1 else 'W' for x in t)); return\r\n print(-1)\r\n\r\n\r\n class ABC056:\r\n @staticmethod\r\n def a():\r\n def to_i(c):\r\n return 1 if c=='H' else 0\r\n a, b = map(to_i, sys.stdin.readline().split())\r\n print('D' if a^b else 'H')\r\n\r\n @staticmethod\r\n def b():\r\n w, a, b = map(int, sys.stdin.readline().split())\r\n if a>b: a,b = b,a\r\n print(max(b-(a+w), 0))\r\n\r\n @staticmethod\r\n def c():\r\n x = int(sys.stdin.readline().rstrip())\r\n print(int(math.ceil(math.sqrt(2*x+1/4)-.5)))\r\n\r\n\r\n 
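# ABC056 D: clip every card at k and sort; a card a[i] is necessary iff some subset of the other cards sums into [k-a[i], k-1]. Necessity is monotone along the sorted order, so binary-search for the first necessary card; the printed index equals the number of unnecessary cards.\r\n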
@staticmethod\r\n def d():\r\n n, k, *a = map(int, sys.stdin.read().split())\r\n a = sorted(min(x,k) for x in a)\r\n\r\n def necessary(i):\r\n dp = np.zeros(k, dtype=np.bool); dp[0] = True\r\n for j in range(n):\r\n if j==i: continue\r\n dp[a[j]:] += dp[:-a[j]]\r\n return np.any(dp[k-a[i]:])\r\n\r\n def binary_search():\r\n lo, hi = -1, n\r\n while hi-lo > 1:\r\n i = (lo+hi)//2\r\n if necessary(i): hi = i\r\n else: lo = i\r\n return hi\r\n\r\n print(binary_search())\r\n\r\n\r\n\r\n class ABC057:\r\n @staticmethod\r\n def a():\r\n a, b = map(int, sys.stdin.readline().split())\r\n print((a+b)%24)\r\n\r\n @staticmethod\r\n def b():\r\n n, m, *I = map(int, sys.stdin.read().split())\r\n I = np.array(I).reshape(-1, 2)\r\n ab, cd = I[:n], I[n:]\r\n print(*(np.argmin(np.absolute(ab[:,None]-cd).sum(axis=-1), axis=-1)+1), sep='\\n')\r\n\r\n @staticmethod\r\n def c():\r\n n = int(sys.stdin.readline().rstrip())\r\n divs = NumberTheory.find_divisors(n)\r\n print(len(str(divs[bi_l(divs, math.sqrt(n))])))\r\n\r\n\r\n @staticmethod\r\n def d():\r\n c = Combinatorics.choose\r\n n, a, b, *v = map(int, sys.stdin.read().split())\r\n v.sort()\r\n print(sum(v[-a:])/a)\r\n l, r = bi_l(v, v[-a]), bi_r(v, v[-a])\r\n print(sum(c(r-l, i) for i in range(r-n+a, r-max(l,n-b)+1)) if r==n else c(r-l, r-n+a))\r\n\r\n\r\n class ABC058:\r\n @staticmethod\r\n def a():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n print('YES' if c-b==b-a else 'NO')\r\n\r\n @staticmethod\r\n def b():\r\n s, t = sys.stdin.read().split()\r\n a = ''\r\n for i in range(len(t)): a += s[i]+t[i]\r\n if len(s)>len(t): a += s[-1]\r\n print(a)\r\n\r\n @staticmethod\r\n def c():\r\n n, *s = sys.stdin.read().split()\r\n res = {c: 100 for c in string.ascii_lowercase}\r\n for counter in map(Counter, s):\r\n for c, x, in res.items(): res[c] = min(x, counter[c])\r\n t = ''\r\n for c, x in sorted(res.items()): t += c*x\r\n print(t)\r\n\r\n @staticmethod\r\n def d():\r\n n, m, *xy = map(int, sys.stdin.read().split())\r\n x, y = np.array(xy[:n]), np.array(xy[n:])\r\n print((x*(np.arange(n)+1)-np.cumsum(x)).sum()%MOD*((y*(np.arange(m)+1)-np.cumsum(y)).sum()%MOD)%MOD)\r\n\r\n class ABC059:\r\n @staticmethod\r\n def a():\r\n def initial(s): return s[0].upper()\r\n print(''.join(map(initial, sys.stdin.readline().split())))\r\n\r\n\r\n @staticmethod\r\n def b():\r\n a, b = sys.stdin.read().split()\r\n la, lb = len(a), len(b)\r\n print('GREATER' if la>lb else 'LESS' if la<lb else 'GREATER' if a>b else 'LESS' if a<b else 'EQUAL')\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n c = s = 0\r\n for i in range(n):\r\n s += a[i]\r\n if i&1 and s>=0: c += s+1; s=-1\r\n elif i&1^1 and s<=0: c += 1-s; s=1\r\n c1 = c\r\n c = s = 0\r\n for i in range(n):\r\n s += a[i]\r\n if i&1 and s<=0: c += 1-s; s=1\r\n elif i&1^1 and s>=0: c += s+1; s=-1\r\n c2 = c\r\n print(min(c1, c2))\r\n\r\n\r\n @staticmethod\r\n def d():\r\n x, y = map(int, sys.stdin.readline().split())\r\n print('Brown' if abs(x-y)<=1 else 'Alice')\r\n\r\n\r\n class ABC060:\r\n @staticmethod\r\n def a():\r\n a, b, c = sys.stdin.readline().split()\r\n print('YES' if a[-1]==b[0] and b[-1]==c[0] else 'NO')\r\n\r\n @staticmethod\r\n def b():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n print('NO' if c%NumberTheory.gcd(a,b) else 'YES')\r\n\r\n\r\n @staticmethod\r\n def c():\r\n n, t, *a = map(int, sys.stdin.read().split())\r\n print(sum(min(a[i+1]-a[i], t) for i in range(n-1))+t)\r\n\r\n\r\n @staticmethod\r\n def d():\r\n n, W, *wv = map(int, 
sys.stdin.read().split())\r\n v, w0 = [[] for _ in range(4)], wv[0]\r\n for a, b in zip(*[iter(wv)]*2): v[a-w0].append(b)\r\n for i in range(4):\r\n v[i] = (sorted(v[i])+[0])[::-1]\r\n *v[i], = itertools.accumulate(v[i])\r\n global res; res = 0\r\n @lru_cache(maxsize=None)\r\n def dfs(i,j,k):\r\n if i>=len(v[0]) or j>=len(v[1]) or k>=len(v[2]): return\r\n w = j+2*k + (i+j+k)*w0\r\n if w > W: return\r\n l = min(len(v[3])-1, (W-w)//(w0+3))\r\n global res; res = max(res, v[0][i]+v[1][j]+v[2][k]+v[3][l])\r\n dfs(i+1,j,k); dfs(i,j+1,k); dfs(i,j,k+1)\r\n dfs(0,0,0)\r\n print(res)\r\n\r\n class ABC061:\r\n @staticmethod\r\n def a():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n print('Yes' if a <= c <= b else 'No')\r\n\r\n @staticmethod\r\n def b():\r\n n, m, *ab = map(int, sys.stdin.read().split())\r\n ab = np.array(ab) - 1\r\n g = np.zeros(n, dtype=np.int32)\r\n np.add.at(g, ab, 1)\r\n print(*g, sep='\\n')\r\n\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *ab = map(int, sys.stdin.read().split())\r\n ab = np.transpose(np.array(ab).reshape(n,2))\r\n a, b = ab[:, np.argsort(ab[0])]\r\n print(a[np.cumsum(b)>=k][0])\r\n\r\n @staticmethod\r\n def d():\r\n n, m, *abc = map(int, sys.stdin.read().split())\r\n a, b, c = np.array(abc).reshape(m, 3).T; a -= 1; b -= 1; c *= -1\r\n g = csr_matrix(([1]*(m+1), (np.append(a, n-1), np.append(b, 0))), (n, n))\r\n _, labels = connected_components(g, connection='strong')\r\n bl = (labels[a]==labels[0]) & (labels[b]==labels[0])\r\n g = csr_matrix((c[bl], (a[bl], b[bl])), (n, n))\r\n try: print(-shortest_path(g, method='BF', directed=True, indices=0)[-1].astype(int))\r\n except: print('inf')\r\n\r\n @staticmethod\r\n def d_2():\r\n n, m, *abc = map(int, sys.stdin.read().split())\r\n a, b, c = np.array(abc).reshape(m, 3).T; a -= 1; b -= 1; c *= -1\r\n d = np.full(n, np.inf); d[0] = 0\r\n for _ in range(n-1): np.minimum.at(d, b, d[a]+c)\r\n neg_cycle = np.zeros(n, dtype=np.bool)\r\n for _ in range(n):\r\n np.logical_or.at(neg_cycle, b, d[a]+c<d[b])\r\n np.minimum.at(d, b, d[a]+c)\r\n print(inf if neg_cycle[-1] else -d[-1].astype(int))\r\n\r\n\r\n class ABC062:\r\n @staticmethod\r\n def a():\r\n g = [0, 2, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0]\r\n x, y = map(int, sys.stdin.readline().split())\r\n print('Yes' if g[x-1]==g[y-1] else 'No')\r\n\r\n @staticmethod\r\n def b():\r\n h, w = map(int, sys.stdin.readline().split())\r\n a = np.array([list(s) for s in sys.stdin.read().split()], dtype='U1')\r\n a = np.pad(a, pad_width=1, constant_values='#')\r\n for s in a: print(''.join(s))\r\n\r\n @staticmethod\r\n def c():\r\n h, w = map(int, sys.stdin.readline().split())\r\n if h*w%3==0: print(0); return\r\n def minimize(h, w):\r\n return min(h, *(s[-1]-s[0] for x in range(w//3, w//3+2) for s in (sorted([h*x, h//2*(w-x), (h+1)//2*(w-x)]),)))\r\n\r\n print(min(minimize(h,w), minimize(w,h)))\r\n\r\n\r\n @staticmethod\r\n def d():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n\r\n def optimize(a):\r\n a = list(a)\r\n l, r = a[:n], a[n:]; heapify(l)\r\n s = [None]*(n+1); s[0] = sum(l)\r\n for i in range(n):\r\n x = heappop(l)\r\n heappush(l, max(x, r[i]))\r\n s[i+1] = s[i]+max(0, r[i]-x)\r\n return np.array(s)\r\n\r\n print((optimize(a[:2*n]) + optimize(-a[-1:n-1:-1])[::-1]).max())\r\n\r\n class ABC063:\r\n @staticmethod\r\n def a():\r\n a = sum(map(int, sys.stdin.readline().split()))\r\n print('error' if a>=10 else a)\r\n\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip()\r\n print('yes' if len(set(s))==len(s) else 'no')\r\n\r\n 
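# ABC063 C: if the total is not a multiple of 10 it is already valid; otherwise drop the smallest score that is not itself a multiple of 10, and print 0 when no such score exists.\r\n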
@staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n s = a.sum()\r\n if s%10: print(s)\r\n elif not np.count_nonzero(a%10): print(0)\r\n else: print(s-a[a%10!=0].min())\r\n\r\n\r\n\r\n @staticmethod\r\n def d():\r\n n, a, b, *h = map(int, sys.stdin.read().split())\r\n h = np.array(h)\r\n d = a-b\r\n\r\n def possible(c):\r\n hh = h.copy()\r\n np.maximum(hh-b*c, 0, out=hh)\r\n return ((hh+d-1)//d).sum() <= c\r\n\r\n def binary_search():\r\n lo, hi = 0, 10**9\r\n while hi-lo > 1:\r\n c = (lo+hi)//2\r\n if possible(c): hi = c\r\n else: lo = c\r\n return hi\r\n\r\n print(binary_search())\r\n\r\n class ABC064:\r\n @staticmethod\r\n def a():\r\n r, g, b = map(int, sys.stdin.readline().split())\r\n print('NO' if (10*g+b)%4 else 'YES')\r\n\r\n @staticmethod\r\n def b():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a.sort()\r\n print(a[-1]-a[0])\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = np.bincount(np.minimum(np.array(a)//400, 8), minlength=9)\r\n mx = np.count_nonzero(a[:-1]) + a[-1]\r\n mn = max(mx-a[-1], 1)\r\n print(mn, mx)\r\n\r\n @staticmethod\r\n def d():\r\n n, s = sys.stdin.read().split()\r\n l = r = 0\r\n for c in s:\r\n if c=='(': r += 1\r\n else:\r\n if r==0: l += 1\r\n else: r -= 1\r\n print('('*l+s+')'*r)\r\n\r\n class ABC065:\r\n @staticmethod\r\n def a():\r\n x, a, b = map(int, sys.stdin.readline().split())\r\n y = -a+b\r\n print('delicious' if y<=0 else 'safe' if y<=x else 'dangerous')\r\n\r\n @staticmethod\r\n def b():\r\n n, *a = [int(x)-1 for x in sys.stdin.read().split()]\r\n i = 0\r\n for c in range(n):\r\n i = a[i]\r\n if i == 1: print(c+1); return\r\n print(-1)\r\n\r\n @staticmethod\r\n def c():\r\n n, m = map(int, sys.stdin.readline().split())\r\n d = abs(n-m)\r\n if d >= 2: print(0); return\r\n fac, _ = Algebra.generate_fac_ifac(10**5)\r\n print(fac[n]*fac[m]*(1 if d else 2)%MOD)\r\n\r\n @staticmethod\r\n def d():\r\n n, *xy = map(int, sys.stdin.read().split())\r\n x, y = np.array(xy).reshape(n,2).T\r\n i = np.argsort(x); ax, bx, cx = i[:-1], i[1:], x[i[1:],]-x[i[:-1]]\r\n i = np.argsort(y); ay, by, cy = i[:-1], i[1:], y[i[1:],]-y[i[:-1]]\r\n e = np.vstack([np.hstack([ax,ay]),np.hstack([bx,by]),np.hstack([cx,cy])])\r\n e = e[:,np.argsort(e[-1])]\r\n _, i = np.unique(e[:-1], return_index=True, axis=1)\r\n a, b, c = e[:,i]\r\n print(minimum_spanning_tree(csr_matrix((c,(a,b)), (n,n))).astype(np.int64).sum())\r\n\r\n\r\n @staticmethod\r\n def d_2():\r\n n, *xy = map(int, sys.stdin.read().split())\r\n x, y = xy[::2], xy[1::2]\r\n g = GeometryTopology.Graph(n)\r\n def make(a):\r\n b = sorted(enumerate(a), key=lambda x: x[1])\r\n for i in range(n-1):\r\n u, v, w = b[i][0], b[i+1][0], b[i+1][1]-b[i][1]\r\n for u, v in [(v,u), (u,v)]:\r\n if not v in g.edges[u]: g.add_edge(u, v, weight=w)\r\n else: g.edges[u][v].weight = min(g.edges[u][v].weight, w)\r\n make(x); make(y)\r\n _, d = g.kruskal()\r\n # _, d = g.prim()\r\n # _, d = g.boruvka()\r\n print(d)\r\n\r\n\r\n\r\n class ABC066:\r\n @staticmethod\r\n def a():\r\n print(sum(sorted(map(int, sys.stdin.readline().split()))[:-1]))\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip()\r\n def f(s):\r\n n = len(s)//2\r\n return s[:n] == s[n:]\r\n for i in range(len(s)-2, 0, -2):\r\n if f(s[:i]): print(i); return\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n b = deque()\r\n for i in range(n):\r\n if i&1: b.appendleft(a[i])\r\n else: b.append(a[i])\r\n if n&1: b.reverse()\r\n 
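# instead of reversing the sequence after every append, alternate appendleft/append on a deque and perform a single final reverse when n is odd.\r\n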
print(*b)\r\n\r\n @staticmethod\r\n def d():\r\n n, *a = map(int, sys.stdin.read().split())\r\n tmp = [None]*(n+1)\r\n for i in range(n+1):\r\n if tmp[a[i]] is not None: d=tmp[a[i]]+n-i; break\r\n tmp[a[i]] = i\r\n k = np.arange(1, n+2)\r\n c = Combinatorics.CombinationsMod(n+1, MOD)\r\n print(*((c(n+1,k)-c(d,k-1))%MOD), sep='\\n')\r\n\r\n\r\n class ABC067:\r\n @staticmethod\r\n def a():\r\n a, b = map(int, sys.stdin.readline().split())\r\n print('Impossible' if a%3 and b%3 and (a+b)%3 else 'Possible')\r\n\r\n @staticmethod\r\n def b():\r\n n, k, *l = map(int, sys.stdin.read().split())\r\n print(sum(sorted(l)[-k:]))\r\n\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n np.cumsum(a, out=a)\r\n print(np.absolute(a[-1]-2*a[:-1]).min())\r\n\r\n\r\n @staticmethod\r\n def d():\r\n n, *ab = map(int, sys.stdin.read().split())\r\n g = GeometryTopology.Graph(n)\r\n for a, b in zip(*[iter(ab)]*2):\r\n a -= 1; b -= 1\r\n g.add_edge(a, b); g.add_edge(b,a)\r\n d1, d2 = g.bfs(0), g.bfs(n-1)\r\n print('Fennec' if sum(d1[i]<=d2[i] for i in range(n)) > n//2 else 'Snuke')\r\n\r\n class ABC068:\r\n @staticmethod\r\n def a():\r\n print('ABC'+sys.stdin.readline().rstrip())\r\n\r\n @staticmethod\r\n def b():\r\n print(2**math.floor(math.log2(int(sys.stdin.readline().rstrip()))))\r\n\r\n @staticmethod\r\n def c():\r\n n, m, *ab = map(int, sys.stdin.read().split())\r\n a, b = np.array(ab).reshape(m, 2).T\r\n d = shortest_path(csr_matrix(([1]*m, (a-1, b-1)), (n,n)), method='D', directed=False, indices=0).astype(np.int32)\r\n print('POSSIBLE' if d[-1]==2 else 'IMPOSSIBLE')\r\n\r\n @staticmethod\r\n def d():\r\n k = int(sys.stdin.readline().rstrip())\r\n n = 50; print(n)\r\n q,r = divmod(k,n); a = np.arange(n-1,-1,-1)+q; a[:r]+=1; print(*a)\r\n\r\n class ABC069:\r\n @staticmethod\r\n def a():\r\n n, m = map(int, sys.stdin.readline().split())\r\n print((n-1)*(m-1))\r\n\r\n\r\n @staticmethod\r\n def b():\r\n pass\r\n\r\n @staticmethod\r\n def c():\r\n pass\r\n\r\n\r\n @staticmethod\r\n def d():\r\n h, w, n, *a = map(int, sys.stdin.read().split())\r\n c = [i+1 for i in range(n) for j in range(a[i])]\r\n for i in range(h):\r\n row = c[i*w:(i+1)*w]\r\n if i&1: row = row[::-1]\r\n print(*row)\r\n\r\n class ABC070:\r\n @staticmethod\r\n def d():\r\n n = int(sys.stdin.readline().rstrip())\r\n g = GeometryTopology.Graph(n)\r\n for _ in range(n-1):\r\n a, b, c = map(int, sys.stdin.readline().split()); a-=1; b-=1\r\n g.add_edge(a, b, weight=c); g.add_edge(b, a, weight=c)\r\n q, k = map(int, sys.stdin.readline().split())\r\n d = g.bfs(k-1)\r\n for _ in range(q):\r\n x, y = map(int, sys.stdin.readline().split()); x-=1; y-=1\r\n print(d[x]+d[y])\r\n\r\n class ABC071:\r\n @staticmethod\r\n def d():\r\n n, *s = sys.stdin.read().split(); n = int(n)\r\n s = list(zip(*s))\r\n dp = [0]*n; dp[0] = 3 if s[0][0]==s[0][1] else 6\r\n for i in range(1,n):\r\n dp[i] = dp[i-1]\r\n if s[i][0]==s[i-1][0]: continue\r\n dp[i] *= 2 if s[i-1][0]==s[i-1][1] else 3 if s[i][0]!=s[i][1] else 1\r\n dp[i] %= MOD\r\n print(dp[-1])\r\n\r\n class ABC072:\r\n @staticmethod\r\n def d():\r\n n, *p = map(int, sys.stdin.read().split())\r\n p += [-1]\r\n cnt, i = 0, 0\r\n while i < n:\r\n if p[i]==i+1:\r\n cnt += p[i]==i+1\r\n if p[i+1]==i+2: i += 1\r\n i += 1\r\n print(cnt)\r\n\r\n\r\n class ABC073:\r\n @staticmethod\r\n def a():\r\n pass\r\n\r\n @staticmethod\r\n def b():\r\n pass\r\n\r\n @staticmethod\r\n def c():\r\n pass\r\n\r\n @staticmethod\r\n def d():\r\n n, m, r, *I = map(int, 
sys.stdin.read().split())\r\n I = np.array(I)\r\n a, b, c = I[r:].reshape(m,3).T\r\n d = shortest_path(csr_matrix((c, (a-1, b-1)), (n,n)), method='FW', directed=False).astype(np.int32)\r\n r = np.array([*itertools.permutations(I[:r]-1)])\r\n print((d[r[:,:-1], r[:,1:]].sum(axis=1)).min())\r\n\r\n class ABC074:\r\n @staticmethod\r\n def a():\r\n pass\r\n\r\n @staticmethod\r\n def b():\r\n pass\r\n\r\n @staticmethod\r\n def c():\r\n pass\r\n\r\n @staticmethod\r\n def d():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a, dtype=np.int32).reshape(n,n)\r\n b = shortest_path(a, method='FW').astype(np.int32)\r\n if (b < a).any(): print(-1); return\r\n np.fill_diagonal(b, 10**9)\r\n a[np.any(b[:,None]+b<=a[:,:,None], axis=2)] = 0\r\n print(a.sum()//2)\r\n\r\n\r\n\r\n class ABC075:\r\n @staticmethod\r\n def a():\r\n pass\r\n\r\n @staticmethod\r\n def b():\r\n pass\r\n\r\n @staticmethod\r\n def c():\r\n pass\r\n\r\n @staticmethod\r\n def d():\r\n n, k, *xy = map(int, sys.stdin.read().split())\r\n xy = np.array(xy).reshape(n,2)\r\n x_y = xy.copy()[np.argsort(xy[:,0])]\r\n y_x = xy.copy()[np.argsort(xy[:,1])]\r\n comb = np.array([*itertools.combinations(range(n),2)])\r\n i1, i2 = comb.T\r\n j1, j2 = comb[None,:].T\r\n s = (y_x[:,1][i2]-y_x[:,1][i1]) * (x_y[:,0][j2]-x_y[:,0][j1])\r\n c = np.zeros((n+1,n+1), dtype=np.int64)\r\n for i in range(n): c[i+1, 1:] += c[i, 1:] + (y_x[i,0]<=x_y[:,0])\r\n a = c[i2+1, j2+1] - c[i2+1, j1] - c[i1, j2+1] + c[i1, j1]\r\n print(s[a>=k].min())\r\n\r\n\r\n class ABC076:\r\n @staticmethod\r\n def d():\r\n n, *tv = map(int, sys.stdin.read().split())\r\n t, v = np.array(tv).reshape(2, n)\r\n t = np.pad(t, pad_width=[2,1], constant_values=0)\r\n np.cumsum(t, out=t)\r\n l, r = t[:-1], t[1:]\r\n v = np.pad(v, pad_width=[1,1], constant_values=0)\r\n x = np.arange(0, r[-1]+0.1, 0.5, dtype=np.float32)[:,None]\r\n # y = np.stack([v-(x-l), np.zeros(r[-1]*2+1, dtype=np.float32)[:,None]+v, v+(x-r)]).max(axis=0).min(axis=1)\r\n mx = v-(x-l); np.maximum(mx, v, out=mx); np.maximum(mx, v+(x-r), out=mx)\r\n y = mx.min(axis=1)\r\n print(((y[:-1]+y[1:])/4).sum())\r\n\r\n\r\n class ABC077:\r\n @staticmethod\r\n def d():\r\n k = int(sys.stdin.readline().rstrip())\r\n g = GeometryTopology.Graph(k)\r\n for i in range(k):\r\n g.add_edge(i, i*10%k, weight=0)\r\n g.add_edge(i, (i+1)%k, update=False, weight=1)\r\n print(1+g.bfs01(1)[0])\r\n\r\n\r\n class ABC078:\r\n @staticmethod\r\n def d():\r\n n, z, w, *a = map(int, sys.stdin.read().split())\r\n print(abs(a[0]-w) if n==1 else max(abs(a[-1]-w), abs(a[-1]-a[-2])))\r\n\r\n class ABC079:\r\n @staticmethod\r\n def d():\r\n h, w, *I = map(int, sys.stdin.read().split())\r\n I = np.array(I)\r\n c = I[:100].reshape(10,10)\r\n a = I[100:].reshape(h,w)\r\n c = shortest_path(c.T, method='D', indices=1).astype(np.int32)\r\n print(c[a[a!=-1]].sum())\r\n\r\n class ABC080:\r\n @staticmethod\r\n def d():\r\n n, c, *stc = map(int, sys.stdin.read().split())\r\n using = np.zeros((c, 10**5+2), dtype=np.int8)\r\n s, t, c = np.array(stc).reshape(n,3).T\r\n np.add.at(using, (c-1, s), 1)\r\n np.subtract.at(using, (c-1, t+1), 1)\r\n np.cumsum(using, axis=1, out=using)\r\n print(np.count_nonzero(using, axis=0).max())\r\n\r\n class ABC081:\r\n @staticmethod\r\n def d():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n i = np.argmax(np.absolute(a))\r\n print(2*n-1)\r\n for j in range(n): print(i+1, j+1)\r\n if a[i] >= 0:\r\n for j in range(n-1): print(j+1, j+2)\r\n else:\r\n for j in range(n-1, 0, -1): print(j+1, j)\r\n\r\n\r\n class 
ABC082:\r\n @staticmethod\r\n def d():\r\n s = [1 if c=='T' else 0 for c in sys.stdin.readline().rstrip()] + [1]\r\n x, y = map(int, sys.stdin.readline().split())\r\n i = j = 0\r\n while s[i]==0: x -= 1; i +=1\r\n d = [[], []]\r\n while i < len(s):\r\n if s[i]: j ^= 1; i += 1; continue\r\n c = 0\r\n while s[i]==0: c += 1; i += 1\r\n d[j].append(c)\r\n\r\n def possible(a, s):\r\n dp = np.zeros(sum(a)+1, dtype=np.bool)\r\n if s >= len(dp): return False\r\n dp[-1] = True\r\n for x in a: dp[:-2*x] += dp[2*x:]\r\n return dp[s]\r\n\r\n print('Yes' if possible(d[0], abs(x)) & possible(d[1], abs(y)) else 'No')\r\n\r\n\r\n class ABC083:\r\n @staticmethod\r\n def d():\r\n s = np.array(list(sys.stdin.readline().rstrip()), dtype=np.int8)\r\n k = np.argwhere(s[:-1] != s[1:]).ravel()\r\n if not k.size: print(len(s)); return\r\n print(np.maximum(k+1, len(s)-1-k).min())\r\n\r\n\r\n class ABC084:\r\n @staticmethod\r\n def d():\r\n pn = NumberTheory.PrimeNumbers()\r\n n = np.arange(10**5+1)\r\n cnt = (pn.is_prime[n] & pn.is_prime[(n+1)//2]).astype(np.int32)\r\n np.cumsum(cnt, out=cnt)\r\n q, *lr = map(int, sys.stdin.read().split())\r\n l, r = np.array(lr).reshape(q, 2).T\r\n print(*(cnt[r]-cnt[l-1]), sep='\\n')\r\n\r\n\r\n class ABC085:\r\n @staticmethod\r\n def d():\r\n n, h, *ab = map(int, sys.stdin.read().split())\r\n a, b = np.array(ab).reshape(n, 2).T\r\n a = np.sort(a)[-1]; b = np.sort(b[b>=a])[::-1]\r\n np.cumsum(b, out=b)\r\n print(np.searchsorted(b, h, side='left')+1 if h<=b[-1] else len(b)+(h-b[-1]+a-1)//a)\r\n\r\n class ABC086:\r\n @staticmethod\r\n def d():\r\n n, k = map(int, sys.stdin.readline().split())\r\n xy = []\r\n for _ in range(n):\r\n a, b, c = sys.stdin.readline().split()\r\n a, b = int(a), int(b)\r\n b += k*(c=='W')\r\n xy.append((a,b))\r\n x, y = np.array(xy, dtype=np.int32).T % (2*k)\r\n s = np.zeros((3*k, 3*k), dtype=np.int32)\r\n np.add.at(s, (y,x), 1); np.add.at(s, (y+k, x+k), 1); np.add.at(s, (y+k, x), -1); np.add.at(s, (y, x+k), -1)\r\n del x; del y\r\n s = s.cumsum(axis=0).cumsum(axis=1)\r\n s[:k] += s[-k:]; s[:, :k] += s[:, -k:]; s = s[:-k, :-k]\r\n s[:k, :k] += s[-k:, -k:]; s[:k, -k:] += s[-k:, :k]; s = s[:k]\r\n print(s.max())\r\n\r\n class ABC087:\r\n @staticmethod\r\n def d():\r\n n, m, *lrd = map(int, sys.stdin.read().split())\r\n g = GeometryTopology.Graph(n)\r\n for l, r, d in zip(*[iter(lrd)]*3):\r\n l -= 1; r -= 1\r\n g.add_edge(l, r, weight=d); g.add_edge(r, l, weight=-d)\r\n\r\n x = [None] * n\r\n @lru_cache(maxsize=None)\r\n def dfs(u, y):\r\n if x[u] is not None:\r\n if x[u] != y: raise Exception('conflict!')\r\n return\r\n x[u] = y\r\n for v, e in g.edges[u].items(): dfs(v, y+e.weight)\r\n\r\n for u in range(n):\r\n if x[u] is not None: continue\r\n # try: dfs(u, 0)\r\n # except: print('No'); return\r\n stack = [(u, 0)]\r\n while stack:\r\n u, y = stack.pop()\r\n if x[u] is not None:\r\n if x[u] != y: print('No'); return\r\n continue\r\n x[u] = y\r\n for v, e in g.edges[u].items(): stack.append((v, y+e.weight))\r\n print('Yes')\r\n\r\n class ABC088:\r\n @staticmethod\r\n def d():\r\n h, w = map(int, sys.stdin.readline().split())\r\n s = ''.join(sys.stdin.read().split())\r\n g = GeometryTopology.Graph(h*w)\r\n cnt = h*w\r\n for u in range(h*w):\r\n if s[u] == '#': cnt -= 1; continue\r\n i, j = divmod(u, w)\r\n if i>0 and s[u-w]=='.': g.add_edge(u, u-w, weight=1)\r\n if i<h-1 and s[u+w]=='.': g.add_edge(u, u+w, weight=1)\r\n if j>0 and s[u-1]=='.': g.add_edge(u, u-1, weight=1)\r\n if j<w-1 and s[u+1]=='.': g.add_edge(u, u+1, weight=1)\r\n d = g.bfs(0)\r\n 
print(-1 if d[-1]==inf else cnt-d[-1]-1)\r\n\r\n class ABC089:\r\n @staticmethod\r\n def d():\r\n h, w, d, *I = map(int, sys.stdin.read().split())\r\n I = np.array(I)\r\n a = I[:h*w].reshape(h,w)\r\n l, r = I[h*w+1:].reshape(-1,2).T - 1\r\n yx = np.pad(np.argwhere(a)[np.argsort(a.ravel())], pad_width=[(0,d), (0,0)], constant_values=0)\r\n a = np.zeros(h*w+d, dtype=np.int32)\r\n for i in range(0, h*w-d, d):\r\n a[i+d:i+2*d] = a[i:i+d] + np.absolute(yx[i+d:i+2*d]-yx[i:i+d]).sum(axis=1)\r\n print(*(a[r]-a[l]), sep='\\n')\r\n\r\n class ABC090:\r\n @staticmethod\r\n def d():\r\n n, k = map(int, sys.stdin.readline().split())\r\n b = np.arange(k+1, n+1)\r\n print((n//b*(b-k) + np.maximum(0, (n%b)-k+1*(k!=0))).sum())\r\n\r\n class ABC091:\r\n @staticmethod\r\n def d():\r\n n, *ab = map(int, sys.stdin.read().split())\r\n c = 2**np.arange(30)\r\n a, b = np.sort(np.array(ab).reshape(2,n)[:, None] % (2*c)[:,None])\r\n res = 0\r\n for i in range(30):\r\n j = np.searchsorted(b[i], np.arange(1, 5)[:,None]*c[i]-a[i]).sum(axis=1)\r\n j[1::2] *= -1\r\n res += (j.sum()&1) * c[i]\r\n print(res)\r\n\r\n class ABC092:\r\n @staticmethod\r\n def d():\r\n a, b = map(int, sys.stdin.readline().split())\r\n def make(color, cnt):\r\n g = [[color^1]*100 for _ in range(21)]\r\n for i in range(1, 21, 2):\r\n for j in range(0, 100, 2):\r\n if not cnt: return g\r\n g[i][j] = color; cnt -= 1\r\n g = make(0,a-1) + make(1,b-1)\r\n def convert(s): return ''.join('#' if c else '.' for c in s)\r\n print(42, 100)\r\n print(*map(convert, g), sep='\\n')\r\n\r\n class ABC093:\r\n @staticmethod\r\n def d():\r\n q, *ab = map(int, sys.stdin.read().split())\r\n a, b = np.sort(np.array(ab).reshape(q,2)).T\r\n x = np.sqrt(a*b).astype(int)\r\n x[x*x==a*b] -= 1\r\n res = a-1\r\n res += (a-1) * (b-a<=1)\r\n res += (x+np.minimum(x-a-1*(x*(x+1)>=a*b), b-x-1)) * (b-a>=2)\r\n\r\n # res = 0\r\n # res += 2*(a-1) * (b-a<=1)\r\n # res += (2*x-1 - 1*(x*(x+1)>=a*b)) * (b-a >= 2)\r\n\r\n print(*res, sep='\\n')\r\n\r\n\r\n\r\n class ABC094:\r\n @staticmethod\r\n def d():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a.sort()\r\n print(a[-1], end=' ')\r\n b = (a[-1]+1)//2\r\n i = bi_l(a, b)\r\n print(a[-2] if i==n-1 else a[i-1] if b-a[i-1]<=a[i]-b else a[i])\r\n\r\n\r\n\r\n class ABC095:\r\n @staticmethod\r\n def d():\r\n n, c, *xv = map(int, sys.stdin.read().split())\r\n\r\n def make(xv):\r\n x, v = xv.T\r\n s = np.cumsum(v)-x; rs = s-x\r\n np.maximum.accumulate(s, out=s)\r\n np.maximum.accumulate(rs, out=rs)\r\n return s, rs\r\n\r\n xv = np.pad(np.array(xv).reshape(n,2), pad_width=[(1,0), (0,0)], constant_values=0)\r\n ls, lrs = make(xv)\r\n xv[1:, 0] = c-xv[1:, 0]; xv[1:] = xv[-1:0:-1]\r\n rs, rrs = make(xv)\r\n print(np.maximum(ls+rrs[::-1], rs+lrs[::-1]).max())\r\n\r\n class ABC096:\r\n @staticmethod\r\n def d():\r\n n = int(sys.stdin.readline().rstrip())\r\n pn = NumberTheory.PrimeNumbers()\r\n a = [p for p in pn if p%5==1]\r\n print(*a[:n])\r\n\r\n\r\n class ABC097:\r\n @staticmethod\r\n def d():\r\n n, m = map(int, sys.stdin.readline().split())\r\n p = [int(x)-1 for x in sys.stdin.readline().split()]\r\n uf = GeometryTopology.Graph(n); uf.init_dsu()\r\n for x, y in zip(*[map(int, sys.stdin.read().split())]*2): uf.unite(x-1, y-1)\r\n groups = [set(p[u] for u in g) for g in uf.groups()]\r\n print(sum(i in groups[uf.find(i)] for i in range(n)))\r\n\r\n class ABC098:\r\n @staticmethod\r\n def d():\r\n n, *a = map(int, sys.stdin.read().split())\r\n r = s = cnt = 0\r\n for l in range(n):\r\n while r<n and not(s&a[r]): s ^= a[r]; r += 1\r\n cnt += 
r-l; s ^= a[l]\r\n print(cnt)\r\n\r\n\r\n class ABC099:\r\n @staticmethod\r\n def d():\r\n n, c, *I = map(int, sys.stdin.read().split())\r\n I = np.array(I)\r\n d = I[:c*c].reshape(c,c)\r\n r = np.arange(n*n); r = (r//n + r%n)%3\r\n a = d[I[c*c:]-1, np.arange(c)[:,None]]\r\n r = np.arange(n*n); r = (r//n + r%n)%3 == np.arange(3)[:,None]\r\n a = np.vstack([a[:,r[i]].sum(axis=1) for i in range(3)])\r\n p = np.array([*itertools.permutations(range(c), 3)])\r\n print(a[np.arange(3),p].sum(axis=1).min())\r\n\r\n\r\n class ABC100:\r\n @staticmethod\r\n def d():\r\n n, m, *xyz = map(int, sys.stdin.read().split())\r\n xyz = np.array(xyz).reshape(n,3)\r\n op = np.array([*itertools.product((-1,1), repeat=3)])\r\n print(np.sort((op[:,None]*xyz).sum(axis=-1), axis=-1)[:,n-m:].sum(axis=-1).max())\r\n\r\n class ABC101:\r\n @staticmethod\r\n def d():\r\n def s(n): return sum(int(d) for d in str(n))\r\n def f(n):\r\n return sorted([pow(10,d)*(n//pow(10,d)+2)-1 for d in range(int(math.log10(n))+2)], key=lambda x: x/s(x))[0]\r\n k = int(sys.stdin.readline().rstrip())\r\n n = 1\r\n for _ in range(k): print(n); n = f(n)\r\n\r\n class ABC102:\r\n @staticmethod\r\n def d(): # two pointers (online)\r\n n, *a = map(int, sys.stdin.read().split())\r\n mn = inf\r\n i, k = 0, 2\r\n p,q,r,s = a[0], 0, a[1]+a[2], sum(a[3:])\r\n for j in range(1,n-2):\r\n q += a[j]; r -= a[j]\r\n while i < j-1:\r\n if abs(q-p-2*a[i+1]) <= abs(q-p):\r\n q -= a[i+1]; p += a[i+1]\r\n i += 1; continue\r\n break\r\n while k < n-2:\r\n if abs(s-r-2*a[k+1]) <= abs(s-r):\r\n s -= a[k+1]; r += a[k+1]\r\n k += 1; continue\r\n break\r\n tmp = sorted([p,q,r,s])\r\n mn = min(mn, tmp[-1]-tmp[0])\r\n print(mn)\r\n\r\n @staticmethod\r\n def d_2(): # binary_search (offline)\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n def f(a):\r\n s = np.cumsum(a)\r\n i = np.searchsorted(s, s/2)\r\n l, r = s[i], s-s[i]\r\n bl = np.abs(r-l) > np.abs(r-l+2*a[i])\r\n l -= a[i]*bl; r += a[i]*bl\r\n return l, r\r\n (p,q), (s,r) = f(a), f(a[::-1])\r\n a = np.sort(np.vstack((p[:-1], q[:-1], r[-2::-1], s[-2::-1])), axis=0)[:,1:-1]\r\n print((a[-1]-a[0]).min())\r\n\r\n class ABC103:\r\n @staticmethod\r\n def d():\r\n n, m, *ab = map(int, sys.stdin.read().split())\r\n cnt = prev = 0\r\n for a, b in sorted(zip(*[iter(ab)]*2), key=lambda x: x[1]):\r\n a -= 1; b -= 1\r\n if a < prev: continue\r\n prev = b; cnt += 1\r\n print(cnt)\r\n\r\n class ABC104:\r\n @staticmethod\r\n def d():\r\n s = sys.stdin.readline().rstrip()[::-1]\r\n a = b = c = 0; d = 1\r\n for i in range(len(s)):\r\n if s[i]=='?': a,b,c,d = 3*a+b, 3*b+c, 3*c+d, 3*d\r\n elif s[i] == 'A': a += b\r\n elif s[i] == 'B': b += c\r\n elif s[i] == 'C': c += d\r\n a %= MOD; b %= MOD; c %= MOD; d %= MOD\r\n print(a)\r\n\r\n class ABC105:\r\n @staticmethod\r\n def d():\r\n n, m, *a = map(int, sys.stdin.read().split())\r\n c = Counter(np.array(a).cumsum()%m)\r\n print(c[0] + sum([v*(v-1)//2 for v in c.values()]))\r\n\r\n class ABC106:\r\n @staticmethod\r\n def d():\r\n n, m, q, *I = map(int, sys.stdin.read().split())\r\n I = np.array(I).reshape(-1,2) - 1\r\n (l,r), (p,q) = I[:m].T, I[-q:].T\r\n c = np.zeros((n+1, n+1), dtype=np.int64)\r\n np.add.at(c, (0,r), 1); np.add.at(c, (l+1,-1), 1)\r\n np.add.at(c, (l+1,r), -1); c[0,-1] -= m\r\n c = c.cumsum(axis=0).cumsum(axis=1)\r\n print(*c[p,q], sep='\\n')\r\n\r\n class ABC107:\r\n @staticmethod\r\n def d():\r\n n, *a = map(int, sys.stdin.read().split())\r\n h = (n*(n+1)//2 + 1)//2\r\n\r\n def f(x):\r\n *b, = itertools.accumulate([0]+[-1+2*(v>=x) for v in a])\r\n 
mn = min(b)\r\n b = [v-mn+1 for v in b]\r\n bit = GeometryTopology.FenwickTree(max(b))\r\n c = 0\r\n for v in b: c += bit.sum(1, v); bit.add(v, 1)\r\n return c >= h\r\n\r\n def f_2(x):\r\n tot = 0\r\n s, cs, c = 0, defaultdict(int), 0; cs[0] = 1\r\n for v in a:\r\n if v>=x: s += 1; c += cs[s]\r\n else: c -= cs[s]; s -= 1\r\n tot += c; cs[s] += 1; c += 1\r\n # print(tot)\r\n return tot >= h\r\n\r\n def binary_search():\r\n lo, hi = 1, 10**9+1\r\n while hi-lo > 1:\r\n x = (hi+lo)//2\r\n # if f(x): lo = x\r\n if f_2(x): lo = x\r\n else: hi = x\r\n return lo\r\n print(binary_search())\r\n\r\n\r\n\r\n class ABC108:\r\n @staticmethod\r\n def d():\r\n l = int(sys.stdin.readline().rstrip())\r\n n = l.bit_length()+1\r\n m = 2 * (n-2) + bit_count(l)\r\n edges = []\r\n for i in range(n-2):\r\n edges.append((i, i+1, 0))\r\n edges.append((i, i+1, 1<<i))\r\n edges.append((n-2, n-1, 0))\r\n j = 1<<(n-2)\r\n for i in range(n-2):\r\n if l>>i&1:\r\n edges.append((i, n-1, j))\r\n j += 1<<i\r\n print(n, m)\r\n for u, v, d in edges:\r\n print(u+1, v+1, d)\r\n\r\n\r\n\r\n class ABC109: pass\r\n class ABC110: pass\r\n class ABC111: pass\r\n class ABC112: pass\r\n class ABC113: pass\r\n class ABC114: pass\r\n class ABC115: pass\r\n class ABC116: pass\r\n class ABC117: pass\r\n class ABC118: pass\r\n class ABC119: pass\r\n class ABC120: pass\r\n class ABC121: pass\r\n class ABC122: pass\r\n class ABC123: pass\r\n class ABC124: pass\r\n class ABC125: pass\r\n class ABC126: pass\r\n class ABC127: pass\r\n class ABC128: pass\r\n class ABC129: pass\r\n class ABC130: pass\r\n class ABC131: pass\r\n class ABC132: pass\r\n class ABC133: pass\r\n class ABC134: pass\r\n class ABC135: pass\r\n class ABC136: pass\r\n class ABC137: pass\r\n class ABC138: pass\r\n class ABC139: pass\r\n class ABC140: pass\r\n class ABC141: pass\r\n class ABC142: pass\r\n class ABC143: pass\r\n class ABC144: pass\r\n class ABC145: pass\r\n class ABC146: pass\r\n class ABC147: pass\r\n class ABC148: pass\r\n class ABC149: pass\r\n class ABC150: pass\r\n class ABC151: pass\r\n class ABC152: pass\r\n class ABC153: pass\r\n class ABC154: pass\r\n class ABC155: pass\r\n class ABC156: pass\r\n class ABC157: pass\r\n class ABC158: pass\r\n class ABC159: pass\r\n class ABC160: pass\r\n class ABC161: pass\r\n class ABC162: pass\r\n class ABC163: pass\r\n class ABC164: pass\r\n class ABC165: pass\r\n class ABC166: pass\r\n class ABC167: pass\r\n class ABC168: pass\r\n class ABC169: pass\r\n\r\n class ABC170:\r\n @staticmethod\r\n def a():\r\n x = [int(x) for x in sys.stdin.readline().split()]\r\n for i in range(5):\r\n if x[i] != i+1:\r\n print(i+1)\r\n break\r\n\r\n @staticmethod\r\n def b():\r\n x, y = map(int, sys.stdin.readline().split())\r\n print('Yes' if 2*x <= y <= 4*x and y%2 == 0 else 'No')\r\n @staticmethod\r\n def c():\r\n x, n, *p = map(int, sys.stdin.read().split())\r\n a = list(set(range(102)) - set(p))\r\n a = [(abs(y-x), y) for y in a]\r\n print(sorted(a)[0][1])\r\n\r\n @staticmethod\r\n def d():\r\n n, *a = map(int, sys.stdin.read().split())\r\n cand = set(a)\r\n cnt = 0\r\n for x, c in sorted(Counter(a).items()):\r\n cnt += c == 1 and x in cand\r\n cand -= set(range(x*2, 10**6+1, x))\r\n print(cnt)\r\n\r\n @staticmethod\r\n def e():\r\n n, q = map(int, sys.stdin.readline().split())\r\n queue = []\r\n m = 2*10**5\r\n infants = [[] for _ in range(m)]\r\n highest_rate = [None] * m\r\n where = [None] * n\r\n rate = [None] * n\r\n\r\n def entry(i, k):\r\n where[i] = k\r\n while infants[k]:\r\n r, j = heappop(infants[k])\r\n if where[j] != k 
or j == i: continue\r\n if rate[i] >= -r:\r\n highest_rate[k] = rate[i]\r\n heappush(queue, (rate[i], k, i))\r\n heappush(infants[k], (r, j))\r\n break\r\n else:\r\n highest_rate[k] = rate[i]\r\n heappush(queue, (rate[i], k, i))\r\n heappush(infants[k], (-rate[i], i))\r\n\r\n def transfer(i, k):\r\n now = where[i]\r\n while infants[now]:\r\n r, j = heappop(infants[now])\r\n if where[j] != now or j == i: continue\r\n if highest_rate[now] != -r:\r\n highest_rate[now] = -r\r\n heappush(queue, (-r, now, j))\r\n heappush(infants[now], (r, j))\r\n break\r\n else:\r\n highest_rate[now] = None\r\n entry(i, k)\r\n\r\n def inquire():\r\n while True:\r\n r, k, i = heappop(queue)\r\n if where[i] != k or r != highest_rate[k]: continue\r\n heappush(queue, (r, k, i))\r\n return r\r\n\r\n for i in range(n):\r\n a, b = map(int, sys.stdin.readline().split())\r\n rate[i] = a\r\n entry(i, b-1)\r\n for _ in range(q):\r\n c, d = map(int, sys.stdin.readline().split())\r\n transfer(c-1, d-1)\r\n print(inquire())\r\n\r\n\r\n\r\n class ABC171:\r\n @staticmethod\r\n def a():\r\n c = sys.stdin.readline().rstrip()\r\n print('A' if c < 'a' else 'a')\r\n\r\n @staticmethod\r\n def b():\r\n n, k, *p = map(int, sys.stdin.read().split())\r\n print(sum(sorted(p)[:k]))\r\n\r\n\r\n @staticmethod\r\n def c():\r\n n = int(sys.stdin.readline().rstrip())\r\n n -= 1\r\n l = 1\r\n while True:\r\n if n < pow(26, l):\r\n break\r\n n -= pow(26, l)\r\n l += 1\r\n res = ''.join([chr(ord('a')+d) for d in NumberTheory.base_convert(n, 26)][::-1])\r\n res = 'a'*(l-len(res)) + res\r\n print(res)\r\n\r\n @staticmethod\r\n def d():\r\n n = int(sys.stdin.readline().rstrip())\r\n a = [int(x) for x in sys.stdin.readline().split()]\r\n s = sum(a)\r\n cnt = Counter(a)\r\n q = int(sys.stdin.readline().rstrip())\r\n for _ in range(q):\r\n b, c = map(int, sys.stdin.readline().split())\r\n s += (c-b)*cnt[b]\r\n print(s)\r\n cnt[c] += cnt[b]; cnt[b] = 0\r\n\r\n @staticmethod\r\n def e():\r\n n, *a = map(int, sys.stdin.read().split())\r\n s = 0\r\n for x in a: s ^= x\r\n b = map(lambda x: x^s, a)\r\n print(*b, sep=' ')\r\n\r\n\r\n class ABC172:\r\n @staticmethod\r\n def a():\r\n a = int(sys.stdin.readline().rstrip()); print(a*(1+a+a**2))\r\n\r\n @staticmethod\r\n def b():\r\n s, t = sys.stdin.read().split(); print(sum(s[i]!=t[i] for i in range(len(s))))\r\n\r\n @staticmethod\r\n def c():\r\n n, m, k = map(int, sys.stdin.readline().split())\r\n a = [0] + [int(x) for x in sys.stdin.readline().split()]\r\n b = [int(x) for x in sys.stdin.readline().split()]\r\n *sa, = itertools.accumulate(a)\r\n *sb, = itertools.accumulate(b)\r\n res = 0\r\n for i in range(n+1):\r\n r = k - sa[i]\r\n if r < 0: break\r\n res = max(res, i+bi_r(sb, r))\r\n print(res)\r\n\r\n @staticmethod\r\n def d():\r\n n = int(sys.stdin.readline().rstrip())\r\n f = np.zeros(n+1, dtype=np.int64)\r\n for i in range(1, n+1):\r\n f[i::i] += 1\r\n print((np.arange(1, n+1)*f[1:]).sum())\r\n\r\n\r\n class ABC173:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n charge = (n+999)//1000 * 1000 - n\r\n print(charge)\r\n\r\n @staticmethod\r\n def b():\r\n n, *s = sys.stdin.read().split()\r\n c = Counter(s)\r\n for v in 'AC, WA, TLE, RE'.split(', '):\r\n print(f'{v} x {c[v]}')\r\n\r\n\r\n @staticmethod\r\n def c():\r\n h, w, k = map(int, sys.stdin.readline().split())\r\n c = [sys.stdin.readline().rstrip() for _ in range(h)]\r\n tot = 0\r\n for i in range(1<<h):\r\n for j in range(1<<w):\r\n cnt = 0\r\n for y in range(h):\r\n for x in range(w):\r\n if i>>y & 1 or j>>x & 1:\r\n 
continue\r\n cnt += c[y][x] == '#'\r\n tot += cnt == k\r\n print(tot)\r\n\r\n @staticmethod\r\n def d():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a.sort(reverse=True)\r\n res = a[0] + sum(a[1:1+(n-2)//2])*2 + a[1+(n-2)//2]*(n & 1)\r\n print(res)\r\n\r\n @staticmethod\r\n def e():\r\n MOD = 10**9+7\r\n n, k, *a = map(int, sys.stdin.read().split())\r\n minus = [x for x in a if x < 0]\r\n plus = [x for x in a if x > 0]\r\n if len(plus) + len(minus)//2*2 >= k: # plus\r\n *minus, = map(abs, minus)\r\n minus.sort(reverse=True)\r\n plus.sort(reverse=True)\r\n cand = []\r\n if len(minus)&1: minus = minus[:-1]\r\n for i in range(0, len(minus)-1, 2):\r\n cand.append(minus[i]*minus[i+1]%MOD)\r\n if k & 1:\r\n res = plus[0]\r\n plus = plus[1:]\r\n else:\r\n res = 1\r\n if len(plus)&1: plus = plus[:-1]\r\n for i in range(0, len(plus)-1, 2):\r\n cand.append(plus[i]*plus[i+1]%MOD)\r\n cand.sort(reverse=True)\r\n for x in cand[:k//2]:\r\n res *= x\r\n res %= MOD\r\n print(res)\r\n elif 0 in a:\r\n print(0)\r\n else:\r\n cand = sorted(map(abs, a))\r\n res = 1\r\n for i in range(k):\r\n res *= cand[i]\r\n res %= MOD\r\n res = MOD - res\r\n print(res)\r\n pass\r\n\r\n\r\n class ABC174:\r\n @staticmethod\r\n def a():\r\n print('Yes' if int(sys.stdin.readline().rstrip())>=30 else 'No')\r\n\r\n\r\n\r\n\r\n class ABC178:\r\n @staticmethod\r\n def a(): pass\r\n\r\n @staticmethod\r\n def b(): pass\r\n\r\n @staticmethod\r\n def c(): pass\r\n\r\n @staticmethod\r\n def d():\r\n s = int(sys.stdin.readline().rstrip())\r\n if s == 0: print(1); return\r\n elif s == 1: print(0); return\r\n c = np.eye(3, k=-1, dtype=np.int64)\r\n c[0, 0] = c[0, 2] = 1\r\n a = np.array([0, 0, 1])\r\n print(Algebra.dot(Algebra.matrix_pow(c, s-2), a)[0])\r\n\r\n class ABC179:\r\n @staticmethod\r\n def a():\r\n s = sys.stdin.readline().rstrip()\r\n print(s+'s' if s[-1]!='s' else s+'es')\r\n\r\n @staticmethod\r\n def b():\r\n n, *d = map(int, sys.stdin.read().split())\r\n d = np.array(d).reshape(n, 2).T\r\n d = np.equal(d[0], d[1]).astype(int)\r\n dd = d.copy()\r\n dd[1:] += d[:-1]\r\n dd[:-1] += d[1:]\r\n print('Yes' if (dd>=3).any() else 'No')\r\n\r\n\r\n\r\n @staticmethod\r\n def c():\r\n n = int(sys.stdin.readline().rstrip())\r\n res = (n//np.arange(1, n+1)).sum() - len(NumberTheory.find_divisors(n))\r\n print(res)\r\n\r\n\r\n @staticmethod\r\n def d():\r\n mod = 998244353\r\n n, k, *lr = map(int, sys.stdin.read().split())\r\n l, r = np.array(lr).reshape(k, -1).T\r\n @njit((i8, i8[:], i8[:]), cache=True)\r\n def solve(n, l, r):\r\n res = np.zeros(n*2, dtype=np.int64); res[0], res[1] = 1, -1\r\n for i in range(n-1):\r\n res[i+1] = (res[i+1]+res[i]) % mod\r\n res[i+l] = (res[i+l]+res[i]) % mod\r\n res[i+r+1] = (res[i+r+1]-res[i]) % mod\r\n print(res[n-1])\r\n solve(n, l, r)\r\n\r\n @staticmethod\r\n def e():\r\n n, x, m = map(int, sys.stdin.readline().split())\r\n res = [-1 for _ in range(m)]\r\n s = 0\r\n loop = np.zeros(m, dtype=np.int64)\r\n for i in range(m+1):\r\n if i==n: print(s); return\r\n if res[x] != -1:\r\n l, loop = i-res[x], loop[res[x]:i]\r\n q, r = divmod(n-i, l)\r\n print(s+q*loop.sum()+loop[:r].sum()); return\r\n res[x], loop[i] = i, x\r\n s += x; x = x**2 % m\r\n\r\n\r\n class ABC180:\r\n @staticmethod\r\n def a():\r\n n, a, b = map(int, sys.stdin.readline().split())\r\n print(n-a+b)\r\n\r\n @staticmethod\r\n def b():\r\n n, *x = map(int, sys.stdin.read().split())\r\n x = np.absolute(np.array(x))\r\n print(x.sum())\r\n print(np.sqrt((x**2).sum()))\r\n print(x.max())\r\n\r\n @staticmethod\r\n def c():\r\n n = 
int(sys.stdin.readline().rstrip())\r\n div = NumberTheory.find_divisors(n)\r\n print(*div, sep='\\n')\r\n\r\n @staticmethod\r\n def d():\r\n x, y, a, b = map(int, sys.stdin.readline().split())\r\n cnt = 0\r\n while x*a <= x+b:\r\n x *= a\r\n if x >= y:\r\n print(cnt); return\r\n cnt += 1\r\n cnt += (y-x-1) // b\r\n print(cnt)\r\n\r\n @staticmethod\r\n def e():\r\n n, *xyz = map(int, sys.stdin.read().split())\r\n\r\n xyz = list(zip(*[iter(xyz)] * 3))\r\n dist = [[0] * n for _ in range(n)]\r\n for i in range(n):\r\n a, b, c = xyz[i]\r\n for j in range(n):\r\n p, q, r = xyz[j]\r\n dist[i][j] = abs(p-a) + abs(q-b) + max(0, r-c)\r\n\r\n dp = [[inf] * n for _ in range(1<<n)]\r\n dp[0][0] = 0\r\n for s in range(1<<n):\r\n for i in range(n):\r\n t = s|(1<<i)\r\n for j in range(n):\r\n dp[t][i] = min(dp[t][i], dp[s][j]+dist[j][i])\r\n print(dp[-1][0])\r\n\r\n\r\n\r\n @staticmethod\r\n def f(): # rewrite with jit compiling later.\r\n n, m, l = map(int, sys.stdin.readline().split())\r\n c = Combinatorics.CombinationsMod(n, MOD)\r\n path = np.zeros(n+1, dtype=np.int64); path[1] = path[2] = 1\r\n for i in range(3, n+1): path[i] = path[i-1]*i%MOD\r\n cycle = np.zeros(n+1, dtype=np.int64); cycle[1:] = path[:-1]\r\n dp = np.zeros((n+1, m+1), dtype=np.int64)\r\n def f(l):\r\n dp[:,:] = 0; dp[0,0] = 1\r\n for i in range(n):\r\n for j in range(m+1):\r\n k = np.arange(1, min(l, n-i, m-j+1)+1)\r\n dp[i+k, j+k-1] += dp[i, j]*c(n-i-1, k-1)%MOD*path[k]%MOD\r\n dp[i+k, j+k-1] %= MOD\r\n k = np.arange(2, min(l, n-i, m-j)+1)\r\n dp[i+k, j+k] += dp[i, j]*c(n-i-1, k-1)%MOD*cycle[k]%MOD\r\n dp[i+k, j+k] %= MOD\r\n return dp[n, m]\r\n print((f(l)-f(l-1))%MOD)\r\n\r\n @staticmethod\r\n def f_2(): # PyPy\r\n n, m, l = map(int, sys.stdin.readline().split())\r\n c = Combinatorics.CombinationsMod(n, MOD)\r\n path = [0] * (n+1); path[1] = path[2] = 1\r\n for i in range(3, n+1): path[i] = path[i-1]*i%MOD\r\n cycle = [0] + path[:-1]\r\n def f(l):\r\n dp = [[0]*(m+1) for _ in range(n+1)]; dp[0][0] = 1\r\n for i in range(n):\r\n for j in range(m+1):\r\n for k in range(1, min(l, n-i, m-j+1)+1):\r\n dp[i+k][j+k-1] += dp[i][j]*c(n-i-1, k-1)%MOD*path[k]%MOD\r\n dp[i+k][j+k-1] %= MOD\r\n for k in range(1, min(l, n-i, m-j)+1):\r\n dp[i+k][j+k] += dp[i][j]*c(n-i-1, k-1)%MOD*cycle[k]%MOD\r\n dp[i+k][j+k] %= MOD\r\n return dp[n][m]\r\n print((f(l)-f(l-1))%MOD)\r\n\r\n\r\n class ABC181:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n print('White' if n&1==0 else 'Black')\r\n\r\n @staticmethod\r\n def b():\r\n n, *ab = map(int, sys.stdin.read().split())\r\n a, b = np.array(ab).reshape(n,2).T\r\n print(((a+b)*(b-a+1)//2).sum())\r\n\r\n @staticmethod\r\n def c():\r\n n, *xy = map(int, sys.stdin.read().split())\r\n i, j, k = np.array([*itertools.combinations(range(n),3)]).T\r\n x, y = np.array(xy).reshape(-1,2).T\r\n b = (y[j]-y[i])*(x[k]-x[j]) == (y[k]-y[j])*(x[j]-x[i])\r\n print('Yes' if b.any() else 'No')\r\n\r\n @staticmethod\r\n def d():\r\n n = sys.stdin.readline().rstrip()\r\n if len(n)<=2:\r\n print('Yes' if int(n)%8==0 or int(n[::-1])%8==0 else 'No')\r\n return\r\n c = Counter(n)\r\n for i in range(112, 1000, 8):\r\n if not Counter(str(i))-c: print('Yes'); return\r\n print('No')\r\n\r\n @staticmethod\r\n def e():\r\n n, m, *I = map(int, sys.stdin.read().split())\r\n I = np.array(I)\r\n h, w = np.sort(I[:n]), np.sort(I[-m:])\r\n tmp = np.pad(h[1:]-h[:-1], 1, constant_values=0)\r\n l = tmp.copy(); l[::2] = 0; np.cumsum(l, out=l)\r\n r = tmp.copy(); r[1::2] = 0; np.cumsum(r[::-1], out=r[::-1])\r\n i = 
np.searchsorted(w, h)\r\n d = np.pad(h[2:]-h[:-2], 1, constant_values=0); d[::2] = 0\r\n d += np.minimum(np.abs(h-w[np.maximum(i-1, 0)]), np.abs(h-w[np.minimum(m-1, i)]))\r\n print((l[:-1]+r[1:]+d).min())\r\n\r\n @staticmethod\r\n def f():\r\n n, *xy = map(int, sys.stdin.read().split())\r\n xy = np.array(xy).reshape(n,2)\r\n y = xy[:, 1]\r\n if n == 1: print(np.maximum(100-y, y+100)[0]/2); return\r\n ij = np.array([*itertools.combinations(range(n),2)])\r\n d = (np.diff(xy[ij], axis=1)**2).sum(axis=-1).ravel()\r\n def f(r):\r\n r *= 2\r\n uf = GeometryTopology.Graph(n+2); uf.init_dsu()\r\n for i in np.argwhere(y+100<=r).ravel(): uf.unite(i, n)\r\n for i in np.argwhere(100-y<=r).ravel(): uf.unite(i, n+1)\r\n for i, j in ij[np.argwhere(d<=r*r).ravel()]: uf.unite(i, j)\r\n return uf.same(n, n+1)\r\n\r\n def binary_search():\r\n lo, hi = 0, 200.1\r\n while hi-lo > 1e-9:\r\n r = (lo+hi)/2\r\n if f(r): hi = r\r\n else: lo = r\r\n return lo\r\n print(binary_search())\r\n\r\n class ARC106:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n a = 1\r\n while pow(3,a)<=n:\r\n m, b = n-pow(3,a), 1\r\n while pow(5,b)<=m:\r\n if pow(5,b)==m: print(a, b); return\r\n b += 1\r\n a += 1\r\n print(-1)\r\n\r\n @staticmethod\r\n def b():\r\n n, m = map(int, sys.stdin.readline().split())\r\n a = [int(x) for x in sys.stdin.readline().split()]\r\n b = [int(x) for x in sys.stdin.readline().split()]\r\n uf = GeometryTopology.Graph(n); uf.init_dsu()\r\n for _ in range(m):\r\n c, d = map(int, sys.stdin.readline().split()); c -= 1; d -= 1\r\n uf.unite(c, d)\r\n ga, gb = [[] for _ in range(n)], [[] for _ in range(n)]\r\n for i in range(n):\r\n r = uf.find(i)\r\n ga[r].append(a[i]); gb[r].append(b[i])\r\n print('Yes' if all(sum(ga[i])==sum(gb[i]) for i in range(n)) else 'No')\r\n\r\n @staticmethod\r\n def c():\r\n n, m = map(int, sys.stdin.readline().split())\r\n if m < 0: print(-1); return\r\n if n == 1:\r\n if m != 0: print(-1); return\r\n print(1, 2); return\r\n if m >= n-1: print(-1); return\r\n l, r = 1, 10**9\r\n print(l, r)\r\n for _ in range(n-2-m):\r\n l += 1; r -= 1; print(l, r)\r\n r = l\r\n for _ in range(m+1):\r\n l, r = r+1, r+2\r\n print(l, r)\r\n\r\n @staticmethod\r\n def d():\r\n mod = 998244353\r\n n, k, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n b = np.zeros((k+1, n), dtype=np.int64)\r\n b[0] = 1\r\n for i in range(k): b[i+1] = b[i]*a%mod\r\n s = b.sum(axis=1) % mod\r\n inv_2 = pow(2, mod-2, mod)\r\n c = Combinatorics.CombinationsMod(mod=mod)\r\n for x in range(1, k+1):\r\n l = np.arange(x+1)\r\n print(((c(x, l)*s[l]%mod*s[l][::-1]%mod).sum()%mod - pow(2,x,mod)*s[x])%mod*inv_2%mod)\r\n\r\n @staticmethod\r\n def e():\r\n pass\r\n\r\n @staticmethod\r\n def f():\r\n pass\r\n\r\n\r\n class ARC107:\r\n @staticmethod\r\n def a():\r\n a = np.array(sys.stdin.read().split(), dtype=np.int64)\r\n print(Algebra.cumprod((1+a)*a//2%MOD, mod=MOD)[2])\r\n\r\n @staticmethod\r\n def b():\r\n n, k = map(int, sys.stdin.readline().split())\r\n def c(m): return np.minimum(m-1,2*n-m+1)\r\n x = np.arange(2, 2*n+1)\r\n print((c(x)*c(x-k)*((x-k>=2)&(x-k<=2*n))).sum())\r\n\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a).reshape(n,n)\r\n fac, _ = Algebra.generate_fac_ifac(n=50, p=MOD)\r\n def count(a):\r\n uf = GeometryTopology.Graph(n); uf.init_dsu()\r\n for i, j in itertools.combinations(range(n),2):\r\n if (a[i]+a[j] <= k).all(): uf.unite(i,j)\r\n c = 1\r\n for g in uf.groups():\r\n if g: c *= fac[len(g)]; c %= MOD\r\n 
return c\r\n print(count(a)*count(a.T)%MOD)\r\n\r\n @staticmethod\r\n def d():\r\n n, k = map(int, sys.stdin.readline().split())\r\n @njit((i8,i8), cache=True)\r\n def solve(n, k):\r\n dp = np.zeros((n+1, 2*n+1), dtype=np.int64); dp[0,0] = 1\r\n for i in range(1, n+1):\r\n for j in range(i, 0, -1):\r\n dp[i,j] = dp[i-1,j-1] + dp[i,2*j]\r\n dp[i,j] %= MOD\r\n return dp[-1,k]\r\n print(solve(n,k))\r\n\r\n @staticmethod\r\n def e():\r\n pass\r\n\r\n @staticmethod\r\n def f():\r\n pass\r\n\r\n\r\n\r\n class ACL001:\r\n @staticmethod\r\n def a():\r\n n, *xy = map(int, sys.stdin.read().split())\r\n *xy, = zip(*[iter(xy)]*2)\r\n print(xy)\r\n pass\r\n\r\n\r\n\r\n class TDPC:\r\n @staticmethod\r\n def t():\r\n pass\r\n\r\n\r\n class ChokudaiSpecialRun001:\r\n @staticmethod\r\n def j():\r\n n, *a = map(int, sys.stdin.read().split())\r\n bit = GeometryTopology.FenwickTree(n)\r\n c = 0\r\n for x in a:\r\n c += bit.sum(1,n) - bit.sum(1,x)\r\n bit.add(x,1)\r\n print(c)\r\n\r\n class ALPC: # AtCoder Library Practice Contest\\\r\n @staticmethod\r\n def a():\r\n n, q, *tuv = map(int, sys.stdin.read().split())\r\n uf = GeometryTopology.Graph(n); uf.init_dsu()\r\n for t, u, v in zip(*[iter(tuv)]*3):\r\n if t == 0: uf.unite(u,v)\r\n else: print(int(uf.same(u,v)))\r\n\r\n @staticmethod\r\n def b():\r\n n, q = map(int, sys.stdin.readline().split())\r\n a = [int(x) for x in sys.stdin.readline().split()]\r\n bit = GeometryTopology.FenwickTree(n)\r\n for i in range(n): bit.add(i+1, a[i])\r\n for t, i, j in zip(*[map(int, sys.stdin.read().split())]*3):\r\n if t==0: bit.add(i+1,j)\r\n else: print(bit.sum(i+1,j))\r\n\r\n\r\n @staticmethod\r\n def g():\r\n n, m, *ab = map(int, sys.stdin.read().split())\r\n a, b = np.array(ab).reshape(m,2).T\r\n _, r = connected_components(csr_matrix(([1]*m, (a,b)), (n,n)), connection='strong')\r\n groups = [[] for _ in range(n)]\r\n for u in range(n): groups[r[u]].append(u)\r\n groups = [group for group in groups if group]\r\n print(len(groups))\r\n for group in groups[::-1]: print(len(group), *group)\r\n\r\n\r\n class MSolutions2020:\r\n @staticmethod\r\n def a():\r\n x = int(sys.stdin.readline().rstrip())\r\n x -= 400\r\n print(8-x//200)\r\n\r\n @staticmethod\r\n def b():\r\n r, g, b, k = map(int, sys.stdin.read().split())\r\n while k and g <= r:\r\n g *= 2\r\n k -= 1\r\n while k and b <= g:\r\n b *= 2\r\n k -= 1\r\n print('Yes' if r < g < b else 'No')\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *a = map(int, sys.stdin.read().split())\r\n for i in range(k, n):\r\n print('Yes' if a[i] > a[i-k] else 'No')\r\n\r\n @staticmethod\r\n def d():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a += [-1]\r\n m = 1000\r\n s = 0\r\n for i in range(n):\r\n if a[i+1] == a[i]: continue\r\n elif a[i+1] > a[i]:\r\n cnt = m//a[i]\r\n m -= a[i]*cnt\r\n s += cnt\r\n else:\r\n m += a[i]*s\r\n s = 0\r\n print(m)\r\n\r\n\r\nclass Codeforces:\r\n class CR676div2:\r\n @staticmethod\r\n def a():\r\n t = int(sys.stdin.readline().rstrip())\r\n for _ in range(t):\r\n a, b = map(int, sys.stdin.readline().split())\r\n print(a^b)\r\n\r\n @staticmethod\r\n def b():\r\n t = int(sys.stdin.readline().rstrip())\r\n for _ in range(t):\r\n n = int(sys.stdin.readline().rstrip())\r\n s = [list(sys.stdin.readline().rstrip()) for _ in range(n)]\r\n s[0][0] = s[-1][-1] = '0'\r\n for i in range(n):\r\n for j in range(n):\r\n s[i][j] = int(s[i][j])\r\n\r\n\r\n def can_goal(g, c=0):\r\n visited = [0] * n\r\n stack = [(0, 0)]\r\n visited[0] |= 1<<0\r\n while stack:\r\n y, x = stack.pop()\r\n for dy, dx in [(-1, 0), (0, -1), 
(1, 0), (0, 1)]:\r\n i, j = y+dy, x+dx\r\n if i<0 or i>=n or j<0 or j>=n: continue\r\n if i == j == n-1: return True\r\n if visited[i]>>j&1: continue\r\n visited[i] |= 1<<j\r\n if g[i][j] != c: continue\r\n stack.append((i, j))\r\n return False\r\n\r\n if not (can_goal(s, 0) or can_goal(s, 1)):\r\n print(0)\r\n continue\r\n\r\n flg = 0\r\n for i in range(n):\r\n for j in range(n):\r\n if i==j==0 or i==j==n-1: continue\r\n s[i][j] ^= 1\r\n if not (can_goal(s, 0) or can_goal(s, 1)):\r\n print(1)\r\n print(i+1, j+1)\r\n flg = 1\r\n break\r\n s[i][j] ^= 1\r\n if flg: break\r\n if flg: continue\r\n\r\n print(2)\r\n if s[0][1] == s[1][0]:\r\n print(n, n-1)\r\n print(n-1, n)\r\n continue\r\n\r\n if s[0][1] == s[-1][-2]:\r\n print(1, 2)\r\n print(n-1, n)\r\n else:\r\n print(1, 2)\r\n print(n, n-1)\r\n\r\n\r\n\r\n @staticmethod\r\n def c():\r\n pass\r\n\r\nclass ProjectEuler:\r\n @staticmethod\r\n def p1():\r\n def f(n, x):\r\n return (x + n//x*x) * (n//x) // 2\r\n n = 1000\r\n ans = f(n-1, 3)+f(n-1, 5)-f(n-1, 15)\r\n print(ans)\r\n\r\n @staticmethod\r\n def p2():\r\n fib = [1, 2]\r\n while fib[-1] < 4*10**6:\r\n fib.append(fib[-1]+fib[-2])\r\n print(sum(fib[1:-1:3]))\r\n\r\n @staticmethod\r\n def p3():\r\n pn = NumberTheory.PrimeNumbers()\r\n res = pn.factorize(600851475143)\r\n print(max(res.keys()))\r\n\r\n @staticmethod\r\n def p4():\r\n def is_palindrome(n):\r\n n = str(n)\r\n return n == n[::-1]\r\n cand = []\r\n for a in range(100, 1000):\r\n for b in range(a, 1000):\r\n n = a*b\r\n if is_palindrome(n): cand.append(n)\r\n print(max(cand))\r\n\r\n @staticmethod\r\n def p5():\r\n pn = NumberTheory.PrimeNumbers()\r\n res = defaultdict(int)\r\n for i in range(1, 21):\r\n for p, c in pn.factorize(i).items():\r\n res[p] = max(res[p], c)\r\n ans = 1\r\n for p, c in res.items(): ans *= pow(p, c)\r\n print(ans)\r\n\r\n @staticmethod\r\n def p6():\r\n a = np.arange(101)\r\n b = np.cumsum(a**2)\r\n a = a.cumsum()\r\n print(a[100]**2 - b[100])\r\n\r\n @staticmethod\r\n def p7():\r\n nt = NumberTheory.PrimeNumbers()\r\n print(sorted(nt)[10000])\r\n @staticmethod\r\n def p8():\r\n n = '7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450'\r\n n = [int(d) for d in list(n)]\r\n res = 0\r\n for i in range(988):\r\n x = 1\r\n for j in range(13):\r\n x *= n[i+j]\r\n res = max(res, x)\r\n print(res)\r\n @staticmethod\r\n def p9():\r\n for a in range(1, 997):\r\n for b in range(a, 998-a):\r\n c = 1000 - a - b\r\n if a**2 + b**2 == c**2:\r\n print(a*b*c)\r\n return\r\n @staticmethod\r\n def p10():\r\n pn = NumberTheory.PrimeNumbers(2*10**6+1)\r\n print(sum(pn))\r\n @staticmethod\r\n 
def p11():\r\n grid = '08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08 49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00 81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65 52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91 22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80 24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50 32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70 67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21 24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72 21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95 78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92 16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57 86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58 19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40 04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66 88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69 04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36 20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16 20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54 01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48'\r\n print(grid)\r\n\r\n pass\r\n\r\nclass Yukicoder:\r\n def __init__(self):\r\n pass\r\n\r\n def __call__(self):\r\n print(1)\r\n\r\n\r\nclass AOJ:\r\n @staticmethod\r\n def ALDS1_12_A(): # minimum spanning tree\r\n n, *a = map(int, sys.stdin.read().split())\r\n g = GeometryTopology.Graph(n)\r\n for i in range(n-1):\r\n for j in range(i+1, n):\r\n if a[i*n+j] == -1: continue\r\n g.add_edge(i,j, weight=a[i*n+j])\r\n g.add_edge(j,i, weight=a[i*n+j])\r\n _, d = g.kruskal()\r\n # _, d = g.prim()\r\n # _, d = g.boruvka()\r\n print(d)\r\n\r\n\r\n @staticmethod\r\n def GRL_3_C(): # strongly connected components\r\n n, m = map(int, sys.stdin.readline().split())\r\n _, r = connecte\r\n g = GeometryTopology.Graph(n)\r\n for _ in range(m): g.add_edge(*map(int, sys.stdin.readline().split()))\r\n r = g.scc()\r\n q, *uv = map(int, sys.stdin.read().split())\r\n for u, v in zip(*[iter(uv)] * 2): print(int(r[u]==r[v]))\r\n\r\n\r\n @staticmethod\r\n def DSL_2_B(): # Binary Indexed Tree (Fenwick Tree)\r\n n, q, *txy = map(int, sys.stdin.read().split())\r\n bit = GeometryTopology.FenwickTree(n)\r\n for t, x, y in zip(*[iter(txy)]*3):\r\n if t==0: bit.add(x, y)\r\n else: print(bit.sum(x,y))\r\n\r\n\r\nclass YosupoJudge:\r\n\r\n @staticmethod\r\n def PointAddRangeSum():\r\n n, q = map(int, sys.stdin.readline().split())\r\n a = [int(x) for x in sys.stdin.readline().split()]\r\n bit = GeometryTopology.FenwickTree(n)\r\n for i in range(n): bit.add(i+1, a[i])\r\n for t, i, j in zip(*[map(int, sys.stdin.read().split())]*3):\r\n if t==0: bit.add(i+1,j)\r\n else: print(bit.sum(i+1,j))\r\n\r\n @staticmethod\r\n def Directed_MST():\r\n n, m, s, *abc = map(int, sys.stdin.read().split())\r\n g = GeometryTopology.Graph(n)\r\n for a, b, c in zip(*[iter(abc)]*3):g.add_edge(a, b, weight=c)\r\n _, d, p = g.prim(src=s, return_parent=True)\r\n print(d)\r\n print(*p)\r\n\r\n @staticmethod\r\n def Manhattan_MST():\r\n n, *xy = map(int, sys.stdin.read().split())\r\n g = GeometryTopology.Graph(n)\r\n\r\n\r\nif __name__ == '__main__':\r\n AtCoder.ABC108.d()\r\n # AtCoder.ALPC.g_2()\r\n # AtCoder.ARC107.e()\r\n pass\n", "import sys\r\nimport typing\r\n\r\nimport numba as nb\r\nimport numpy as np\r\n\r\n\r\[email protected]\r\ndef sa_doubling(\r\n a: np.array,\r\n) -> np.array:\r\n n = a.size\r\n a = np.searchsorted(\r\n 
np.unique(a),\r\n a,\r\n )\r\n cnt = np.zeros(n + 1, dtype=np.int32)\r\n\r\n def count_sort(a):\r\n for x in a: cnt[x + 1] += 1\r\n for i in range(n): cnt[i + 1] += cnt[i]\r\n idx = np.empty(n, dtype=np.int32)\r\n for i in range(n):\r\n x = a[i]\r\n idx[cnt[x]] = i\r\n cnt[x] += 1\r\n cnt[:] = 0\r\n return idx\r\n\r\n k = 1\r\n rank = a\r\n while 1:\r\n b = np.zeros(n, dtype=np.int64)\r\n for i in range(n - k):\r\n b[i] = rank[i + k] + 1\r\n ord_b = count_sort(b)\r\n a = rank[ord_b]\r\n ord_a = count_sort(a)\r\n sa = ord_b[ord_a]\r\n c = a[ord_a] << 30 | b[sa]\r\n rank[sa[0]] = 0\r\n for i in range(n - 1):\r\n rank[sa[i + 1]] = rank[sa[i]] + (c[i + 1] > c[i])\r\n k *= 2\r\n if k >= n: break\r\n return sa\r\n\r\n\r\n\r\[email protected]\r\ndef kasai(\r\n a: np.array,\r\n sa: np.array,\r\n) -> np.array:\r\n n = a.size\r\n if n == 0:\r\n return np.full(n, -1, dtype=np.int32)\r\n\r\n rank = np.empty(n, np.int32)\r\n for i in range(n): rank[sa[i]] = i\r\n h, l = np.empty(n - 1, np.int32), 0\r\n for i in range(n):\r\n if l: l -= 1\r\n r = rank[i]\r\n if r == n - 1: continue\r\n j = sa[r + 1]\r\n while i + l < n and j + l < n:\r\n if a[i + l] != a[j + l]: break\r\n l += 1\r\n h[r] = l\r\n return h\r\n\r\n\r\n\r\[email protected](\r\n (nb.i8[:], ),\r\n cache=True,\r\n)\r\ndef solve(\r\n a: np.array,\r\n) -> typing.NoReturn:\r\n n = a.size\r\n sa = sa_doubling(a)\r\n lcp = kasai(a, sa)\r\n\r\n a = np.arange(n, 0, -1)\r\n for _ in range(2):\r\n st = []\r\n s = 0\r\n for i in range(n - 1):\r\n h = lcp[i]\r\n l = 1\r\n while st and st[-1][0] >= h:\r\n x = st.pop()\r\n l += x[1]\r\n s -= x[0] * x[1]\r\n s += h * l\r\n st.append((h, l))\r\n a[sa[i + 1]] += s\r\n sa = sa[::-1]\r\n lcp = lcp[::-1]\r\n\r\n for i in range(n):\r\n print(a[i])\r\n\r\n\r\n\r\ndef main() -> typing.NoReturn:\r\n n = int(sys.stdin.buffer.readline().rstrip())\r\n s = np.frombuffer(\r\n sys.stdin.buffer.readline().rstrip(),\r\n dtype='b',\r\n ).astype(np.int64)\r\n solve(s)\r\n\r\n\r\n\r\nmain()\n", "import sys\r\nimport typing\r\n\r\nimport numba as nb\r\nimport numpy as np\r\n\r\n\r\[email protected]((nb.i8[:, :], ), cache=True)\r\ndef solve(b) -> typing.NoReturn:\r\n n, m = b.shape\r\n for i in range(n - 1):\r\n if not np.all(b[i + 1] == b[i] + 7):\r\n print('No')\r\n return\r\n for j in range(m - 1):\r\n if not np.all(b[:, j + 1] == b[:, j] + 1):\r\n print('No')\r\n return\r\n if m == 7 and b[0, 0] % 7 != 0:\r\n print('No')\r\n print('Yes')\r\n\r\n\r\n\r\ndef main() -> typing.NoReturn:\r\n n, m = map(int, input().split())\r\n b = np.array(\r\n sys.stdin.read().split(),\r\n dtype=np.int64,\r\n ).reshape(n, m)\r\n solve(b)\r\n\r\n\r\n\r\nmain()\n", "import sys\r\n\r\nimport numpy as np\r\n\r\nn = int(sys.stdin.readline().rstrip())\r\ngraph = [None] * n\r\nfor i in range(n):\r\n a = int(sys.stdin.readline().rstrip())\r\n if not a: continue\r\n graph[i] = np.array([sys.stdin.readline().split() for _ in range(a)], dtype=np.int16).T\r\n\r\ndef main():\r\n comb = (np.arange(1 << n)[:, None] >> np.arange(n) & 1).astype(np.bool)\r\n ok = np.full(1 << n, True)\r\n for i in range(n):\r\n if graph[i] is None: continue\r\n x, y = graph[i]\r\n ok &= ~comb[:, i] | np.all(comb[:, x-1] == y.astype(np.bool), axis=1)\r\n\r\n print(np.amax(comb[ok].sum(axis=1)))\r\n\r\nif __name__ == '__main__':\r\n main()\n", "import sys\r\nimport typing\r\n\r\nimport numba as nb\r\nimport numpy as np\r\n\r\n\r\[email protected]((nb.i8[:, :], ), cache=True)\r\ndef solve(a: np.ndarray) -> typing.NoReturn:\r\n mod = 998244353\r\n h, w = a.shape\r\n\r\n cnt = 1\r\n 
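# buf counts cells of each color (0='.', 1='R', 2='B') on anti-diagonal p; a diagonal with neither R nor B can be colored 2 ways\r\n    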
buf = np.zeros(3, np.int64)\r\n for p in range(h + w - 1):\r\n buf[:] = 0\r\n for i in range(min(p + 1, h)):\r\n j = p - i\r\n if j >= w: continue\r\n buf[a[i, j]] += 1\r\n if buf[1] == 0 and buf[2] == 0:\r\n cnt *= 2\r\n elif buf[1] >=1 and buf[2] >=1:\r\n cnt *= 0\r\n cnt %= mod\r\n print(cnt)\r\n\r\n\r\n\r\ndef main() -> typing.NoReturn:\r\n h, w = map(int, sys.stdin.buffer.readline().split())\r\n s = np.frombuffer(\r\n sys.stdin.buffer.read(),\r\n dtype='S1',\r\n ).reshape(h, w + 1)[:, :-1]\r\n a = np.empty((h, w), np.int64)\r\n a[s == b'.'] = 0\r\n a[s == b'R'] = 1\r\n a[s == b'B'] = 2\r\n solve(a)\r\n\r\n\r\nmain()\n", "import sys\r\nimport typing\r\n\r\nimport numba as nb\r\nimport numpy as np\r\n\r\n\r\[email protected]\r\ndef bit_count(n: int) -> int:\r\n cnt = 0\r\n while n:\r\n cnt += n & 1\r\n n >>= 1\r\n return cnt\r\n\r\n\r\[email protected]((nb.i8[:, :], nb.i8[:, :]), cache=True)\r\ndef solve(a: np.ndarray, xy: np.ndarray) -> typing.NoReturn:\r\n n = len(a)\r\n m = len(xy)\r\n\r\n inf = 1 << 60\r\n cost = np.zeros((n, n), np.int64)\r\n for i in range(m):\r\n x, y = xy[i]\r\n cost[x, y] = cost[y, x] = inf\r\n\r\n\r\n dp = np.full((1 << n, n), inf, np.int64)\r\n for i in range(n):\r\n dp[1 << i, i] = a[i, 0]\r\n\r\n for s in range(1 << n):\r\n i = bit_count(s) - 1\r\n for v in range(n):\r\n if ~s >> v & 1: continue\r\n t = s & ~(1 << v)\r\n for u in range(n):\r\n if ~t >> u & 1: continue\r\n dp[s, v] = min(\r\n dp[s, v],\r\n dp[t, u] + cost[u, v] + a[v, i]\r\n )\r\n mn = dp[-1].min()\r\n print(-1 if mn == inf else mn)\r\n\r\n\r\n\r\ndef main() -> typing.NoReturn:\r\n n = int(input())\r\n I = np.array(\r\n sys.stdin.read().split(),\r\n dtype=np.int64,\r\n )\r\n a = I[:n * n].reshape(n, n)\r\n xy = I[n * n + 1:].reshape(-1, 2) - 1\r\n solve(a, xy)\r\n\r\n\r\nmain()\n", "import typing\n\nimport numba as nb\nimport numpy as np\n\n\[email protected]\ndef find_divisors(\n n: int,\n) -> np.array:\n i = np.arange(int(n ** .5))\n i += 1\n i = i[n % i == 0]\n i = np.hstack((i, n // i))\n return np.unique(i)\n\n\n\[email protected]\ndef euler_totient(\n n: int,\n) -> int:\n c = n\n p = 2\n while p * p <= n:\n if n % p:\n p += 1\n continue\n c = c // p * (p - 1)\n while not n % p: n //= p\n if n > 1:\n c = c // n * (n - 1)\n return c\n\n\[email protected](\n (nb.i8, ),\n cache=True,\n)\ndef solve(\n p: int,\n) -> typing.NoReturn:\n n = p - 1\n divs = find_divisors(n)\n mod = 998244353\n s = 1\n for d in divs:\n s += euler_totient(d) * d\n s %= mod\n print(s)\n\n\ndef main() -> typing.NoReturn:\n p = int(input())\n solve(p)\n\n\nmain()\n", "import sys\r\nimport typing\r\n\r\nimport numba as nb\r\nimport numpy as np\r\n\r\n\r\[email protected]((nb.i8[:],), cache=True)\r\ndef solve(a: np.ndarray) -> typing.NoReturn:\r\n n = len(a)\r\n b = np.zeros(1 << 20, np.bool8)\r\n for i in range(n):\r\n x = a[i]\r\n while x % 2 == 0:\r\n x //= 2\r\n b[x] = True\r\n\r\n cnt = 0\r\n for x in b:\r\n cnt += x\r\n print(cnt)\r\n\r\n\r\ndef main() -> typing.NoReturn:\r\n n = int(input())\r\n a = np.array(\r\n sys.stdin.readline().split(),\r\n dtype=np.int64,\r\n )\r\n solve(a)\r\n\r\n\r\nmain()\r\n", "import itertools\r\nimport math\r\nimport string\r\nimport sys\r\nfrom bisect import bisect_left as bi_l\r\nfrom bisect import bisect_right as bi_r\r\nfrom collections import Counter, defaultdict, deque\r\nfrom functools import lru_cache, reduce\r\nfrom heapq import heapify, heappop, heappush\r\nfrom operator import or_, xor\r\n\r\nsys.setrecursionlimit(10**7)\r\ninf = float(\"inf\")\r\nMOD = 10**9 + 7\r\n# MOD 
= 998244353\r\n\r\n\r\nusing_numpy = 1\r\nimport networkx as nx\r\nimport numpy as np\r\nfrom numba import i8, njit\r\nfrom scipy import optimize\r\nfrom scipy.ndimage import distance_transform_cdt\r\nfrom scipy.sparse import csr_matrix\r\nfrom scipy.sparse.csgraph import (\r\n connected_components,\r\n csgraph_to_dense,\r\n maximum_flow,\r\n minimum_spanning_tree,\r\n shortest_path,\r\n)\r\nfrom scipy.spatial import ConvexHull\r\nfrom scipy.special import comb\r\n\r\n\r\nclass Algebra:\r\n class Modular(int):\r\n def __init__(self, n, mod=MOD):\r\n self.value = n\r\n self.mod = mod\r\n\r\n def __str__(self):\r\n return f\"{self.value}\"\r\n\r\n def __add__(self, other):\r\n return self.__class__((self.value + other.value) % self.mod)\r\n\r\n def __sub__(self, x):\r\n return self.__class__((self.value - x.value) % self.mod)\r\n\r\n def __mul__(self, x):\r\n return self.__class__((self.value * x.value) % self.mod)\r\n\r\n def __pow__(self, x):\r\n return self.__class__(pow(self.value, x.value, self.mod))\r\n\r\n def __lt__(self, x):\r\n return self.value < x.value\r\n\r\n def __le__(self, x):\r\n return self.value <= x.value\r\n\r\n def __eq__(self, x):\r\n return self.value == x.value\r\n\r\n def __ne__(self, x):\r\n return self.value != x.value\r\n\r\n def __gt__(self, x):\r\n return self.value > x.value\r\n\r\n def __ge__(self, x):\r\n return self.value >= x.value\r\n\r\n class SemiGroup:\r\n pass\r\n\r\n class Monoid:\r\n pass\r\n\r\n class Group:\r\n pass\r\n\r\n class SemiRing:\r\n pass\r\n\r\n class Ring:\r\n pass\r\n\r\n @staticmethod\r\n def identity(n):\r\n if using_numpy:\r\n return np.identity(n, dtype=np.int64)\r\n else:\r\n a = [[0] * n for _ in range(n)]\r\n for i in range(n):\r\n a[i][i] = 1\r\n return a\r\n\r\n @staticmethod\r\n def dot(a, b):\r\n if using_numpy:\r\n return np.dot(a, b)\r\n else:\r\n h, w, l = len(a), len(b[0]), len(b)\r\n assert len(a[0]) == l\r\n c = [[0] * w for _ in range(h)]\r\n for i in range(h):\r\n for j in range(w):\r\n for k in range(l):\r\n c[i][j] += a[i][k] * b[k][j]\r\n return c\r\n\r\n @classmethod\r\n def matrix_pow(cls, a, n, mod=10**9 + 7):\r\n m = len(a)\r\n b = cls.identity(m)\r\n while n:\r\n if n & 1:\r\n b = cls.dot(b, a)\r\n n >>= 1\r\n a = cls.dot(a, a)\r\n if using_numpy:\r\n a %= mod\r\n b %= mod\r\n else:\r\n for i in range(m):\r\n for j in range(m):\r\n a[i][j] %= mod\r\n b[i][j] %= mod\r\n return b\r\n\r\n @staticmethod\r\n def bitwise_dot(a, b):\r\n if using_numpy:\r\n return np.bitwise_xor.reduce(\r\n a[:, None, :] & b.T[None, :, :], axis=-1\r\n )\r\n else:\r\n h, w, l = len(a), len(b[0]), len(b)\r\n assert len(a[0]) == l\r\n c = [[0] * w for _ in range(h)]\r\n for i in range(h):\r\n for j in range(w):\r\n for k in range(l):\r\n c[i][j] ^= a[i][k] & b[k][j]\r\n return c\r\n\r\n @classmethod\r\n def bitwise_mat_pow(cls, a, n):\r\n if n == 0:\r\n return np.eye(len(a), dtype=np.uint32) * ((1 << 32) - 1)\r\n res = cls.bitwise_mat_pow(a, n // 2)\r\n res = cls.bitwise_dot(res, res)\r\n return cls.bitwise_dot(res, a) if n & 1 else res\r\n\r\n @staticmethod\r\n def cumprod(a, mod):\r\n l = len(a)\r\n sql = int(np.sqrt(l) + 1)\r\n a = np.resize(a, sql**2).reshape(sql, sql)\r\n for i in range(sql - 1):\r\n a[:, i + 1] *= a[:, i]\r\n a[:, i + 1] %= mod\r\n for i in range(sql - 1):\r\n a[i + 1] *= a[i, -1]\r\n a[i + 1] %= mod\r\n return np.ravel(a)[:l]\r\n\r\n @classmethod\r\n def generate_fac_ifac(cls, n, p=MOD):\r\n if using_numpy:\r\n fac = np.arange(n + 1)\r\n fac[0] = 1\r\n fac = cls.cumprod(fac, p)\r\n ifac = np.arange(n + 1, 0, 
-1)\r\n ifac[0] = pow(int(fac[-1]), p - 2, p)\r\n ifac = cls.cumprod(ifac, p)[n::-1]\r\n else:\r\n fac = [None] * (n + 1)\r\n fac[0] = 1\r\n for i in range(n):\r\n fac[i + 1] = fac[i] * (i + 1) % p\r\n ifac = [None] * (n + 1)\r\n ifac[n] = pow(fac[n], p - 2, p)\r\n for i in range(n, 0, -1):\r\n ifac[i - 1] = ifac[i] * i % p\r\n return fac, ifac\r\n\r\n class Kitamasa:\r\n pass\r\n\r\n\r\nmint = Algebra.Modular\r\n\r\n\r\nclass NumberTheory:\r\n class PrimeNumbers: # pn\r\n def __init__(self, n=2 * 10**6):\r\n self.is_prime, self.prime_nums = self.find(n)\r\n\r\n def __call__(self, n):\r\n return self.is_prime[n]\r\n\r\n def __iter__(self):\r\n return iter(self.prime_nums)\r\n\r\n def __getitem__(self, key):\r\n return self.prime_nums[key]\r\n\r\n @staticmethod\r\n def find(n): # Sieve of eratosthenes\r\n if using_numpy:\r\n is_prime = np.ones(n + 1, dtype=np.bool)\r\n is_prime[:2] = 0\r\n for i in range(2, int(n**0.5) + 1):\r\n if is_prime[i]:\r\n is_prime[i * 2 :: i] = 0\r\n prime_nums = np.flatnonzero(is_prime)\r\n else:\r\n is_prime = [True] * (n + 1)\r\n is_prime[0] = is_prime[1] = 0\r\n for i in range(2, int(n**0.5) + 1):\r\n if not is_prime[i]:\r\n continue\r\n for j in range(i * 2, n + 1, i):\r\n is_prime[j] = 0\r\n prime_nums = [i for i in range(2, n + 1) if is_prime[i]]\r\n return is_prime, prime_nums\r\n\r\n @lru_cache(maxsize=None)\r\n def factorize(self, n):\r\n res = defaultdict(int)\r\n if n < 2:\r\n return res\r\n for p in self:\r\n if p * p > n:\r\n break\r\n while n % p == 0:\r\n res[p] += 1\r\n n //= p\r\n if n == 1:\r\n return res\r\n res[n] = 1\r\n return res\r\n\r\n def factorize_factorial(self, n):\r\n res = defaultdict(int)\r\n for i in range(2, n + 1):\r\n for p, c in self.factorize(i).items():\r\n res[p] += c\r\n return res\r\n\r\n @classmethod\r\n @lru_cache(maxsize=None)\r\n def gcd(cls, a, b):\r\n return cls.gcd(b, a % b) if b else abs(a)\r\n\r\n @classmethod\r\n def lcm(cls, a, b):\r\n return abs(a // cls.gcd(a, b) * b)\r\n\r\n @staticmethod\r\n def find_divisors(n):\r\n divisors = []\r\n for i in range(1, int(n**0.5) + 1):\r\n if n % i:\r\n continue\r\n divisors.append(i)\r\n j = n // i\r\n if j != i:\r\n divisors.append(j)\r\n return sorted(divisors)\r\n\r\n @staticmethod\r\n def base_convert(n, b):\r\n if not n:\r\n return [0]\r\n res = []\r\n while n:\r\n n, r = divmod(n, b)\r\n if r < 0:\r\n n += 1\r\n r -= b\r\n res.append(r)\r\n return res\r\n\r\n\r\nclass Combinatorics:\r\n @classmethod\r\n @lru_cache(maxsize=None)\r\n def choose(cls, n, r, mod=None):\r\n if r > n or r < 0:\r\n return 0\r\n if r == 0:\r\n return 1\r\n res = cls.choose(n - 1, r, mod) + cls.choose(n - 1, r - 1, mod)\r\n if mod:\r\n res %= mod\r\n return res\r\n\r\n class CombinationsMod:\r\n def __init__(self, n=2 * 10**6, mod=MOD):\r\n self.__mod = mod\r\n self.__fac, self.__ifac = Algebra.generate_fac_ifac(n, mod)\r\n\r\n def __call__(self, n, r):\r\n return self.__choose(n, r)\r\n\r\n def __choose(self, n, r):\r\n bl = (0 <= r) & (r <= n)\r\n p = self.__mod\r\n return (\r\n bl\r\n * self.__fac[n]\r\n * self.__ifac[r]\r\n % p\r\n * self.__ifac[n - r]\r\n % p\r\n )\r\n\r\n def make_nchoose_table(self, n):\r\n p = self.__mod\r\n r = len(self.__fac) - 1\r\n if using_numpy:\r\n n_choose = np.arange(n + 1, n - r, -1)\r\n n_choose[0] = 1\r\n n_choose = Algebra.cumprod(n_choose, p) * self.__ifac % p\r\n else:\r\n n_choose = [None] * (r + 1)\r\n n_choose[0] = 1\r\n for i in range(r):\r\n n_choose[i + 1] = n_choose[i] * (n - i) % p\r\n for i in range(1, r + 1):\r\n n_choose[i] = n_choose[i] * 
self.__ifac[i] % p\r\n return n_choose\r\n\r\n @classmethod\r\n def permutations(cls, a, r=None, i=0):\r\n a = list(a)\r\n n = len(a)\r\n if r is None:\r\n r = n\r\n res = []\r\n if r > n or i > r:\r\n return res\r\n if i == r:\r\n return [tuple(a[:r])]\r\n for j in range(i, n):\r\n a[i], a[j] = a[j], a[i]\r\n res += cls.permutations(a, r, i + 1)\r\n return res\r\n\r\n @staticmethod\r\n def combinations(a, r):\r\n a = tuple(a)\r\n n = len(a)\r\n if r > n:\r\n return\r\n indices = list(range(r))\r\n yield a[:r]\r\n while True:\r\n for i in range(r - 1, -1, -1):\r\n if indices[i] != i + n - r:\r\n break\r\n else:\r\n return\r\n indices[i] += 1\r\n for j in range(i + 1, r):\r\n indices[j] = indices[j - 1] + 1\r\n yield tuple(a[i] for i in indices)\r\n\r\n\r\nclass DP:\r\n @staticmethod\r\n def LIS(a):\r\n res = [inf] * len(a)\r\n for x in a:\r\n res[bi_l(res, x)] = x\r\n return res\r\n\r\n\r\nclass String:\r\n @staticmethod\r\n def z_algorithm(s):\r\n n = len(s)\r\n a = [0] * n\r\n a[0] = n\r\n l = r = -1\r\n for i in range(1, n):\r\n if r >= i:\r\n a[i] = min(a[i - l], r - i)\r\n while i + a[i] < n and s[i + a[i]] == s[a[i]]:\r\n a[i] += 1\r\n if i + a[i] >= r:\r\n l, r = i, i + a[i]\r\n return a\r\n\r\n\r\nclass GeometryTopology:\r\n class Graph:\r\n class __Edge:\r\n def __init__(self, weight=1, capacity=1, **args):\r\n self.weight = weight\r\n self.capacity = capacity\r\n\r\n class __Node:\r\n def __init__(self, **args):\r\n pass\r\n\r\n def __init__(self, n=0):\r\n self.__N = n\r\n self.nodes = [None] * n\r\n self.edges = [{} for _ in range(n)]\r\n\r\n def add_node_info(self, v, **args):\r\n self.nodes[v] = self.__Node(**args)\r\n\r\n def add_edge(self, u, v, **args):\r\n self.edges[u][v] = self.__Edge(**args)\r\n\r\n def get_size(self):\r\n return self.__N\r\n\r\n def bfs(self, src=0):\r\n n = self.__N\r\n self.depth = self.lv = lv = [None] * n\r\n lv[src] = 0 # depth in tree, or level in general graph.\r\n self.dist = dist = [inf] * n\r\n dist[src] = 0 # dist for only tree.\r\n self.parent = par = [None] * n\r\n par[src] = src\r\n q = deque([src])\r\n while q:\r\n u = q.popleft()\r\n for v, e in self.edges[u].items():\r\n if e.capacity == 0 or lv[v] is not None:\r\n continue\r\n lv[v], dist[v], par[v] = lv[u] + 1, dist[u] + e.weight, u\r\n q.append(v)\r\n return dist\r\n\r\n def dinic(self, src, sink):\r\n def flow_to_sink(u, flow_in):\r\n if u == sink:\r\n return flow_in\r\n flow = 0\r\n for v, e in self.edges[u].items():\r\n if e.capacity == 0 or self.lv[v] <= self.lv[u]:\r\n continue\r\n f = flow_to_sink(v, min(flow_in, e.capacity))\r\n if not f:\r\n continue\r\n self.edges[u][v].capacity -= f\r\n if u in self.edges[v]:\r\n self.edges[v][u].capacity += f\r\n else:\r\n self.add_edge(v, u, capacity=f)\r\n flow_in -= f\r\n flow += f\r\n return flow\r\n\r\n flow = 0\r\n while True:\r\n self.bfs(src)\r\n if self.lv[sink] is None:\r\n return flow\r\n flow += flow_to_sink(src, inf)\r\n\r\n def ford_fulkerson(self):\r\n pass\r\n\r\n def push_relabel(self):\r\n pass\r\n\r\n def floyd_warshall(self):\r\n n = self.__N\r\n d = [[inf] * n for _ in range(n)]\r\n for u in range(n):\r\n d[u][u] = 0\r\n for v, e in self.edges[u].items():\r\n d[u][v] = e.weight\r\n for w in range(n):\r\n for u in range(n):\r\n for v in range(n):\r\n d[u][v] = min(d[u][v], d[u][w] + d[w][v])\r\n return d\r\n\r\n def dijkstra(self, src, paths_cnt=False, mod=None):\r\n dist = [inf] * self.__N\r\n dist[src] = 0\r\n visited = [False] * self.__N\r\n paths = [0] * self.__N\r\n paths[src] = 1\r\n q = [(0, src)]\r\n while 
q:\r\n d, u = heappop(q)\r\n if visited[u]:\r\n continue\r\n visited[u] = True\r\n for v, e in self.edges[u].items():\r\n dv = d + e.weight\r\n if dv > dist[v]:\r\n continue\r\n elif dv == dist[v]:\r\n paths[v] += paths[u]\r\n if mod:\r\n paths[v] %= mod\r\n continue\r\n paths[v], dist[v] = paths[u], dv\r\n heappush(q, (dv, v))\r\n if paths_cnt:\r\n return dist, paths\r\n else:\r\n return dist\r\n\r\n def astar(self, src, tgt, heuristic_func):\r\n cost = [inf] * self.__N\r\n q = [(heuristic_func(src, tgt), 0, src)]\r\n while q:\r\n _, c, u = heappop(q)\r\n if u == tgt:\r\n return c\r\n if cost[u] != inf:\r\n continue\r\n cost[u] = c\r\n for v, e in self.edges[u].items():\r\n if cost[v] != inf:\r\n continue\r\n h = heuristic_func(v, tgt)\r\n nc = c + e.weight\r\n heappush(q, (h + nc, nc, v))\r\n return inf\r\n\r\n def bellman_ford(self, src):\r\n n = self.__N\r\n d = [inf] * n\r\n d[src] = 0\r\n for _ in range(n - 1):\r\n for u in range(n):\r\n for v, e in self.edges[u].items():\r\n d[v] = min(d[v], d[u] + e.weight)\r\n\r\n for u in range(n):\r\n for v, e in self.edges[u].items():\r\n if d[u] + e.weight < d[v]:\r\n raise Exception(\"found negative cycle.\")\r\n\r\n return d\r\n\r\n def find_ancestors(self): # tree doubling.\r\n self.__ancestors = ancestors = [self.parent]\r\n for _ in range(max(self.depth).bit_length()):\r\n ancestors.append([ancestors[-1][u] for u in ancestors[-1]])\r\n\r\n def find_dist(self, u, v):\r\n return (\r\n self.dist[u]\r\n + self.dist[v]\r\n - 2 * self.dist[self.__find_lca(u, v)]\r\n )\r\n\r\n def __find_lca(self, u, v):\r\n du, dv = self.depth[u], self.depth[v]\r\n if du > dv:\r\n u, v = v, u\r\n du, dv = dv, du\r\n\r\n d = dv - du\r\n for i in range(d.bit_length()): # up-stream\r\n if d >> i & 1:\r\n v = self.__ancestors[i][v]\r\n if v == u:\r\n return v\r\n\r\n for i in range(\r\n du.bit_length() - 1, -1, -1\r\n ): # find direct child of LCA.\r\n nu, nv = self.__ancestors[i][u], self.__ancestors[i][v]\r\n if nu == nv:\r\n continue\r\n u, v = nu, nv\r\n\r\n return self.__ancestors[0][u]\r\n\r\n def init_dsu(self): # disjoint set union (union-find)\r\n n = self.__N\r\n self.parent = list(range(n))\r\n self.rank = [0] * n\r\n self.size = [1] * n\r\n\r\n def find(self, u):\r\n if self.parent[u] == u:\r\n return u\r\n self.parent[u] = self.find(self.parent[u])\r\n return self.parent[u]\r\n\r\n def unite(self, u, v):\r\n u, v = self.find(u), self.find(v)\r\n if u == v:\r\n return\r\n if self.rank[u] < self.rank[v]:\r\n u, v = v, u\r\n self.parent[v] = u\r\n self.size[u] += self.size[v]\r\n self.rank[u] = max(self.rank[u], self.rank[v] + 1)\r\n\r\n def same(self, u, v):\r\n return self.find(u) == self.find(v)\r\n\r\n def scc(self): # strongly connected components\r\n n = self.__N\r\n visited, q, root, r = [False] * n, [], [None] * n, 0\r\n gg = self.__class__(n)\r\n for u in range(n):\r\n for v in self.edges[u]:\r\n gg.add_edge(v, u)\r\n\r\n def dfs(u):\r\n if visited[u]:\r\n return\r\n visited[u] = True\r\n for v in self.edges[u]:\r\n dfs(v)\r\n q.append(u)\r\n\r\n def rev_dfs(u, r):\r\n if root[u] is not None:\r\n return\r\n root[u] = r\r\n for v in gg.edges[u]:\r\n rev_dfs(v, r)\r\n\r\n for u in range(n):\r\n dfs(u)\r\n for u in q[::-1]:\r\n rev_dfs(u, r)\r\n r += 1\r\n return root\r\n\r\n def kruskal(self): # minimum spanning tree\r\n n = self.__N\r\n uf = self.__class__(n)\r\n uf.init_dsu()\r\n edges = sorted(\r\n [\r\n (u, v, e.weight)\r\n for u in range(n)\r\n for v, e in self.edges[u].items()\r\n ],\r\n key=lambda x: x[2],\r\n )\r\n g = self.__class__(n)\r\n 
d = 0\r\n for u, v, w in edges:\r\n if uf.same(u, v):\r\n continue\r\n uf.unite(u, v)\r\n g.add_edge(u, v, weight=w)\r\n d += w\r\n return g, d\r\n\r\n def prim(self, src=0, return_parent=False): # minimum spanning tree\r\n n = self.__N\r\n g = self.__class__(n)\r\n parent, visited, dist = [None] * n, [False] * n, 0\r\n q = [(0, (src, src))]\r\n while q:\r\n d, (w, u) = heappop(q)\r\n if visited[u]:\r\n continue\r\n visited[u], parent[u] = True, w\r\n dist += d\r\n g.add_edge(w, u, weight=d)\r\n for v, e in self.edges[u].items():\r\n if not visited[v]:\r\n heappush(q, (e.weight, (u, v)))\r\n if return_parent:\r\n return g, dist, parent\r\n return g, dist\r\n\r\n def boruvka(self): # minimum spanning tree\r\n n = self.__N\r\n uf = self.__class__(n)\r\n uf.init_dsu()\r\n g = self.__class__(n)\r\n d = 0\r\n\r\n def dfs(u):\r\n if visited[u]:\r\n return (inf, (None, None))\r\n visited[u] = True\r\n cand = []\r\n for v, e in self.edges[u].items():\r\n if uf.same(u, v):\r\n cand.append(dfs(v))\r\n continue\r\n cand.append((e.weight, (u, v)))\r\n return sorted(cand)[0]\r\n\r\n while len(set(uf.parent)) != 1:\r\n edges, visited = [], [False] * n\r\n for u in range(n):\r\n if visited[u]:\r\n continue\r\n edges.append(dfs(u))\r\n for w, (u, v) in edges:\r\n if uf.same(u, v):\r\n continue\r\n g.add_edge(u, v, weight=w)\r\n uf.unite(u, v)\r\n d += w\r\n for u in range(n):\r\n uf.find(u)\r\n\r\n return g, d\r\n\r\n def tsp(self): # traveling salesperson problem\r\n pass\r\n\r\n @staticmethod\r\n def triangle_area(p0, p1, p2, signed=False):\r\n x1, y1, x2, y2 = (\r\n p1[0] - p0[0],\r\n p1[1] - p0[1],\r\n p2[0] - p0[0],\r\n p2[1] - p0[1],\r\n )\r\n return (\r\n (x1 * y2 - x2 * y1) / 2 if signed else abs(x1 * y2 - x2 * y1) / 2\r\n )\r\n\r\n @classmethod\r\n def intersect(cls, seg1, seg2):\r\n (p1, p2), (p3, p4) = seg1, seg2\r\n t1 = cls.triangle_area(p1, p2, p3, signed=True)\r\n t2 = cls.triangle_area(p1, p2, p4, signed=True)\r\n t3 = cls.triangle_area(p3, p4, p1, signed=True)\r\n t4 = cls.triangle_area(p3, p4, p2, signed=True)\r\n return (t1 * t2 < 0) & (t3 * t4 < 0)\r\n\r\n\r\ndef cumxor(a):\r\n return reduce(xor, a, 0)\r\n\r\n\r\ndef cumor(a):\r\n return reduce(or_, a, 0)\r\n\r\n\r\ndef bit_count(n):\r\n cnt = 0\r\n while n:\r\n cnt += n & 1\r\n n >>= 1\r\n return cnt\r\n\r\n\r\nclass AtCoder:\r\n class ABC001:\r\n @staticmethod\r\n def a():\r\n h1, h2 = map(int, sys.stdin.read().split())\r\n print(h1 - h2)\r\n\r\n @staticmethod\r\n def d():\r\n def to_minuites(x):\r\n q, r = divmod(x, 100)\r\n return 60 * q + r\r\n\r\n def to_hmform(x):\r\n q, r = divmod(x, 60)\r\n return 100 * q + r\r\n\r\n n = int(sys.stdin.readline().rstrip())\r\n term = [0] * 2001\r\n for _ in range(n):\r\n s, e = map(\r\n to_minuites,\r\n map(int, sys.stdin.readline().rstrip().split(\"-\")),\r\n )\r\n s = s // 5 * 5\r\n e = (e + 4) // 5 * 5\r\n term[s] += 1\r\n term[e + 1] -= 1\r\n for i in range(2000):\r\n term[i + 1] += term[i]\r\n\r\n res = []\r\n raining = False\r\n for i in range(2001):\r\n if term[i]:\r\n if not raining:\r\n s = i\r\n raining = True\r\n elif raining:\r\n res.append((s, i - 1))\r\n raining = False\r\n for s, e in res:\r\n print(f\"{to_hmform(s):04}-{to_hmform(e):04}\")\r\n\r\n class ABC002:\r\n @staticmethod\r\n def a():\r\n print(max(map(int, sys.stdin.readline().split())))\r\n\r\n @staticmethod\r\n def b():\r\n vowels = set(\"aeiou\")\r\n print(\r\n \"\".join(\r\n [\r\n c\r\n for c in sys.stdin.readline().rstrip()\r\n if c not in vowels\r\n ]\r\n )\r\n )\r\n\r\n @staticmethod\r\n def c():\r\n print(\r\n 
GeometryTopology.triangle_area(\r\n *map(int, sys.stdin.readline().split())\r\n )\r\n )\r\n\r\n @staticmethod\r\n def d():\r\n n, m = map(int, sys.stdin.readline().split())\r\n edges = set(\r\n (x - 1, y - 1)\r\n for x, y in zip(*[map(int, sys.stdin.read().split())] * 2)\r\n )\r\n print(\r\n max(\r\n len(s)\r\n for i in range(1, 1 << n)\r\n for s in [[j for j in range(n) if i >> j & 1]]\r\n if all(\r\n (x, y) in edges\r\n for x, y in itertools.combinations(s, 2)\r\n )\r\n )\r\n )\r\n\r\n @staticmethod\r\n def d_2():\r\n n, m = map(int, sys.stdin.readline().split())\r\n relations = [1 << i for i in range(n)]\r\n for x, y in zip(*[map(int, sys.stdin.read().split())] * 2):\r\n relations[x] |= 1 << (y - 1)\r\n relations[y] |= 1 << (x - 1)\r\n res = 0\r\n for i in range(1 << n):\r\n s, cnt = (1 << n) - 1, 0\r\n for j in range(n):\r\n if i >> j & 1:\r\n t &= relations[j] | 1 << j\r\n cnt += 1\r\n if s & i == i:\r\n res = max(res, cnt)\r\n print(res)\r\n\r\n class ABC003:\r\n @staticmethod\r\n def a():\r\n print((int(sys.stdin.readline().rstrip()) + 1) * 5000)\r\n\r\n @staticmethod\r\n def b():\r\n atcoder = set(\"atcoder\")\r\n s, t = sys.stdin.read().split()\r\n print(\r\n all(\r\n s[i] == t[i]\r\n or s[i] == \"@\"\r\n and t[i] in atcoder\r\n or t[i] == \"@\"\r\n and s[i] in atcoder\r\n for i in range(len(s))\r\n )\r\n and \"You can win\"\r\n or \"You will lose\"\r\n )\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *r = map(int, sys.stdin.read().split())\r\n print(reduce(lambda x, y: (x + y) / 2, sorted(r)[-k:], 0))\r\n\r\n class ABC004:\r\n @staticmethod\r\n def a():\r\n print(int(sys.stdin.readline().rstrip()) * 2)\r\n\r\n @staticmethod\r\n def b():\r\n for l in [sys.stdin.readline().rstrip() for _ in range(4)][::-1]:\r\n print(l[::-1])\r\n\r\n @staticmethod\r\n def c():\r\n n = int(sys.stdin.readline().rstrip()) % 30\r\n res = list(range(1, 7))\r\n for i in range(n):\r\n i %= 5\r\n res[i], res[i + 1] = res[i + 1], res[i]\r\n print(*res, sep=\"\")\r\n\r\n class ABC005:\r\n @staticmethod\r\n def a():\r\n x, y = map(int, sys.stdin.readline().split())\r\n print(y // x)\r\n\r\n @staticmethod\r\n def b():\r\n n, *t = map(int, sys.stdin.read().split())\r\n print(min(t))\r\n\r\n @staticmethod\r\n def c():\r\n t = int(sys.stdin.readline().rstrip())\r\n n = int(sys.stdin.readline().rstrip())\r\n a = [int(x) for x in sys.stdin.readline().split()]\r\n m = int(sys.stdin.readline().rstrip())\r\n b = [int(x) for x in sys.stdin.readline().split()]\r\n i = 0\r\n for p in b:\r\n if i == n:\r\n print(\"no\")\r\n return\r\n while p - a[i] > t:\r\n i += 1\r\n if i == n:\r\n print(\"no\")\r\n return\r\n if a[i] > p:\r\n print(\"no\")\r\n return\r\n i += 1\r\n print(\"yes\")\r\n\r\n @staticmethod\r\n def d():\r\n n = int(sys.stdin.readline().rstrip())\r\n d = np.array(\r\n [sys.stdin.readline().split() for _ in range(n)], np.int64\r\n )\r\n s = d.cumsum(axis=0).cumsum(axis=1)\r\n s = np.pad(s, 1)\r\n max_del = np.zeros((n + 1, n + 1), dtype=np.int64)\r\n for y in range(1, n + 1):\r\n for x in range(1, n + 1):\r\n max_del[y, x] = np.amax(\r\n s[y : n + 1, x : n + 1]\r\n - s[0 : n - y + 1, x : n + 1]\r\n - s[y : n + 1, 0 : n - x + 1]\r\n + s[0 : n - y + 1, 0 : n - x + 1]\r\n )\r\n res = np.arange(n**2 + 1)[:, None]\r\n i = np.arange(1, n + 1)\r\n res = max_del[i, np.minimum(res // i, n)].max(axis=1)\r\n q = int(sys.stdin.readline().rstrip())\r\n p = np.array(sys.stdin.read().split(), dtype=np.int64)\r\n print(*res[p], sep=\"\\n\")\r\n\r\n class ABC006:\r\n @staticmethod\r\n def a():\r\n n = 
sys.stdin.readline().rstrip()\r\n if \"3\" in n:\r\n print(\"YES\")\r\n elif int(n) % 3 == 0:\r\n print(\"YES\")\r\n else:\r\n print(\"NO\")\r\n\r\n @staticmethod\r\n def b():\r\n mod = 10007\r\n a = np.eye(N=3, k=-1, dtype=np.int64)\r\n a[0] = 1\r\n n = int(sys.stdin.readline().rstrip())\r\n a = Algebra.matrix_pow(a, n - 1, mod)\r\n print(a[2][0])\r\n\r\n @staticmethod\r\n def c():\r\n n, m = map(int, sys.stdin.readline().split())\r\n cnt = [0, 0, 0]\r\n if m == 1:\r\n cnt = [-1, -1, -1]\r\n else:\r\n if m & 1:\r\n m -= 3\r\n cnt[1] += 1\r\n n -= 1\r\n cnt[2] = m // 2 - n\r\n cnt[0] = n - cnt[2]\r\n if cnt[0] < 0 or cnt[1] < 0 or cnt[2] < 0:\r\n print(-1, -1, -1)\r\n else:\r\n print(*cnt, sep=\" \")\r\n\r\n @staticmethod\r\n def d():\r\n n, *c = map(int, sys.stdin.read().split())\r\n lis = [inf] * n\r\n for x in c:\r\n lis[bi_l(lis, x)] = x\r\n print(n - bi_l(lis, inf))\r\n\r\n class ABC007:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n print(n - 1)\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip()\r\n if s == \"a\":\r\n print(-1)\r\n else:\r\n print(\"a\")\r\n\r\n @staticmethod\r\n def c():\r\n r, c = map(int, sys.stdin.readline().split())\r\n sy, sx = map(int, sys.stdin.readline().split())\r\n gy, gx = map(int, sys.stdin.readline().split())\r\n sy -= 1\r\n sx -= 1\r\n gy -= 1\r\n gx -= 1\r\n maze = [sys.stdin.readline().rstrip() for _ in range(r)]\r\n queue = deque([(sy, sx)])\r\n dist = np.full((r, c), np.inf)\r\n dist[sy, sx] = 0\r\n while queue:\r\n y, x = queue.popleft()\r\n for i, j in [(-1, 0), (1, 0), (0, -1), (0, 1)]:\r\n i += y\r\n j += x\r\n if maze[i][j] == \"#\" or dist[i, j] != np.inf:\r\n continue\r\n dist[i, j] = dist[y, x] + 1\r\n queue.append((i, j))\r\n print(int(dist[gy, gx]))\r\n\r\n @staticmethod\r\n def d():\r\n ng = set([4, 9])\r\n\r\n def count(d):\r\n return d if d <= 4 else d - 1\r\n\r\n def f(n):\r\n x = [int(d) for d in str(n)]\r\n flg = True\r\n dp = 0\r\n for d in x:\r\n dp = dp * 8 + flg * count(d)\r\n if d in ng:\r\n flg = False\r\n return n - (dp + flg)\r\n\r\n a, b = map(int, sys.stdin.readline().split())\r\n print(f(b) - f(a - 1))\r\n\r\n class ABC008:\r\n @staticmethod\r\n def a():\r\n s, t = map(int, sys.stdin.readline().split())\r\n print(t - s + 1)\r\n\r\n @staticmethod\r\n def b():\r\n n, *s = sys.stdin.read().split()\r\n res = defaultdict(int)\r\n for name in s:\r\n res[name] += 1\r\n print(sorted(res.items(), key=lambda x: x[1])[-1][0])\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n c = n - np.count_nonzero(a[:, None] % a, axis=1)\r\n print(np.sum((c + 1) // 2 / c))\r\n\r\n @staticmethod\r\n def d():\r\n w, h, n, *xy = map(int, sys.stdin.read().split())\r\n (*xy,) = zip(*([iter(xy)] * 2))\r\n\r\n @lru_cache(maxsize=None)\r\n def count(x1, y1, x2, y2):\r\n res = 0\r\n for x, y in xy:\r\n if not (x1 <= x <= x2 and y1 <= y <= y2):\r\n continue\r\n cnt = (x2 - x1) + (y2 - y1) + 1\r\n cnt += count(x1, y1, x - 1, y - 1)\r\n cnt += count(x1, y + 1, x - 1, y2)\r\n cnt += count(x + 1, y1, x2, y - 1)\r\n cnt += count(x + 1, y + 1, x2, y2)\r\n res = max(res, cnt)\r\n return res\r\n\r\n print(count(1, 1, w, h))\r\n\r\n class ABC009:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n print((n + 1) // 2)\r\n\r\n @staticmethod\r\n def b():\r\n n, *a = map(int, sys.stdin.read().split())\r\n print(sorted(set(a))[-2])\r\n\r\n @staticmethod\r\n def c():\r\n n, k = map(int, sys.stdin.readline().split())\r\n s = 
list(sys.stdin.readline().rstrip())\r\n cost = [1] * n\r\n r = k\r\n for i in range(n - 1):\r\n q = []\r\n for j in range(i + 1, n):\r\n if s[j] < s[i] and cost[i] + cost[j] <= r:\r\n heappush(q, (s[j], cost[i] + cost[j], -j))\r\n if not q:\r\n continue\r\n _, c, j = heappop(q)\r\n j = -j\r\n s[i], s[j] = s[j], s[i]\r\n r -= c\r\n cost[i] = cost[j] = 0\r\n print(\"\".join(s))\r\n\r\n @staticmethod\r\n def d():\r\n k, m = map(int, sys.stdin.readline().split())\r\n a = np.array([int(x) for x in sys.stdin.readline().split()])\r\n c = np.array([int(x) for x in sys.stdin.readline().split()])\r\n mask = (1 << 32) - 1\r\n d = np.eye(k, k, -1, dtype=np.uint32) * mask\r\n d[0] = c\r\n if m <= k:\r\n print(a[m - 1])\r\n return\r\n # print(Algebra.bitwise_mat_pow(d, m-k))\r\n # print(Algebra.bitwise_dot(Algebra.bitwise_mat_pow(d, m-k), a[::-1].reshape(-1, 1))[0].item())\r\n print(\r\n Algebra.bitwise_dot(\r\n Algebra.bitwise_mat_pow(d, m - k), a[::-1].reshape(-1, 1)\r\n )[0][0]\r\n )\r\n\r\n class ABC010:\r\n @staticmethod\r\n def a():\r\n print(sys.stdin.readline().rstrip() + \"pp\")\r\n\r\n @staticmethod\r\n def b():\r\n n, *a = map(int, sys.stdin.read().split())\r\n tot = 0\r\n for x in a:\r\n c = 0\r\n while x % 2 == 0 or x % 3 == 2:\r\n x -= 1\r\n c += 1\r\n tot += c\r\n print(tot)\r\n\r\n @staticmethod\r\n def c():\r\n sx, sy, gx, gy, t, v, n, *xy = map(int, sys.stdin.read().split())\r\n x, y = np.array(xy).reshape(-1, 2).T\r\n\r\n def dist(x1, y1, x2, y2):\r\n return np.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)\r\n\r\n ans = (\r\n \"YES\"\r\n if (dist(sx, sy, x, y) + dist(x, y, gx, gy) <= v * t).any()\r\n else \"NO\"\r\n )\r\n print(ans)\r\n\r\n @staticmethod\r\n def d():\r\n n, g, e = map(int, sys.stdin.readline().split())\r\n p = [int(x) for x in sys.stdin.readline().split()]\r\n x, y = [], []\r\n for _ in range(e):\r\n a, b = map(int, sys.stdin.readline().split())\r\n x.append(a)\r\n y.append(b)\r\n x.append(b)\r\n y.append(a)\r\n for a in p:\r\n x.append(a)\r\n y.append(n)\r\n if not x:\r\n print(0)\r\n return\r\n c = [1] * len(x)\r\n min_cut = maximum_flow(\r\n csr_matrix((c, (x, y)), (n + 1, n + 1)), source=0, sink=n\r\n ).flow_value\r\n print(min_cut)\r\n\r\n @staticmethod\r\n def d_2():\r\n n, g, e = map(int, sys.stdin.readline().split())\r\n graph = nx.DiGraph()\r\n graph.add_nodes_from(range(n + 1))\r\n for p in [int(x) for x in sys.stdin.readline().split()]:\r\n graph.add_edge(p, n, capacity=1)\r\n for _ in range(e):\r\n a, b = map(int, sys.stdin.readline().split())\r\n graph.add_edge(a, b, capacity=1)\r\n graph.add_edge(b, a, capacity=1)\r\n print(nx.minimum_cut_value(graph, 0, n))\r\n\r\n @staticmethod\r\n def d_3():\r\n n, q, m = map(int, sys.stdin.readline().split())\r\n g = GeometryTopology.Graph(n + 1)\r\n # for i in range(n+1): g.add_node(i)\r\n for p in [int(x) for x in sys.stdin.readline().split()]:\r\n g.add_edge(p, n, capacity=1)\r\n for a, b in zip(*[map(int, sys.stdin.read().split())] * 2):\r\n g.add_edge(a, b, capacity=1)\r\n g.add_edge(b, a, capacity=1)\r\n print(g.dinic(0, n))\r\n\r\n class ABC011:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n print(n % 12 + 1)\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip()\r\n print(s[0].upper() + s[1:].lower())\r\n\r\n @staticmethod\r\n def c():\r\n n, *ng = map(int, sys.stdin.read().split())\r\n ng = set(ng)\r\n if n in ng:\r\n print(\"NO\")\r\n else:\r\n r = 100\r\n while n > 0:\r\n if r == 0:\r\n print(\"NO\")\r\n return\r\n for i in range(3, 0, -1):\r\n if (n - i) in ng:\r\n 
continue\r\n n -= i\r\n r -= 1\r\n break\r\n else:\r\n print(\"NO\")\r\n return\r\n print(\"YES\")\r\n\r\n @staticmethod\r\n def d():\r\n n, d, x, y = map(int, sys.stdin.read().split())\r\n x, y = abs(x), abs(y)\r\n if x % d or y % d:\r\n print(0)\r\n return\r\n x, y = x // d, y // d\r\n r = n - (x + y)\r\n if r < 0 or r & 1:\r\n print(0)\r\n return\r\n\r\n res = 0\r\n half_p = pow(1 / 2, n)\r\n for d in range(r // 2 + 1): # 0 <= d <= r//2, south\r\n south, north = d, y + d\r\n west = (r - 2 * d) // 2\r\n res += (\r\n half_p\r\n * comb(n, south, exact=True)\r\n * comb(n - south, north, exact=True)\r\n * comb(n - south - north, west, exact=True)\r\n * half_p\r\n )\r\n print(res)\r\n\r\n class ABC012:\r\n @staticmethod\r\n def a():\r\n a, b = map(int, sys.stdin.readline().split())\r\n print(b, a)\r\n\r\n @staticmethod\r\n def b():\r\n n = int(sys.stdin.readline().rstrip())\r\n h, n = divmod(n, 3600)\r\n m, s = divmod(n, 60)\r\n print(f\"{h:02}:{m:02}:{s:02}\")\r\n\r\n @staticmethod\r\n def c():\r\n n = 2025 - int(sys.stdin.readline().rstrip())\r\n res = []\r\n for i in range(1, 10):\r\n if n % i != 0 or n // i > 9:\r\n continue\r\n res.append(f\"{i} x {n//i}\")\r\n print(*sorted(res), sep=\"\\n\")\r\n\r\n @staticmethod\r\n def d():\r\n n, m, *abt = map(int, sys.stdin.read().split())\r\n a, b, t = np.array(abt).reshape(m, 3).T\r\n res = shortest_path(\r\n csr_matrix((t, (a - 1, b - 1)), (n, n)),\r\n method=\"FW\",\r\n directed=False,\r\n )\r\n print(res.max(axis=-1).min().astype(np.int64))\r\n\r\n @staticmethod\r\n def d_2():\r\n n, m, *abt = map(int, sys.stdin.read().split())\r\n g = GeometryTopology.Graph(n)\r\n for a, b, t in zip(*[iter(abt)] * 3):\r\n a -= 1\r\n b -= 1\r\n g.add_edge(a, b, weight=t)\r\n g.add_edge(b, a, weight=t)\r\n\r\n print(min(max(d) for d in g.floyd_warshall()))\r\n\r\n class ABC013:\r\n @staticmethod\r\n def a():\r\n print(ord(sys.stdin.readline().rstrip()) - ord(\"A\") + 1)\r\n\r\n @staticmethod\r\n def b():\r\n a, b = map(int, sys.stdin.read().split())\r\n d = abs(a - b)\r\n print(min(d, 10 - d))\r\n\r\n @staticmethod\r\n def c():\r\n n, h, a, b, c, d, e = map(int, sys.stdin.read().split())\r\n y = np.arange(n + 1)\r\n x = (n * e - h - (d + e) * y) // (b + e) + 1\r\n np.maximum(x, 0, out=x)\r\n np.minimum(x, n - y, out=x)\r\n print(np.amin(a * x + c * y))\r\n\r\n @staticmethod\r\n def d():\r\n n, m, d, *a = map(int, sys.stdin.read().split())\r\n res = list(range(n))\r\n\r\n def swap(i, j):\r\n res[i], res[j] = res[j], res[i]\r\n\r\n for i in a[::-1]:\r\n swap(i - 1, i)\r\n res = np.array(res)\r\n\r\n def binary_method(a, p):\r\n b = np.arange(n)\r\n while p:\r\n if p & 1:\r\n b = a[b]\r\n p >>= 1\r\n a = a[a]\r\n return b\r\n\r\n print(*(binary_method(res, d) + 1), sep=\"\\n\")\r\n\r\n class ABC014:\r\n @staticmethod\r\n def a():\r\n a, b = map(int, sys.stdin.read().split())\r\n print((a + b - 1) // b * b - a)\r\n\r\n @staticmethod\r\n def b():\r\n n, x, *a = map(int, sys.stdin.read().split())\r\n print(sum(a[i] for i in range(n) if x >> i & 1))\r\n\r\n @staticmethod\r\n def c():\r\n n, *ab = map(int, sys.stdin.read().split())\r\n a, b = np.array(ab).reshape(n, 2).T\r\n res = np.zeros(10**6 + 2, dtype=np.int64)\r\n np.add.at(res, a, 1)\r\n np.subtract.at(res, b + 1, 1)\r\n np.cumsum(res, out=res)\r\n print(res.max())\r\n\r\n @staticmethod\r\n def d():\r\n n = int(sys.stdin.readline().rstrip())\r\n g = GeometryTopology.Graph(n)\r\n for _ in range(n - 1):\r\n x, y = map(int, sys.stdin.readline().split())\r\n x -= 1\r\n y -= 1\r\n g.add_edge(x, y, weight=1)\r\n 
g.add_edge(y, x, weight=1)\r\n\r\n g.bfs(0)\r\n g.find_ancestors()\r\n\r\n q, *ab = map(int, sys.stdin.read().split())\r\n for a, b in zip(*[iter(ab)] * 2):\r\n a -= 1\r\n b -= 1\r\n print(g.find_dist(a, b) + 1)\r\n\r\n class ABC015:\r\n @staticmethod\r\n def a():\r\n a, b = sys.stdin.read().split()\r\n print(a if len(a) > len(b) else b)\r\n\r\n @staticmethod\r\n def b():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n print(\r\n np.ceil(\r\n a[np.nonzero(a)[0]].sum() / np.count_nonzero(a)\r\n ).astype(np.int8)\r\n )\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *t = map(int, sys.stdin.read().split())\r\n t = np.array(t).reshape(n, k)\r\n x = np.zeros((1, 1), dtype=np.int8)\r\n for i in range(n):\r\n x = x.reshape(-1, 1) ^ t[i]\r\n print(\"Found\" if np.count_nonzero(x == 0) > 0 else \"Nothing\")\r\n\r\n @staticmethod\r\n def d():\r\n w, n, k, *ab = map(int, sys.stdin.read().split())\r\n dp = np.zeros((k + 1, w + 1), dtype=np.int32)\r\n for a, b in zip(*[iter(ab)] * 2):\r\n np.maximum(dp[1:, a:], dp[:-1, :-a] + b, out=dp[1:, a:])\r\n print(dp[k][w])\r\n\r\n class ABC016:\r\n @staticmethod\r\n def a():\r\n m, d = map(int, sys.stdin.readline().split())\r\n print(\"YES\" if m % d == 0 else \"NO\")\r\n\r\n @staticmethod\r\n def b():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n f1, f2 = a + b == c, a - b == c\r\n if f1 & f2:\r\n print(\"?\")\r\n elif f1 & (~f2):\r\n print(\"+\")\r\n elif (~f1) & f2:\r\n print(\"-\")\r\n else:\r\n print(\"!\")\r\n\r\n @staticmethod\r\n def c():\r\n n, _, *ab = map(int, sys.stdin.read().split())\r\n f = [0] * n\r\n for a, b in zip(*[iter(ab)] * 2):\r\n a -= 1\r\n b -= 1\r\n f[a] |= 1 << b\r\n f[b] |= 1 << a\r\n res = [\r\n bit_count(\r\n cumor(f[j] for j in range(n) if f[i] >> j & 1)\r\n & ~(f[i] | 1 << i)\r\n )\r\n for i in range(n)\r\n ]\r\n print(*res, sep=\"\\n\")\r\n\r\n @staticmethod\r\n def d():\r\n sx, sy, gx, gy = map(int, sys.stdin.readline().split())\r\n seg1 = ((sx, sy), (gx, gy))\r\n n = int(sys.stdin.readline().rstrip())\r\n p1 = (\r\n np.array(sys.stdin.read().split(), dtype=np.int64)\r\n .reshape(n, 2)\r\n .T\r\n )\r\n p2 = np.hstack((p1[:, 1:], p1[:, :1]))\r\n seg2 = (p1, p2)\r\n print(\r\n np.count_nonzero(GeometryTopology.intersect(seg1, seg2)) // 2\r\n + 1\r\n )\r\n\r\n class ABC017:\r\n @staticmethod\r\n def a():\r\n s, e = (\r\n np.array(sys.stdin.read().split(), dtype=np.int16)\r\n .reshape(3, 2)\r\n .T\r\n )\r\n print((s // 10 * e).sum())\r\n\r\n @staticmethod\r\n def b():\r\n choku_tail = set(\"ch, o, k, u\".split(\", \"))\r\n\r\n def is_choku(s):\r\n if s == \"\":\r\n return True\r\n if len(s) >= 1 and (s[-1] in choku_tail) and is_choku(s[:-1]):\r\n return True\r\n if len(s) >= 2 and (s[-2:] in choku_tail) and is_choku(s[:-2]):\r\n return True\r\n return False\r\n\r\n print(\"YES\" if is_choku(sys.stdin.readline().rstrip()) else \"NO\")\r\n\r\n @staticmethod\r\n def c():\r\n n, m, *lrs = map(int, sys.stdin.read().split())\r\n l, r, s = np.array(lrs).reshape(n, 3).T\r\n score = np.zeros((m + 1,), dtype=np.int32)\r\n np.add.at(score, l - 1, s)\r\n np.subtract.at(score, r, s)\r\n np.cumsum(score, out=score)\r\n print(s.sum() - score[:m].min())\r\n\r\n @staticmethod\r\n def d():\r\n n, m, *f = map(int, sys.stdin.read().split())\r\n prev = [0] * (n + 1)\r\n tmp = defaultdict(int)\r\n for i in range(n):\r\n prev[i + 1] = tmp[f[i]]\r\n tmp[f[i]] = i + 1\r\n\r\n dp = [0] * (n + 1)\r\n dp[0] = 1\r\n l, s = 0, dp[0]\r\n for i in range(1, n + 1):\r\n while l < prev[i]:\r\n s = (s - dp[l]) % MOD\r\n l += 1\r\n dp[i] 
= s\r\n s = (s + dp[i]) % MOD\r\n print(dp[n])\r\n\r\n class ABC018:\r\n @staticmethod\r\n def a():\r\n (*a,) = map(int, sys.stdin.read().split())\r\n a = sorted(enumerate(a), key=lambda x: -x[1])\r\n res = [None] * 3\r\n for i in range(3):\r\n res[a[i][0]] = i + 1\r\n print(*res, sep=\"\\n\")\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip()\r\n n, *lr = map(int, sys.stdin.read().split())\r\n for l, r in zip(*[iter(lr)] * 2):\r\n l -= 1\r\n r -= 1\r\n s = s[:l] + s[l : r + 1][::-1] + s[r + 1 :]\r\n print(s)\r\n\r\n @staticmethod\r\n def c():\r\n r, c, k = map(int, sys.stdin.readline().split())\r\n s = np.array([list(s) for s in sys.stdin.read().split()])\r\n s = np.pad(s, 1, constant_values=\"x\")\r\n\r\n a = np.zeros_like(s, dtype=np.float64)\r\n a[s == \"o\"] = np.inf\r\n for i in range(1, r + 1):\r\n np.minimum(a[i - 1, :] + 1, a[i, :], out=a[i, :])\r\n for i in range(r, 0, -1):\r\n np.minimum(a[i + 1, :] + 1, a[i, :], out=a[i, :])\r\n for j in range(1, c + 1):\r\n np.minimum(a[:, j - 1] + 1, a[:, j], out=a[:, j])\r\n for j in range(c, 0, -1):\r\n np.minimum(a[:, j + 1] + 1, a[:, j], out=a[:, j])\r\n print(np.count_nonzero(a >= k))\r\n\r\n @staticmethod\r\n def c_2():\r\n r, c, k = map(int, sys.stdin.readline().split())\r\n s = np.array([list(s) for s in sys.stdin.read().split()])\r\n s = np.pad(s, 1, constant_values=\"x\")\r\n a = (s == \"o\").astype(np.int16)\r\n a = distance_transform_cdt(a, metric=\"taxicab\")\r\n print(np.count_nonzero(a >= k))\r\n\r\n @staticmethod\r\n def d():\r\n n, m, p, q, r, *xyz = map(int, sys.stdin.read().split())\r\n x, y, z = np.array(xyz).reshape(r, 3).T\r\n h = np.zeros((n, m), dtype=np.int32)\r\n h[x - 1, y - 1] = z\r\n g = np.array([*itertools.combinations(range(n), p)])\r\n print(np.sort(h[g].sum(axis=1), axis=1)[:, -q:].sum(axis=1).max())\r\n\r\n class ABC019:\r\n @staticmethod\r\n def a():\r\n (*a,) = map(int, sys.stdin.readline().split())\r\n print(sorted(a)[1])\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip() + \"$\"\r\n cnt = 0\r\n prev = \"$\"\r\n t = \"\"\r\n for c in s:\r\n if c == prev:\r\n cnt += 1\r\n continue\r\n t += prev + str(cnt)\r\n prev = c\r\n cnt = 1\r\n print(t[2:])\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n res = set()\r\n for x in a:\r\n while not x & 1:\r\n x >>= 1\r\n res.add(x)\r\n print(len(res))\r\n\r\n @staticmethod\r\n def d():\r\n def inquire(u, v):\r\n print(f\"? {u} {v}\".format(u, v), flush=True)\r\n return int(sys.stdin.readline().rstrip())\r\n\r\n n = int(sys.stdin.readline().rstrip())\r\n u = sorted([(inquire(1, v), v) for v in range(2, n + 1)])[-1][1]\r\n d = max((inquire(u, v)) for v in range(1, n + 1) if u != v)\r\n print(f\"! 
{d}\")\r\n\r\n class ABC020:\r\n @staticmethod\r\n def a():\r\n print(\r\n \"ABC\"\r\n if int(sys.stdin.readline().rstrip()) == 1\r\n else \"chokudai\"\r\n )\r\n\r\n @staticmethod\r\n def b():\r\n a, b = sys.stdin.readline().split()\r\n print(int(a + b) * 2)\r\n\r\n @staticmethod\r\n def c():\r\n h, w, t = map(int, sys.stdin.readline().split())\r\n s = [list(s) for s in sys.stdin.read().split()]\r\n for i in range(h):\r\n for j in range(w):\r\n if s[i][j] == \"S\":\r\n sy, sx = i, j\r\n if s[i][j] == \"G\":\r\n gy, gx = i, j\r\n s[sy][sx] = s[gy][gx] = \".\"\r\n source, target = sy * w + sx, gy * w + gx\r\n\r\n def heuristic_function(u, v=target):\r\n uy, ux = divmod(u, w)\r\n vy, vx = divmod(v, w)\r\n return abs(vy - uy) + abs(ux - vx)\r\n\r\n def min_time(x):\r\n g = GeometryTopology.Graph(h * w)\r\n # g = nx.DiGraph()\r\n\r\n for i in range(h):\r\n for j in range(w):\r\n u = i * w + j\r\n if i > 0:\r\n g.add_edge(\r\n u,\r\n (i - 1) * w + j,\r\n weight=(1 if s[i - 1][j] == \".\" else x),\r\n )\r\n if i < h - 1:\r\n g.add_edge(\r\n u,\r\n (i + 1) * w + j,\r\n weight=(1 if s[i + 1][j] == \".\" else x),\r\n )\r\n if j > 0:\r\n g.add_edge(\r\n u,\r\n i * w + j - 1,\r\n weight=(1 if s[i][j - 1] == \".\" else x),\r\n )\r\n if j < w - 1:\r\n g.add_edge(\r\n u,\r\n i * w + j + 1,\r\n weight=(1 if s[i][j + 1] == \".\" else x),\r\n )\r\n\r\n return g.dijkstra(source)[target]\r\n return g.astar(source, target, heuristic_function)\r\n # return nx.dijkstra_path_length(g, source, target)\r\n # return nx.astar_path_length(g, source, target, heuristic_function)\r\n\r\n def binary_search():\r\n lo, hi = 1, t + 1\r\n while lo + 1 < hi:\r\n x = (lo + hi) // 2\r\n if min_time(x) > t:\r\n hi = x\r\n else:\r\n lo = x\r\n return lo\r\n\r\n print(binary_search())\r\n\r\n @staticmethod\r\n def d():\r\n n, k = map(int, sys.stdin.readline().split())\r\n div = sorted(NumberTheory.find_divisors(k))\r\n l = len(div)\r\n s = [0] * l\r\n for i, d in enumerate(div):\r\n s[i] = (1 + n // d) * (n // d) // 2 * d % MOD\r\n for i in range(l - 1, -1, -1):\r\n for j in range(i + 1, l):\r\n if div[j] % div[i]:\r\n continue\r\n s[i] = (s[i] - s[j]) % MOD\r\n\r\n print(\r\n sum(s[i] * k // div[i] % MOD for i in range(l)) % MOD\r\n ) # ans is LCM.\r\n\r\n class ABC021:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n s = [1 << i for i in range(5) if n >> i & 1]\r\n print(len(s), *s, sep=\"\\n\")\r\n\r\n @staticmethod\r\n def b():\r\n n, a, b, k, *p = map(int, sys.stdin.read().split())\r\n print(\"YES\" if len(set(p) | set([a, b])) == k + 2 else \"NO\")\r\n\r\n @staticmethod\r\n def c():\r\n n, a, b, m, *xy = map(int, sys.stdin.read().split())\r\n x, y = np.array(xy).reshape(m, 2).T - 1\r\n a -= 1\r\n b -= 1\r\n g = csgraph_to_dense(\r\n csr_matrix((np.ones(m), (x, y)), (n, n), dtype=np.int8)\r\n )\r\n g = np.logical_or(g, g.T)\r\n paths = np.zeros(n, dtype=np.int64).reshape(-1, 1)\r\n paths[a, 0] = 1\r\n while not paths[b, 0]:\r\n paths = np.dot(g, paths) % MOD\r\n print(paths[b, 0])\r\n\r\n @staticmethod\r\n def c_2():\r\n n, a, b, m, *xy = map(int, sys.stdin.read().split())\r\n a -= 1\r\n b -= 1\r\n g = GeometryTopology.Graph()\r\n\r\n for x, y in zip(*[iter(xy)] * 2):\r\n x -= 1\r\n y -= 1\r\n g.add_edge(x, y, weight=1)\r\n g.add_edge(y, x, weight=1)\r\n\r\n dist, paths = g.dijkstra(a, paths_cnt=True, mod=MOD)\r\n print(paths[b])\r\n\r\n @staticmethod\r\n def d():\r\n n, k = map(int, sys.stdin.read().split())\r\n cn = Combinatorics.CombinationsMod()\r\n print(cn(n + k - 1, k))\r\n\r\n class 
ABC022:\r\n @staticmethod\r\n def a():\r\n n, s, t, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n np.cumsum(a, out=a)\r\n print(((s <= a) & (a <= t)).sum())\r\n\r\n @staticmethod\r\n def b():\r\n n, *a = map(int, sys.stdin.read().split())\r\n c = Counter(a)\r\n print(sum(c.values()) - len(c))\r\n\r\n @staticmethod\r\n def c():\r\n n, m, *uvl = map(int, sys.stdin.read().split())\r\n u, v, l = np.array(uvl).reshape(m, 3).T\r\n u -= 1\r\n v -= 1\r\n g = csgraph_to_dense(csr_matrix((l, (u, v)), (n, n)))\r\n g += g.T\r\n g[g == 0] = np.inf\r\n dist0 = g[0].copy()\r\n g[0] = 0\r\n g[:, 0] = 0\r\n dist = shortest_path(g, method=\"FW\", directed=False)\r\n u, v = np.array([*itertools.combinations(range(1, n), 2)]).T\r\n res = (dist0[u] + dist[u, v] + dist0[v]).min()\r\n print(-1 if res == np.inf else int(res))\r\n\r\n @staticmethod\r\n def d():\r\n n, *ab = map(int, sys.stdin.read().split())\r\n c = np.array(ab).reshape(2, n, 2)\r\n g = c.mean(axis=1)\r\n d = np.sqrt(((c - g[:, None, :]) ** 2).sum(axis=-1)).sum(axis=1)\r\n print(d[1] / d[0])\r\n\r\n class ABC023:\r\n @staticmethod\r\n def a():\r\n print(sum(divmod(int(sys.stdin.readline().rstrip()), 10)))\r\n\r\n @staticmethod\r\n def b():\r\n n, s = sys.stdin.read().split()\r\n n = int(n)\r\n t = \"b\"\r\n for i in range(n // 2):\r\n if i % 3 == 0:\r\n t = \"a\" + t + \"c\"\r\n elif i % 3 == 1:\r\n t = \"c\" + t + \"a\"\r\n else:\r\n t = \"b\" + t + \"b\"\r\n print(n // 2 if t == s else -1)\r\n\r\n @staticmethod\r\n def b_2():\r\n n, s = sys.stdin.read().split()\r\n n = int(n)\r\n if n & 1 ^ 1:\r\n print(-1)\r\n return\r\n a = list(\"abc\")\r\n i = (1 - n // 2) % 3\r\n for c in s:\r\n if c != a[i]:\r\n print(-1)\r\n return\r\n i = (i + 1) % 3\r\n print(n // 2)\r\n\r\n @staticmethod\r\n def c():\r\n h, w, k, n, *rc = map(int, sys.stdin.read().split())\r\n r, c = np.array(rc).reshape(n, 2).T - 1\r\n rb = np.bincount(r, minlength=h)\r\n cb = np.bincount(c, minlength=w)\r\n rbb = np.bincount(rb, minlength=k + 1)\r\n cbb = np.bincount(cb, minlength=k + 1)\r\n tot = (rbb[: k + 1] * cbb[k::-1]).sum()\r\n real = np.bincount(rb[r] + cb[c] - 1, minlength=k + 1)\r\n print(tot - real[k - 1] + real[k])\r\n\r\n @staticmethod\r\n def d():\r\n n, *hs = map(int, sys.stdin.read().split())\r\n h, s = np.array(hs).reshape(n, 2).T\r\n\r\n t = np.arange(n)\r\n\r\n def is_ok(x):\r\n return np.all(np.sort((x - h) // s) >= t)\r\n\r\n def binary_search():\r\n lo, hi = 0, 10**14\r\n while lo + 1 < hi:\r\n x = (lo + hi) // 2\r\n if is_ok(x):\r\n hi = x\r\n else:\r\n lo = x\r\n return hi\r\n\r\n print(binary_search())\r\n\r\n class ABC024:\r\n @staticmethod\r\n def a():\r\n a, b, c, k, s, t = map(int, sys.stdin.read().split())\r\n print(a * s + b * t - c * (s + t) * (s + t >= k))\r\n\r\n @staticmethod\r\n def b():\r\n n, t, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n print(np.minimum(a[1:] - a[:-1], t).sum() + t)\r\n\r\n @staticmethod\r\n def c():\r\n n, d, k, *lrst = map(int, sys.stdin.read().split())\r\n lrst = np.array(lrst)\r\n lr = lrst[: 2 * d].reshape(d, 2)\r\n s, t = lrst[2 * d :].reshape(k, 2).T\r\n day = np.zeros((k,), dtype=np.int32)\r\n for i in range(d):\r\n l, r = lr[i]\r\n move = (l <= s) & (s <= r) & (s != t)\r\n reach = move & (l <= t) & (t <= r)\r\n s[move & (s < t)] = r\r\n s[move & (s > t)] = l\r\n s[reach] = t[reach]\r\n day[reach] = i + 1\r\n print(*day, sep=\"\\n\")\r\n\r\n @staticmethod\r\n def d():\r\n a, b, c = map(int, sys.stdin.read().split())\r\n p = MOD\r\n denom = pow(a * b % p - b * c % p + c * a % p, p - 
2, p)\r\n w = (b * c - a * b) % p * denom % p\r\n h = (b * c - a * c) % p * denom % p\r\n print(h, w)\r\n\r\n class ABC025:\r\n @staticmethod\r\n def a():\r\n s, n = sys.stdin.read().split()\r\n n = int(n)\r\n i, j = divmod(n - 1, 5)\r\n print(s[i] + s[j])\r\n\r\n @staticmethod\r\n def b():\r\n n, a, b = map(int, sys.stdin.readline().split())\r\n res = defaultdict(int)\r\n for _ in range(n):\r\n s, d = sys.stdin.readline().split()\r\n d = int(d)\r\n res[s] += min(max(d, a), b)\r\n res = res[\"East\"] - res[\"West\"]\r\n if res == 0:\r\n ans = 0\r\n elif res > 0:\r\n ans = f\"East {res}\"\r\n else:\r\n ans = f\"West {-res}\"\r\n print(ans)\r\n\r\n @staticmethod\r\n def c():\r\n b = [0] * 6\r\n for i in range(2):\r\n (*row,) = map(int, sys.stdin.readline().split())\r\n for j in range(3):\r\n b[i * 3 + j] = row[j]\r\n c = [0] * 8\r\n for i in range(3):\r\n (*row,) = map(int, sys.stdin.readline().split())\r\n for j in range(2):\r\n c[i * 3 + j] = row[j]\r\n tot = sum(b) + sum(c)\r\n\r\n @lru_cache(maxsize=None)\r\n def f(s=tuple(0 for _ in range(9))):\r\n if all(s):\r\n res = 0\r\n for i in range(6):\r\n res += (s[i] == s[i + 3]) * b[i]\r\n for i in range(8):\r\n res += (s[i] == s[i + 1]) * c[i]\r\n return res\r\n cand = [i for i in range(9) if not s[i]]\r\n flg = len(cand) & 1\r\n s = list(s)\r\n res = []\r\n for i in cand:\r\n s[i] = (flg ^ 1) + 1\r\n res.append(f(tuple(s)))\r\n s[i] = 0\r\n return sorted(res, reverse=flg)[0]\r\n\r\n a = f()\r\n b = tot - a\r\n print(a)\r\n print(b)\r\n\r\n class ABC026:\r\n @staticmethod\r\n def a():\r\n a = int(sys.stdin.readline().rstrip())\r\n print(a // 2 * (a - a // 2))\r\n\r\n @staticmethod\r\n def b():\r\n n, *r = map(int, sys.stdin.read().split())\r\n s = np.pi * np.array([0] + r) ** 2\r\n s.sort()\r\n res = s[n::-2].sum() - s[n - 1 :: -2].sum()\r\n print(res)\r\n\r\n @staticmethod\r\n def c():\r\n n, *b = map(int, sys.stdin.read().split())\r\n g = GeometryTopology.Graph()\r\n for i in range(1, n):\r\n g.add_edge(b[i - 1] - 1, i, weight=1)\r\n\r\n def f(u=0):\r\n if not g.edges[u]:\r\n return 1\r\n s = [f(v) for v in g.edges[u]]\r\n return max(s) + min(s) + 1\r\n\r\n print(f())\r\n\r\n @staticmethod\r\n def d():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n\r\n def f(t):\r\n return a * t + b * np.sin(c * t * np.pi) - 100\r\n\r\n print(optimize.brenth(f, 0, 200))\r\n\r\n class ABC027:\r\n @staticmethod\r\n def a():\r\n l = [int(l) for l in sys.stdin.readline().split()]\r\n l.sort()\r\n print(l[2] if l[0] == l[1] else l[0])\r\n\r\n @staticmethod\r\n def b():\r\n n, *a = map(int, sys.stdin.read().split())\r\n m, r = divmod(sum(a), n)\r\n if r:\r\n print(-1)\r\n return\r\n population = 0\r\n towns = 0\r\n cnt = 0\r\n for x in a:\r\n population += x\r\n towns += 1\r\n if population / towns != m:\r\n cnt += 1\r\n continue\r\n population, towns = 0, 0\r\n print(cnt)\r\n\r\n @staticmethod\r\n def c():\r\n n = int(sys.stdin.readline().rstrip())\r\n flg = n.bit_length() & 1 ^ 1\r\n t = 0\r\n x = 1\r\n while x <= n:\r\n t += 1\r\n x = 2 * x + 1 if t & 1 ^ flg else 2 * x\r\n print(\"Aoki\" if t & 1 else \"Takahashi\")\r\n\r\n class ABC028:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n print(\r\n \"Bad\"\r\n if n < 60\r\n else \"Good\"\r\n if n < 90\r\n else \"Great\"\r\n if n < 100\r\n else \"Perfect\"\r\n )\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip()\r\n cnt = Counter(s)\r\n print(*[cnt.get(c, 0) for c in \"ABCDEF\"])\r\n\r\n @staticmethod\r\n def c():\r\n a, b, c, d, e = map(int, 
sys.stdin.readline().split())\r\n print(max(b + c + e, a + d + e))\r\n\r\n @staticmethod\r\n def d():\r\n n, k = map(int, sys.stdin.readline().split())\r\n c = 3 * 2 * (n - k) * (k - 1) + 3 * (n - 1) + 1\r\n print(c / n**3)\r\n\r\n class ABC029:\r\n @staticmethod\r\n def a():\r\n print(sys.stdin.readline().rstrip() + \"s\")\r\n\r\n @staticmethod\r\n def b():\r\n print(sum(\"r\" in s for s in sys.stdin.read().split()))\r\n\r\n @staticmethod\r\n def c():\r\n print(\r\n *[\r\n \"\".join(s)\r\n for s in itertools.product(\r\n \"abc\", repeat=int(sys.stdin.readline().rstrip())\r\n )\r\n ],\r\n sep=\"\\n\",\r\n )\r\n\r\n @staticmethod\r\n def d():\r\n n = int(sys.stdin.readline().rstrip())\r\n print(\r\n sum(\r\n n // 10 ** (i + 1) * 10**i\r\n + min(max((n % 10 ** (i + 1) - 10**i + 1), 0), 10**i)\r\n for i in range(9)\r\n )\r\n )\r\n\r\n class ABC030:\r\n @staticmethod\r\n def a():\r\n a, b, c, d = map(int, sys.stdin.readline().split())\r\n e, f = b * c, d * a\r\n print(\"TAKAHASHI\" if e > f else \"AOKI\" if f > e else \"DRAW\")\r\n\r\n @staticmethod\r\n def b():\r\n n, m = map(int, sys.stdin.readline().split())\r\n n = (n % 12 + m / 60) * 30\r\n m *= 6\r\n d = abs(n - m)\r\n print(min(d, 360 - d))\r\n\r\n @staticmethod\r\n def c():\r\n n, m = map(int, sys.stdin.readline().split())\r\n x, y = map(int, sys.stdin.readline().split())\r\n a = [int(x) for x in sys.stdin.readline().split()]\r\n b = [int(x) for x in sys.stdin.readline().split()]\r\n\r\n t = 0\r\n p = 1\r\n cnt = 0\r\n while True:\r\n if p:\r\n i = bi_l(a, t)\r\n if i == n:\r\n break\r\n t = a[i] + x\r\n else:\r\n i = bi_l(b, t)\r\n if i == m:\r\n break\r\n t = b[i] + y\r\n cnt += 1\r\n p ^= 1\r\n print(cnt)\r\n\r\n @staticmethod\r\n def d():\r\n n, a = map(int, sys.stdin.readline().split())\r\n a -= 1\r\n k = sys.stdin.readline().rstrip()\r\n b = [int(x) - 1 for x in sys.stdin.readline().split()]\r\n\r\n c = [None] * n\r\n for i in range(n + 1):\r\n if str(i) == k:\r\n print(a + 1)\r\n return\r\n if c[a] is not None:\r\n l, d = i - c[a], c[a]\r\n break\r\n c[a] = i\r\n a = b[a]\r\n\r\n r = [None] * len(k)\r\n r[0] = 1\r\n for i in range(len(k) - 1):\r\n r[i + 1] = r[i] * 10 % l\r\n k = [int(c) for c in k][::-1]\r\n d = (sum(r[i] * k[i] for i in range(len(k))) - d) % l\r\n for _ in range(d):\r\n a = b[a]\r\n print(a + 1)\r\n\r\n @staticmethod\r\n def d_2():\r\n n, a, k, *b = map(int, sys.stdin.read().split())\r\n a -= 1\r\n b = [x - 1 for x in b]\r\n c = [None] * n\r\n for i in range(n + 1):\r\n if i == k:\r\n print(a + 1)\r\n return\r\n if c[a] is not None:\r\n for _ in range((k - c[a]) % (i - c[a])):\r\n a = b[a]\r\n print(a + 1)\r\n return\r\n c[a] = i\r\n a = b[a]\r\n\r\n class ABC031:\r\n @staticmethod\r\n def a():\r\n a, d = map(int, sys.stdin.readline().split())\r\n if a > d:\r\n a, d = d, a\r\n print((a + 1) * d)\r\n\r\n @staticmethod\r\n def b():\r\n l, h, n, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n res = np.maximum(l - a, 0)\r\n res[a > h] = -1\r\n print(*res, sep=\"\\n\")\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n np.cumsum(a[::2], out=a[::2])\r\n np.cumsum(a[1::2], out=a[1::2])\r\n a = list(a) + [0] * 2\r\n\r\n def score(i, j):\r\n if i > j:\r\n i, j = j, i\r\n if (j - i) & 1:\r\n x, y = a[j - 1] - a[i - 2], a[j] - a[i - 1]\r\n else:\r\n x, y = a[j] - a[i - 2], a[j - 1] - a[i - 1]\r\n return x, y\r\n\r\n res = -inf\r\n for i in range(n):\r\n s = -inf\r\n for j in range(n):\r\n if i == j:\r\n continue\r\n x, y = score(i, j)\r\n if y > 
s:\r\n s, t = y, x\r\n res = max(res, t)\r\n print(res)\r\n\r\n @staticmethod\r\n def d():\r\n k, m = map(int, sys.stdin.readline().split())\r\n (*vw,) = zip(*[iter(sys.stdin.read().split())] * 2)\r\n for l in itertools.product((1, 2, 3), repeat=k):\r\n s = dict()\r\n for v, w in vw:\r\n i = 0\r\n for d in v:\r\n d = int(d) - 1\r\n j = i + l[d]\r\n if j > len(w):\r\n break\r\n t = w[i:j]\r\n if d in s and s[d] != t:\r\n break\r\n s[d] = t\r\n i = j\r\n else:\r\n if i == len(w):\r\n continue\r\n break\r\n else:\r\n for i in range(k):\r\n print(s[i])\r\n return\r\n\r\n class ABC032:\r\n @staticmethod\r\n def a():\r\n a, b, n = map(int, sys.stdin.read().split())\r\n l = NumberTheory.lcm(a, b)\r\n print((n + l - 1) // l * l)\r\n\r\n @staticmethod\r\n def b():\r\n s, k = sys.stdin.read().split()\r\n k = int(k)\r\n res = set()\r\n for i in range(len(s) - k + 1):\r\n res.add(s[i : i + k])\r\n print(len(res))\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *s = map(int, sys.stdin.read().split())\r\n if 0 in s:\r\n print(n)\r\n return\r\n if k == 0:\r\n print(0)\r\n return\r\n res, tmp, l = 0, 1, 0\r\n for r in range(n):\r\n tmp *= s[r]\r\n while tmp > k:\r\n tmp //= s[l]\r\n l += 1\r\n res = max(res, r - l + 1)\r\n\r\n print(res)\r\n\r\n class ABC033:\r\n @staticmethod\r\n def a():\r\n print(\r\n \"SAME\"\r\n if len(set(sys.stdin.readline().rstrip())) == 1\r\n else \"DIFFERENT\"\r\n )\r\n\r\n @staticmethod\r\n def b():\r\n n = int(sys.stdin.readline().rstrip())\r\n res = dict()\r\n for _ in range(n):\r\n s, p = sys.stdin.readline().split()\r\n res[s] = int(p)\r\n tot = sum(res.values())\r\n for s, p in res.items():\r\n if p > tot / 2:\r\n print(s)\r\n return\r\n print(\"atcoder\")\r\n\r\n @staticmethod\r\n def c():\r\n s = sys.stdin.readline().rstrip()\r\n print(sum(not \"0\" in f for f in s.split(\"+\")))\r\n\r\n class ABC034:\r\n @staticmethod\r\n def a():\r\n x, y = map(int, sys.stdin.readline().split())\r\n print(\"Better\" if y > x else \"Worse\")\r\n\r\n @staticmethod\r\n def b():\r\n n = int(sys.stdin.readline().rstrip())\r\n print(n + 1 if n & 1 else n - 1)\r\n\r\n @staticmethod\r\n def c():\r\n h, w = map(int, sys.stdin.read().split())\r\n choose = Combinatorics.CombinationsMod()\r\n print(choose(h + w - 2, h - 1))\r\n\r\n @staticmethod\r\n def d():\r\n n, k, *wp = map(int, sys.stdin.read().split())\r\n w, p = np.array(wp).reshape(-1, 2).T\r\n\r\n def f(x):\r\n return np.sort(w * (p - x))[-k:].sum()\r\n\r\n print(optimize.bisect(f, 0, 100))\r\n\r\n class ABC035:\r\n @staticmethod\r\n def a():\r\n w, h = map(int, sys.stdin.readline().split())\r\n print(\"4:3\" if 4 * h == 3 * w else \"16:9\")\r\n\r\n @staticmethod\r\n def b():\r\n s, t = sys.stdin.read().split()\r\n y = x = z = 0\r\n for c in s:\r\n if c == \"?\":\r\n z += 1\r\n elif c == \"L\":\r\n x -= 1\r\n elif c == \"R\":\r\n x += 1\r\n elif c == \"D\":\r\n y -= 1\r\n elif c == \"U\":\r\n y += 1\r\n d = abs(y) + abs(x)\r\n print(d + z if t == \"1\" else max(d - z, (d - z) & 1))\r\n\r\n @staticmethod\r\n def c():\r\n n, q, *lr = map(int, sys.stdin.read().split())\r\n l, r = np.array(lr).reshape(q, 2).T\r\n res = np.zeros(n + 1, dtype=int)\r\n np.add.at(res, l - 1, 1)\r\n np.subtract.at(res, r, 1)\r\n np.cumsum(res, out=res)\r\n res = res & 1\r\n print(\"\".join(map(str, res[:-1])))\r\n\r\n @staticmethod\r\n def d():\r\n n, m, t = map(int, sys.stdin.readline().split())\r\n point = np.array(sys.stdin.readline().split(), dtype=int)\r\n a, b, c = (\r\n np.array(sys.stdin.read().split(), dtype=np.int64)\r\n .reshape(m, 3)\r\n .T\r\n )\r\n a -= 
1\r\n b -= 1\r\n d_1 = shortest_path(\r\n csr_matrix((c, (a, b)), (n, n)),\r\n method=\"D\",\r\n directed=True,\r\n indices=0,\r\n )\r\n d_2 = shortest_path(\r\n csr_matrix((c, (b, a)), (n, n)),\r\n method=\"D\",\r\n directed=True,\r\n indices=0,\r\n )\r\n print(int(np.amax((t - (d_1 + d_2)) * point)))\r\n\r\n class ABC036:\r\n @staticmethod\r\n def a():\r\n a, b = map(int, sys.stdin.readline().split())\r\n print((b + a - 1) // a)\r\n\r\n @staticmethod\r\n def b():\r\n n, *s = sys.stdin.read().split()\r\n n = int(n)\r\n for j in range(n):\r\n row = \"\"\r\n for i in range(n - 1, -1, -1):\r\n row += s[i][j]\r\n print(row)\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n b = [None] * n\r\n prev = None\r\n j = -1\r\n for i, x in sorted(enumerate(a), key=lambda x: x[1]):\r\n if x != prev:\r\n j += 1\r\n b[i] = j\r\n prev = x\r\n print(*b, sep=\"\\n\")\r\n\r\n @staticmethod\r\n def d():\r\n n, *ab = map(int, sys.stdin.read().split())\r\n edges = [[] for _ in range(n)]\r\n for a, b in zip(*[iter(ab)] * 2):\r\n a -= 1\r\n b -= 1\r\n edges[a].append(b)\r\n edges[b].append(a)\r\n parent = [None] * n\r\n\r\n def count(u):\r\n black, white = 1, 1\r\n for v in edges[u]:\r\n if v == parent[u]:\r\n continue\r\n parent[v] = u\r\n b, w = count(v)\r\n black *= w\r\n black %= MOD\r\n white *= (b + w) % MOD\r\n white %= MOD\r\n return black, white\r\n\r\n print(sum(count(0)) % MOD)\r\n\r\n class ABC037:\r\n @staticmethod\r\n def a():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n print(c // min(a, b))\r\n\r\n @staticmethod\r\n def b():\r\n n, q, *lrt = map(int, sys.stdin.read().split())\r\n a = np.zeros(n, dtype=int)\r\n for l, r, t in zip(*[iter(lrt)] * 3):\r\n a[l - 1 : r] = t\r\n print(*a, sep=\"\\n\")\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *a = map(int, sys.stdin.read().split())\r\n a = np.array([0] + a)\r\n np.cumsum(a, out=a)\r\n s = (a[k:] - a[:-k]).sum()\r\n print(s)\r\n\r\n @staticmethod\r\n def d():\r\n h, w, *a = map(int, sys.stdin.read().split())\r\n p = [None] * (h * w)\r\n\r\n def paths(k):\r\n if p[k]:\r\n return p[k]\r\n p[k] = 1\r\n i, j = divmod(k, w)\r\n if j > 0 and a[k] > a[k - 1]:\r\n p[k] += paths(k - 1)\r\n if j < w - 1 and a[k] > a[k + 1]:\r\n p[k] += paths(k + 1)\r\n if i > 0 and a[k] > a[k - w]:\r\n p[k] += paths(k - w)\r\n if i < h - 1 and a[k] > a[k + w]:\r\n p[k] += paths(k + w)\r\n p[k] %= MOD\r\n return p[k]\r\n\r\n print(sum(paths(i) for i in range(h * w)) % MOD)\r\n\r\n class ABC038:\r\n @staticmethod\r\n def a():\r\n s = sys.stdin.readline().rstrip()\r\n print(\"YES\" if s[-1] == \"T\" else \"NO\")\r\n\r\n @staticmethod\r\n def b():\r\n a, b, c, d = map(int, sys.stdin.read().split())\r\n print(\"YES\" if a == c or b == c or a == d or b == d else \"NO\")\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a += [-1]\r\n cnt = n\r\n tmp = 1\r\n for i in range(n):\r\n if a[i + 1] > a[i]:\r\n tmp += 1\r\n else:\r\n cnt += tmp * (tmp - 1) // 2\r\n tmp = 1\r\n print(cnt)\r\n\r\n @staticmethod\r\n def d():\r\n n, *wh = map(int, sys.stdin.read().split())\r\n a = [\r\n x[1]\r\n for x in sorted(\r\n zip(*[iter(wh)] * 2), key=lambda x: (x[0], -x[1])\r\n )\r\n ]\r\n print(bi_l(DP.LIS(a), inf))\r\n\r\n class ABC039:\r\n @staticmethod\r\n def a():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n print((a * b + b * c + c * a) * 2)\r\n\r\n @staticmethod\r\n def b():\r\n x = int(sys.stdin.readline().rstrip())\r\n for n in range(1, int(x**0.5) + 1):\r\n if pow(n, 4) == x:\r\n print(n)\r\n 
return\r\n\r\n @staticmethod\r\n def c():\r\n board = \"WBWBWWBWBWBW\" * 3\r\n convert = \"Do, *, Re, *, Mi, Fa, *, So, *, La, *, Si\".split(\", \")\r\n s = sys.stdin.readline().rstrip()\r\n print(convert[board.index(s)])\r\n\r\n @staticmethod\r\n def d():\r\n h, w = map(int, sys.stdin.readline().split())\r\n s = \"\".join(sys.stdin.read().split())\r\n white = set()\r\n for i in range(h * w):\r\n if s[i] == \"#\":\r\n continue\r\n l = 0 if i % w == 0 else -1\r\n r = 0 if (i + 1) % w == 0 else 1\r\n white |= {\r\n i + dy + dx\r\n for dy in range(-w, w + 1, w)\r\n for dx in range(l, r + 1)\r\n }\r\n black_before = set(range(h * w)) - white\r\n black_after = set()\r\n for i in black_before:\r\n l = 0 if i % w == 0 else -1\r\n r = 0 if (i + 1) % w == 0 else 1\r\n black_after |= {\r\n i + dy + dx\r\n for dy in range(-w, w + 1, w)\r\n for dx in range(l, r + 1)\r\n }\r\n black_after &= set(range(h * w))\r\n for i in range(h * w):\r\n if s[i] == \"#\" and not i in black_after:\r\n print(\"impossible\")\r\n return\r\n print(\"possible\")\r\n for i in range(h):\r\n print(\r\n \"\".join(\r\n [\r\n \"#\" if i * w + j in black_before else \".\"\r\n for j in range(w)\r\n ]\r\n )\r\n )\r\n\r\n class ABC040:\r\n @staticmethod\r\n def a():\r\n n, x = map(int, sys.stdin.readline().split())\r\n print(min(x - 1, n - x))\r\n\r\n @staticmethod\r\n def b():\r\n n = int(sys.stdin.readline().rstrip())\r\n res = inf\r\n for i in range(1, int(n**0.5) + 1):\r\n res = min(res, n // i - i + n % i)\r\n print(res)\r\n\r\n @staticmethod\r\n def c():\r\n n, *h = map(int, sys.stdin.read().split())\r\n h = [h[0]] + h\r\n cost = [None] * (n + 1)\r\n cost[0] = cost[1] = 0\r\n for i in range(2, n + 1):\r\n cost[i] = min(\r\n cost[i - 2] + abs(h[i] - h[i - 2]),\r\n cost[i - 1] + abs(h[i] - h[i - 1]),\r\n )\r\n print(cost[n])\r\n\r\n @staticmethod\r\n def d():\r\n n, m = map(int, sys.stdin.readline().split())\r\n uf = GeometryTopology.Graph(n)\r\n uf.init_dsu()\r\n queue = []\r\n for _ in range(m):\r\n a, b, y = map(int, sys.stdin.readline().split())\r\n heappush(queue, (-(2 * y), a - 1, b - 1))\r\n q = int(sys.stdin.readline().rstrip())\r\n for i in range(q):\r\n v, y = map(int, sys.stdin.readline().split())\r\n heappush(queue, (-(2 * y + 1), v - 1, i))\r\n res = [None] * q\r\n while queue:\r\n y, i, j = heappop(queue)\r\n if y & 1:\r\n res[j] = uf.size[uf.find(i)]\r\n else:\r\n uf.unite(i, j)\r\n print(*res, sep=\"\\n\")\r\n\r\n class ABC041:\r\n @staticmethod\r\n def a():\r\n s, i = sys.stdin.read().split()\r\n i = int(i)\r\n print(s[i - 1])\r\n\r\n @staticmethod\r\n def b():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n ans = a * b % MOD * c % MOD\r\n print(ans)\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n for i, h in sorted(enumerate(a), key=lambda x: -x[1]):\r\n print(i + 1)\r\n\r\n @staticmethod\r\n def d():\r\n n, _, *xy = map(int, sys.stdin.read().split())\r\n g = [0] * n\r\n for x, y in zip(*[iter(xy)] * 2):\r\n g[x - 1] |= 1 << (y - 1)\r\n res = [0] * (1 << n)\r\n res[0] = 1\r\n for i in range(1 << n):\r\n for j in range(n):\r\n if i >> j & 1 ^ 1:\r\n continue\r\n if not (g[j] & i):\r\n res[i] += res[i & ~(1 << j)]\r\n print(res[-1])\r\n\r\n class ABC042:\r\n @staticmethod\r\n def a():\r\n a = [int(x) for x in sys.stdin.readline().split()]\r\n c = Counter(a)\r\n print(\"YES\" if c[5] == 2 and c[7] == 1 else \"NO\")\r\n\r\n @staticmethod\r\n def b():\r\n n, l, *s = sys.stdin.read().split()\r\n print(\"\".join(sorted(s)))\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *d 
= sys.stdin.read().split()\r\n l = len(n)\r\n ok = sorted(set(string.digits) - set(d))\r\n cand = [\r\n int(\"\".join(p)) for p in itertools.product(ok, repeat=l)\r\n ] + [int(min(x for x in ok if x > \"0\") + min(ok) * l)]\r\n print(cand[bi_l(cand, int(n))])\r\n\r\n @staticmethod\r\n def d():\r\n h, w, a, b = map(int, sys.stdin.read().split())\r\n combinations = Combinatorics.CombinationsMod(\r\n n=2 * 10**5, mod=MOD\r\n )\r\n i = np.arange(h - a, h)\r\n ng = np.sum(\r\n combinations(i + b - 1, i)\r\n * combinations(h - i + w - b - 2, h - 1 - i)\r\n % MOD\r\n )\r\n print((combinations(h + w - 2, h - 1) - ng) % MOD)\r\n\r\n class ABC043:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n print((1 + n) * n // 2)\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip()\r\n t = \"\"\r\n for c in s:\r\n if c == \"B\":\r\n t = t[:-1]\r\n else:\r\n t += c\r\n print(t)\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n x = np.around(a.sum() / n).astype(int)\r\n print(np.sum((a - x) ** 2))\r\n\r\n @staticmethod\r\n def d():\r\n s = sys.stdin.readline().rstrip()\r\n n = len(s)\r\n for i in range(n - 1):\r\n if s[i] == s[i + 1]:\r\n print(i + 1, i + 2)\r\n return\r\n for i in range(n - 2):\r\n if s[i] == s[i + 2]:\r\n print(i + 1, i + 3)\r\n return\r\n print(-1, -1)\r\n\r\n class ABC044:\r\n @staticmethod\r\n def a():\r\n n, k, x, y = map(int, sys.stdin.read().split())\r\n print(min(n, k) * x + max(0, n - k) * y)\r\n\r\n @staticmethod\r\n def b():\r\n res = set(\r\n c & 1 for c in Counter(sys.stdin.readline().rstrip()).values()\r\n )\r\n print(\"Yes\" if len(res) == 1 and res.pop() == 0 else \"No\")\r\n\r\n @staticmethod\r\n def c():\r\n n, a, *x = map(int, sys.stdin.read().split())\r\n dp = np.zeros((n + 1, 2501), dtype=np.int64)\r\n dp[0, 0] = 1\r\n for v in x:\r\n dp[1:, v:] += dp[:-1, :-v]\r\n i = np.arange(1, n + 1)\r\n print(dp[i, i * a].sum())\r\n\r\n @staticmethod\r\n def c_2():\r\n n, a, *x = map(int, sys.stdin.read().split())\r\n for i in range(n):\r\n x[i] -= a\r\n\r\n s = defaultdict(int)\r\n s[0] = 1\r\n for i in range(n):\r\n ns = s.copy()\r\n for k, v in s.items():\r\n ns[k + x[i]] += v\r\n s = ns\r\n print(s[0] - 1)\r\n\r\n @staticmethod\r\n def d():\r\n pass\r\n\r\n class ABC045:\r\n @staticmethod\r\n def a():\r\n a, b, h = map(int, sys.stdin.read().split())\r\n print((a + b) * h // 2)\r\n\r\n @staticmethod\r\n def b():\r\n a, b, c = sys.stdin.read().split()\r\n d = {\"a\": a[::-1], \"b\": b[::-1], \"c\": c[::-1]}\r\n nx = \"a\"\r\n while 1:\r\n if not d[nx]:\r\n print(nx.upper())\r\n return\r\n d[nx], nx = d[nx][:-1], d[nx][-1]\r\n\r\n @staticmethod\r\n def c():\r\n def c(l):\r\n return pow(2, max(0, l - 1))\r\n\r\n s = sys.stdin.readline().rstrip()\r\n n = len(s)\r\n print(\r\n sum(\r\n int(s[i : j + 1]) * c(i) * c(n - 1 - j)\r\n for i in range(n)\r\n for j in range(i, n)\r\n )\r\n )\r\n\r\n @staticmethod\r\n def d():\r\n h, w, n, *ab = map(int, sys.stdin.read().split())\r\n c = defaultdict(int)\r\n for y, x in zip(*[iter(ab)] * 2):\r\n y -= 1\r\n x -= 1\r\n for dy, dx in itertools.product(range(-1, 2), repeat=2):\r\n i, j = y + dy, x + dx\r\n if not (0 < i < h - 1 and 0 < j < w - 1):\r\n continue\r\n c[(i, j)] += 1\r\n c = Counter(c.values())\r\n c[0] = (h - 2) * (w - 2) - sum(c.values())\r\n for i in range(10):\r\n print(c[i])\r\n\r\n class ABC046:\r\n @staticmethod\r\n def a():\r\n print(len(set(sys.stdin.readline().split())))\r\n\r\n @staticmethod\r\n def b():\r\n n, k = map(int, 
sys.stdin.readline().split())\r\n print(k * pow(k - 1, n - 1))\r\n\r\n @staticmethod\r\n def c():\r\n n, *xy = map(int, sys.stdin.read().split())\r\n a, b = 1, 1\r\n for x, y in zip(*[iter(xy)] * 2):\r\n n = max((a + x - 1) // x, (b + y - 1) // y)\r\n a, b = n * x, n * y\r\n print(a + b)\r\n\r\n @staticmethod\r\n def d():\r\n c = Counter(sys.stdin.readline().rstrip())\r\n print((c[\"g\"] - c[\"p\"]) // 2)\r\n\r\n class ABC047:\r\n @staticmethod\r\n def a():\r\n c = sorted(map(int, sys.stdin.readline().split()))\r\n print(\"Yes\" if c[0] + c[1] == c[2] else \"No\")\r\n\r\n @staticmethod\r\n def b():\r\n w, h, n, *xyf = map(int, sys.stdin.read().split())\r\n l, r, d, u = 0, w, 0, h\r\n for x, y, f in zip(*[iter(xyf)] * 3):\r\n if f == 1:\r\n l = max(l, x)\r\n if f == 2:\r\n r = min(r, x)\r\n if f == 3:\r\n d = max(d, y)\r\n if f == 4:\r\n u = min(u, y)\r\n print(max(0, r - l) * max(0, u - d))\r\n\r\n @staticmethod\r\n def c():\r\n s = sys.stdin.readline().rstrip()\r\n print(sum(s[i] != s[i + 1] for i in range(len(s) - 1)))\r\n\r\n @staticmethod\r\n def d():\r\n mn, mx, c = inf, -1, 0\r\n n, t, *a = map(int, sys.stdin.read().split())\r\n for p in a:\r\n if p - mn == mx:\r\n c += 1\r\n elif p - mn > mx:\r\n mx, c = p - mn, 1\r\n mn = min(mn, p)\r\n print(c)\r\n\r\n class ABC048:\r\n @staticmethod\r\n def a():\r\n def initial(s):\r\n return s[0].upper()\r\n\r\n print(\"\".join(map(initial, sys.stdin.readline().split())))\r\n\r\n @staticmethod\r\n def b():\r\n a, b, x = map(int, sys.stdin.readline().split())\r\n print(\r\n b // x - (a - 1) // x\r\n ) # if a=0, (a-1)/x is rounded down to -1.\r\n\r\n @staticmethod\r\n def c():\r\n n, x, *a = map(int, sys.stdin.read().split())\r\n cnt = prev = 0\r\n for i in range(n):\r\n d = prev + a[i] - x\r\n prev = a[i]\r\n if d <= 0:\r\n continue\r\n cnt += d\r\n prev -= d\r\n print(cnt)\r\n\r\n @staticmethod\r\n def d():\r\n s = sys.stdin.readline().rstrip()\r\n print(\"First\" if len(s) & 1 ^ (s[0] == s[-1]) else \"Second\")\r\n\r\n class ABC049:\r\n @staticmethod\r\n def a():\r\n vowels = set(\"aeiou\")\r\n print(\r\n \"vowel\"\r\n if sys.stdin.readline().rstrip() in vowels\r\n else \"consonant\"\r\n )\r\n\r\n @staticmethod\r\n def b():\r\n h, w, *s = sys.stdin.read().split()\r\n for l in s:\r\n for _ in range(2):\r\n print(l)\r\n\r\n @staticmethod\r\n def c():\r\n t = set(\"dream, dreamer, erase, eraser\".split(\", \"))\r\n\r\n def obtainable(s):\r\n while True:\r\n for i in range(5, 8):\r\n if s[-i:] in t:\r\n s = s[:-i]\r\n if not s:\r\n return True\r\n break\r\n else:\r\n return False\r\n\r\n s = sys.stdin.readline().rstrip()\r\n print(\"YES\" if obtainable(s) else \"NO\")\r\n\r\n @staticmethod\r\n def d():\r\n n, k, l = map(int, sys.stdin.readline().split())\r\n uf1 = GeometryTopology.Graph(n)\r\n uf1.init_dsu()\r\n uf2 = GeometryTopology.Graph(n)\r\n uf2.init_dsu()\r\n\r\n def add_edges(uf, m):\r\n for _ in range(m):\r\n x, y = map(int, sys.stdin.readline().split())\r\n x -= 1\r\n y -= 1\r\n uf.unite(x, y)\r\n\r\n add_edges(uf1, k)\r\n add_edges(uf2, l)\r\n\r\n g = defaultdict(list)\r\n for i in range(n):\r\n g[(uf1.find(i), uf2.find(i))].append(i)\r\n\r\n res = [None] * n\r\n for a in g:\r\n for i in g[a]:\r\n res[i] = len(g[a])\r\n\r\n print(*res, sep=\" \")\r\n\r\n class ABC050:\r\n @staticmethod\r\n def a():\r\n print(eval(sys.stdin.readline().rstrip()))\r\n\r\n @staticmethod\r\n def b():\r\n n = int(sys.stdin.readline().rstrip())\r\n t = np.array(sys.stdin.readline().split(), dtype=np.int64)\r\n m, *px = map(int, sys.stdin.read().split())\r\n p, 
x = np.array(px).reshape(m, 2).T\r\n p -= 1\r\n print(*(t.sum() + x - t[p]), sep=\"\\n\")\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = Counter(a)\r\n if n & 1 and not (\r\n a[0] == 1 and all(a[i] == 2 for i in range(2, n, 2))\r\n ):\r\n print(0)\r\n return\r\n if ~n & 1 and any(a[i] != 2 for i in range(1, n, 2)):\r\n print(0)\r\n return\r\n print(pow(2, n // 2, MOD))\r\n\r\n @staticmethod\r\n def d():\r\n pass\r\n\r\n class ABC051:\r\n @staticmethod\r\n def a():\r\n print(\" \".join(sys.stdin.readline().rstrip().split(\",\")))\r\n\r\n @staticmethod\r\n def b():\r\n k, s = map(int, sys.stdin.readline().split())\r\n tot = 0\r\n for x in range(k + 1):\r\n if s - x < 0:\r\n break\r\n if s - x > 2 * k:\r\n continue\r\n tot += s - x + 1 if s - x <= k else 2 * k - (s - x) + 1\r\n print(tot)\r\n\r\n @staticmethod\r\n def c():\r\n x1, y1, x2, y2 = map(int, sys.stdin.readline().split())\r\n dx, dy = x2 - x1, y2 - y1\r\n print(\r\n \"U\" * dy\r\n + \"R\" * (dx + 1)\r\n + \"D\" * (dy + 1)\r\n + \"L\" * (dx + 1)\r\n + \"U\"\r\n + \"L\"\r\n + \"U\" * (dy + 1)\r\n + \"R\" * (dx + 1)\r\n + \"D\" * (dy + 1)\r\n + \"L\" * dx\r\n )\r\n\r\n @staticmethod\r\n def d():\r\n n, m, *abc = map(int, sys.stdin.read().split())\r\n x = np.arange(n)\r\n a, b, c = np.array(abc).reshape(m, 3).T\r\n a -= 1\r\n b -= 1\r\n d = shortest_path(\r\n csr_matrix((c, (a, b)), shape=(n, n)),\r\n method=\"FW\",\r\n directed=False,\r\n ).astype(np.int64)\r\n print(\r\n m\r\n - np.any(\r\n d[x, a[:, None]] + c[:, None] == d[x, b[:, None]], axis=1\r\n ).sum()\r\n )\r\n\r\n class ABC052:\r\n @staticmethod\r\n def a():\r\n a, b, c, d = map(int, sys.stdin.readline().split())\r\n print(max(a * b, c * d))\r\n\r\n @staticmethod\r\n def b():\r\n n, s = sys.stdin.read().split()\r\n n = int(n)\r\n a = [0] * (n + 1)\r\n for i in range(n):\r\n a[i + 1] = a[i] + (1 if s[i] == \"I\" else -1)\r\n print(max(a))\r\n\r\n @staticmethod\r\n def c():\r\n n = int(sys.stdin.readline().rstrip())\r\n pn = NumberTheory.PrimeNumbers(n)\r\n s = 1\r\n for c in pn.factorize_factorial(n).values():\r\n s = s * (c + 1) % MOD\r\n print(s)\r\n\r\n @staticmethod\r\n def d():\r\n n, a, b, *x = map(int, sys.stdin.read().split())\r\n x = np.array(x)\r\n print(np.minimum((x[1:] - x[:-1]) * a, b).sum())\r\n\r\n class ABC053:\r\n @staticmethod\r\n def a():\r\n print(\r\n \"ABC\" if int(sys.stdin.readline().rstrip()) < 1200 else \"ARC\"\r\n )\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip()\r\n print(len(s) - s.find(\"A\") - s[::-1].find(\"Z\"))\r\n\r\n @staticmethod\r\n def c():\r\n x = int(sys.stdin.readline().rstrip())\r\n q, r = divmod(x, 11)\r\n print(2 * q + (r + 5) // 6)\r\n\r\n @staticmethod\r\n def d():\r\n n, *a = map(int, sys.stdin.read().split())\r\n print(n - ((n - len(set(a)) + 1) // 2 * 2))\r\n\r\n class ABC054:\r\n @staticmethod\r\n def a():\r\n def f(x):\r\n return (x + 11) % 13\r\n\r\n a, b = map(int, sys.stdin.readline().split())\r\n print(\"Alice\" if f(a) > f(b) else \"Bob\" if f(a) < f(b) else \"Draw\")\r\n\r\n @staticmethod\r\n def b():\r\n n, m = map(int, sys.stdin.readline().split())\r\n a = [sys.stdin.readline().rstrip() for _ in range(n)]\r\n b = [sys.stdin.readline().rstrip() for _ in range(m)]\r\n\r\n for i in range(n - m + 1):\r\n for j in range(n - m + 1):\r\n for y in range(m):\r\n for x in range(m):\r\n if a[i + y][j + x] == b[y][x]:\r\n continue\r\n break\r\n else:\r\n continue\r\n break\r\n else:\r\n print(\"Yes\")\r\n return\r\n print(\"No\")\r\n\r\n @staticmethod\r\n def 
c():\r\n n, m, *ab = map(int, sys.stdin.read().split())\r\n g = GeometryTopology.Graph(n)\r\n for a, b in zip(*[iter(ab)] * 2):\r\n a -= 1\r\n b -= 1\r\n g.add_edge(a, b)\r\n g.add_edge(b, a)\r\n\r\n cnt = 0\r\n stack = [(0, 1)]\r\n while stack:\r\n u, s = stack.pop()\r\n if s == (1 << n) - 1:\r\n cnt += 1\r\n continue\r\n for v in g.edges[u]:\r\n if s >> v & 1:\r\n continue\r\n stack.append((v, s | 1 << v))\r\n print(cnt)\r\n\r\n @staticmethod\r\n def d():\r\n n, ma, mb, *abc = map(int, sys.stdin.read().split())\r\n dp = np.full((401, 401), np.inf)\r\n dp[0, 0] = 0\r\n for a, b, c in zip(*[iter(abc)] * 3):\r\n np.minimum(dp[a:, b:], dp[:-a, :-b] + c, out=dp[a:, b:])\r\n i = np.arange(1, 400 // max(ma, mb) + 1)\r\n res = dp[i * ma, i * mb].min()\r\n print(int(res) if res != np.inf else -1)\r\n\r\n class ABC055:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n print(800 * n - 200 * (n // 15))\r\n\r\n @staticmethod\r\n def b():\r\n n = int(sys.stdin.readline().rstrip())\r\n fac, _ = Algebra.generate_fac_ifac(n, MOD)\r\n print(fac[-1])\r\n\r\n @staticmethod\r\n def c():\r\n n, m = map(int, sys.stdin.readline().split())\r\n print(m // 2 if m <= 2 * n else n + (m - 2 * n) // 4)\r\n\r\n @staticmethod\r\n def d():\r\n n, s = sys.stdin.read().split()\r\n n = int(n)\r\n s = [1 if c == \"o\" else 0 for c in s]\r\n\r\n def possible(t):\r\n for i in range(1, n - 1):\r\n t[i + 1] = t[i - 1] ^ t[i] ^ s[i]\r\n return (\r\n (t[0] ^ s[0] ^ t[1] ^ t[-1])\r\n | (t[-1] ^ s[-1] ^ t[-2] ^ t[0])\r\n ) ^ 1\r\n\r\n for fst in [(1, 0), (0, 1), (1, 1), (0, 0)]:\r\n t = [None] * n\r\n t[0], t[1] = fst[0], fst[1]\r\n if possible(t):\r\n print(\"\".join(\"S\" if x == 1 else \"W\" for x in t))\r\n return\r\n print(-1)\r\n\r\n class ABC056:\r\n @staticmethod\r\n def a():\r\n def to_i(c):\r\n return 1 if c == \"H\" else 0\r\n\r\n a, b = map(to_i, sys.stdin.readline().split())\r\n print(\"D\" if a ^ b else \"H\")\r\n\r\n @staticmethod\r\n def b():\r\n w, a, b = map(int, sys.stdin.readline().split())\r\n if a > b:\r\n a, b = b, a\r\n print(max(b - (a + w), 0))\r\n\r\n @staticmethod\r\n def c():\r\n x = int(sys.stdin.readline().rstrip())\r\n print(int(math.ceil(math.sqrt(2 * x + 1 / 4) - 0.5)))\r\n\r\n @staticmethod\r\n def d():\r\n n, k, *a = map(int, sys.stdin.read().split())\r\n a = sorted(min(x, k) for x in a)\r\n\r\n def necessary(i):\r\n dp = np.zeros(k, dtype=np.bool)\r\n dp[0] = True\r\n for j in range(n):\r\n if j == i:\r\n continue\r\n dp[a[j] :] += dp[: -a[j]]\r\n return np.any(dp[k - a[i] :])\r\n\r\n def binary_search():\r\n lo, hi = -1, n\r\n while hi - lo > 1:\r\n i = (lo + hi) // 2\r\n if necessary(i):\r\n hi = i\r\n else:\r\n lo = i\r\n return hi\r\n\r\n print(binary_search())\r\n\r\n class ABC057:\r\n @staticmethod\r\n def a():\r\n a, b = map(int, sys.stdin.readline().split())\r\n print((a + b) % 24)\r\n\r\n @staticmethod\r\n def b():\r\n n, m, *I = map(int, sys.stdin.read().split())\r\n I = np.array(I).reshape(-1, 2)\r\n ab, cd = I[:n], I[n:]\r\n print(\r\n *(\r\n np.argmin(\r\n np.absolute(ab[:, None] - cd).sum(axis=-1), axis=-1\r\n )\r\n + 1\r\n ),\r\n sep=\"\\n\",\r\n )\r\n\r\n @staticmethod\r\n def c():\r\n n = int(sys.stdin.readline().rstrip())\r\n divs = NumberTheory.find_divisors(n)\r\n print(len(str(divs[bi_l(divs, math.sqrt(n))])))\r\n\r\n @staticmethod\r\n def d():\r\n c = Combinatorics.choose\r\n n, a, b, *v = map(int, sys.stdin.read().split())\r\n v.sort()\r\n print(sum(v[-a:]) / a)\r\n l, r = bi_l(v, v[-a]), bi_r(v, v[-a])\r\n print(\r\n sum(\r\n c(r - l, i)\r\n for 
i in range(r - n + a, r - max(l, n - b) + 1)\r\n )\r\n if r == n\r\n else c(r - l, r - n + a)\r\n )\r\n\r\n class ABC058:\r\n @staticmethod\r\n def a():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n print(\"YES\" if c - b == b - a else \"NO\")\r\n\r\n @staticmethod\r\n def b():\r\n s, t = sys.stdin.read().split()\r\n a = \"\"\r\n for i in range(len(t)):\r\n a += s[i] + t[i]\r\n if len(s) > len(t):\r\n a += s[-1]\r\n print(a)\r\n\r\n @staticmethod\r\n def c():\r\n n, *s = sys.stdin.read().split()\r\n res = {c: 100 for c in string.ascii_lowercase}\r\n for counter in map(Counter, s):\r\n for (\r\n c,\r\n x,\r\n ) in res.items():\r\n res[c] = min(x, counter[c])\r\n t = \"\"\r\n for c, x in sorted(res.items()):\r\n t += c * x\r\n print(t)\r\n\r\n @staticmethod\r\n def d():\r\n n, m, *xy = map(int, sys.stdin.read().split())\r\n x, y = np.array(xy[:n]), np.array(xy[n:])\r\n print(\r\n (x * (np.arange(n) + 1) - np.cumsum(x)).sum()\r\n % MOD\r\n * ((y * (np.arange(m) + 1) - np.cumsum(y)).sum() % MOD)\r\n % MOD\r\n )\r\n\r\n class ABC059:\r\n @staticmethod\r\n def a():\r\n def initial(s):\r\n return s[0].upper()\r\n\r\n print(\"\".join(map(initial, sys.stdin.readline().split())))\r\n\r\n @staticmethod\r\n def b():\r\n a, b = sys.stdin.read().split()\r\n la, lb = len(a), len(b)\r\n print(\r\n \"GREATER\"\r\n if la > lb\r\n else \"LESS\"\r\n if la < lb\r\n else \"GREATER\"\r\n if a > b\r\n else \"LESS\"\r\n if a < b\r\n else \"EQUAL\"\r\n )\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n c = s = 0\r\n for i in range(n):\r\n s += a[i]\r\n if i & 1 and s >= 0:\r\n c += s + 1\r\n s = -1\r\n elif i & 1 ^ 1 and s <= 0:\r\n c += 1 - s\r\n s = 1\r\n c1 = c\r\n c = s = 0\r\n for i in range(n):\r\n s += a[i]\r\n if i & 1 and s <= 0:\r\n c += 1 - s\r\n s = 1\r\n elif i & 1 ^ 1 and s >= 0:\r\n c += s + 1\r\n s = -1\r\n c2 = c\r\n print(min(c1, c2))\r\n\r\n @staticmethod\r\n def d():\r\n x, y = map(int, sys.stdin.readline().split())\r\n print(\"Brown\" if abs(x - y) <= 1 else \"Alice\")\r\n\r\n class ABC060:\r\n @staticmethod\r\n def a():\r\n a, b, c = sys.stdin.readline().split()\r\n print(\"YES\" if a[-1] == b[0] and b[-1] == c[0] else \"NO\")\r\n\r\n @staticmethod\r\n def b():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n print(\"NO\" if c % NumberTheory.gcd(a, b) else \"YES\")\r\n\r\n @staticmethod\r\n def c():\r\n n, t, *a = map(int, sys.stdin.read().split())\r\n print(sum(min(a[i + 1] - a[i], t) for i in range(n - 1)) + t)\r\n\r\n @staticmethod\r\n def d():\r\n pass\r\n\r\n class ABC061:\r\n @staticmethod\r\n def a():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n print(\"Yes\" if a <= c <= b else \"No\")\r\n\r\n @staticmethod\r\n def b():\r\n n, m, *ab = map(int, sys.stdin.read().split())\r\n ab = np.array(ab) - 1\r\n g = np.zeros(n, dtype=np.int32)\r\n np.add.at(g, ab, 1)\r\n print(*g, sep=\"\\n\")\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *ab = map(int, sys.stdin.read().split())\r\n ab = np.transpose(np.array(ab).reshape(n, 2))\r\n a, b = ab[:, np.argsort(ab[0])]\r\n print(a[np.cumsum(b) >= k][0])\r\n\r\n @staticmethod\r\n def d():\r\n n, m, *abc = map(int, sys.stdin.read().split())\r\n a, b, c = np.array(abc).reshape(m, 3).T\r\n a -= 1\r\n b -= 1\r\n c *= -1\r\n g = csr_matrix(\r\n ([1] * (m + 1), (np.append(a, n - 1), np.append(b, 0))), (n, n)\r\n )\r\n _, labels = connected_components(g, connection=\"strong\")\r\n bl = (labels[a] == labels[0]) & (labels[b] == labels[0])\r\n g = csr_matrix((c[bl], (a[bl], b[bl])), (n, n))\r\n 
try:\r\n print(\r\n -shortest_path(g, method=\"BF\", directed=True, indices=0)[\r\n -1\r\n ].astype(int)\r\n )\r\n except:\r\n print(\"inf\")\r\n\r\n @staticmethod\r\n def d_2():\r\n n, m, *abc = map(int, sys.stdin.read().split())\r\n a, b, c = np.array(abc).reshape(m, 3).T\r\n a -= 1\r\n b -= 1\r\n c *= -1\r\n d = np.full(n, np.inf)\r\n d[0] = 0\r\n for _ in range(n - 1):\r\n np.minimum.at(d, b, d[a] + c)\r\n neg_cycle = np.zeros(n, dtype=np.bool)\r\n for _ in range(n):\r\n np.logical_or.at(neg_cycle, b, d[a] + c < d[b])\r\n np.minimum.at(d, b, d[a] + c)\r\n print(inf if neg_cycle[-1] else -d[-1].astype(int))\r\n\r\n class ABC062:\r\n @staticmethod\r\n def a():\r\n g = [0, 2, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0]\r\n x, y = map(int, sys.stdin.readline().split())\r\n print(\"Yes\" if g[x - 1] == g[y - 1] else \"No\")\r\n\r\n @staticmethod\r\n def b():\r\n h, w = map(int, sys.stdin.readline().split())\r\n a = np.array(\r\n [list(s) for s in sys.stdin.read().split()], dtype=\"U1\"\r\n )\r\n a = np.pad(a, pad_width=1, constant_values=\"#\")\r\n for s in a:\r\n print(\"\".join(s))\r\n\r\n @staticmethod\r\n def c():\r\n h, w = map(int, sys.stdin.readline().split())\r\n if h * w % 3 == 0:\r\n print(0)\r\n return\r\n\r\n def minimize(h, w):\r\n return min(\r\n h,\r\n *(\r\n s[-1] - s[0]\r\n for x in range(w // 3, w // 3 + 2)\r\n for s in (\r\n sorted(\r\n [\r\n h * x,\r\n h // 2 * (w - x),\r\n (h + 1) // 2 * (w - x),\r\n ]\r\n ),\r\n )\r\n ),\r\n )\r\n\r\n print(min(minimize(h, w), minimize(w, h)))\r\n\r\n @staticmethod\r\n def d():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n\r\n def optimize(a):\r\n a = list(a)\r\n l, r = a[:n], a[n:]\r\n heapify(l)\r\n s = [None] * (n + 1)\r\n s[0] = sum(l)\r\n for i in range(n):\r\n x = heappop(l)\r\n heappush(l, max(x, r[i]))\r\n s[i + 1] = s[i] + max(0, r[i] - x)\r\n return np.array(s)\r\n\r\n print(\r\n (\r\n optimize(a[: 2 * n]) + optimize(-a[-1 : n - 1 : -1])[::-1]\r\n ).max()\r\n )\r\n\r\n class ABC063:\r\n @staticmethod\r\n def a():\r\n a = sum(map(int, sys.stdin.readline().split()))\r\n print(\"error\" if a >= 10 else a)\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip()\r\n print(\"yes\" if len(set(s)) == len(s) else \"no\")\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n s = a.sum()\r\n if s % 10:\r\n print(s)\r\n elif not np.count_nonzero(a % 10):\r\n print(0)\r\n else:\r\n print(s - a[a % 10 != 0].min())\r\n\r\n @staticmethod\r\n def d():\r\n n, a, b, *h = map(int, sys.stdin.read().split())\r\n h = np.array(h)\r\n d = a - b\r\n\r\n def possible(c):\r\n hh = h.copy()\r\n np.maximum(hh - b * c, 0, out=hh)\r\n return ((hh + d - 1) // d).sum() <= c\r\n\r\n def binary_search():\r\n lo, hi = 0, 10**9\r\n while hi - lo > 1:\r\n c = (lo + hi) // 2\r\n if possible(c):\r\n hi = c\r\n else:\r\n lo = c\r\n return hi\r\n\r\n print(binary_search())\r\n\r\n class ABC064:\r\n @staticmethod\r\n def a():\r\n r, g, b = map(int, sys.stdin.readline().split())\r\n print(\"NO\" if (10 * g + b) % 4 else \"YES\")\r\n\r\n @staticmethod\r\n def b():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a.sort()\r\n print(a[-1] - a[0])\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = np.bincount(np.minimum(np.array(a) // 400, 8), minlength=9)\r\n mx = np.count_nonzero(a[:-1]) + a[-1]\r\n mn = max(mx - a[-1], 1)\r\n print(mn, mx)\r\n\r\n @staticmethod\r\n def d():\r\n n, s = sys.stdin.read().split()\r\n l = r = 0\r\n for c in s:\r\n if c == 
\"(\":\r\n r += 1\r\n else:\r\n if r == 0:\r\n l += 1\r\n else:\r\n r -= 1\r\n print(\"(\" * l + s + \")\" * r)\r\n\r\n class ABC065:\r\n @staticmethod\r\n def a():\r\n x, a, b = map(int, sys.stdin.readline().split())\r\n y = -a + b\r\n print(\"delicious\" if y <= 0 else \"safe\" if y <= x else \"dangerous\")\r\n\r\n @staticmethod\r\n def b():\r\n n, *a = [int(x) - 1 for x in sys.stdin.read().split()]\r\n i = 0\r\n for c in range(n):\r\n i = a[i]\r\n if i == 1:\r\n print(c + 1)\r\n return\r\n print(-1)\r\n\r\n @staticmethod\r\n def c():\r\n n, m = map(int, sys.stdin.readline().split())\r\n d = abs(n - m)\r\n if d >= 2:\r\n print(0)\r\n return\r\n fac, _ = Algebra.generate_fac_ifac(10**5)\r\n print(fac[n] * fac[m] * (1 if d else 2) % MOD)\r\n\r\n @staticmethod\r\n def d():\r\n n, *xy = map(int, sys.stdin.read().split())\r\n x, y = np.array(xy).reshape(n, 2).T\r\n i = np.argsort(x)\r\n ax, bx, cx = (\r\n i[:-1],\r\n i[1:],\r\n x[\r\n i[1:],\r\n ]\r\n - x[i[:-1]],\r\n )\r\n i = np.argsort(y)\r\n ay, by, cy = (\r\n i[:-1],\r\n i[1:],\r\n y[\r\n i[1:],\r\n ]\r\n - y[i[:-1]],\r\n )\r\n e = np.vstack(\r\n [np.hstack([ax, ay]), np.hstack([bx, by]), np.hstack([cx, cy])]\r\n )\r\n e = e[:, np.argsort(e[-1])]\r\n _, i = np.unique(e[:-1], return_index=True, axis=1)\r\n a, b, c = e[:, i]\r\n print(\r\n minimum_spanning_tree(csr_matrix((c, (a, b)), (n, n)))\r\n .astype(np.int64)\r\n .sum()\r\n )\r\n\r\n @staticmethod\r\n def d_2():\r\n n, *xy = map(int, sys.stdin.read().split())\r\n x, y = xy[::2], xy[1::2]\r\n g = GeometryTopology.Graph(n)\r\n\r\n def make(a):\r\n b = sorted(enumerate(a), key=lambda x: x[1])\r\n for i in range(n - 1):\r\n u, v, w = b[i][0], b[i + 1][0], b[i + 1][1] - b[i][1]\r\n for u, v in [(v, u), (u, v)]:\r\n if not v in g.edges[u]:\r\n g.add_edge(u, v, weight=w)\r\n else:\r\n g.edges[u][v].weight = min(g.edges[u][v].weight, w)\r\n\r\n make(x)\r\n make(y)\r\n _, d = g.kruskal()\r\n # _, d = g.prim()\r\n # _, d = g.boruvka()\r\n print(d)\r\n\r\n class ABC066:\r\n @staticmethod\r\n def a():\r\n print(sum(sorted(map(int, sys.stdin.readline().split()))[:-1]))\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip()\r\n\r\n def f(s):\r\n n = len(s) // 2\r\n return s[:n] == s[n:]\r\n\r\n for i in range(len(s) - 2, 0, -2):\r\n if f(s[:i]):\r\n print(i)\r\n return\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n b = deque()\r\n for i in range(n):\r\n if i & 1:\r\n b.appendleft(a[i])\r\n else:\r\n b.append(a[i])\r\n if n & 1:\r\n b.reverse()\r\n print(*b)\r\n\r\n @staticmethod\r\n def d():\r\n n, *a = map(int, sys.stdin.read().split())\r\n tmp = [None] * (n + 1)\r\n for i in range(n + 1):\r\n if tmp[a[i]] is not None:\r\n d = tmp[a[i]] + n - i\r\n break\r\n tmp[a[i]] = i\r\n k = np.arange(1, n + 2)\r\n c = Combinatorics.CombinationsMod(n + 1, MOD)\r\n print(*((c(n + 1, k) - c(d, k - 1)) % MOD), sep=\"\\n\")\r\n\r\n class ABC067:\r\n @staticmethod\r\n def a():\r\n pass\r\n\r\n @staticmethod\r\n def b():\r\n pass\r\n\r\n @staticmethod\r\n def c():\r\n pass\r\n\r\n @staticmethod\r\n def d():\r\n n, *ab = map(int, sys.stdin.read().split())\r\n g = GeometryTopology.Graph(n)\r\n for a, b in zip(*[iter(ab)] * 2):\r\n a -= 1\r\n b -= 1\r\n g.add_edge(a, b)\r\n g.add_edge(b, a)\r\n d1, d2 = g.bfs(0), g.bfs(n - 1)\r\n print(\r\n \"Fennec\"\r\n if sum(d1[i] <= d2[i] for i in range(n)) > n // 2\r\n else \"Snuke\"\r\n )\r\n\r\n class ABC068:\r\n @staticmethod\r\n def d():\r\n k = int(sys.stdin.readline().rstrip())\r\n q, r = divmod(k, 50)\r\n n = 
50\r\n a = np.arange(n) + q\r\n if r:\r\n a[-r:] += 1\r\n print(n)\r\n print(*a)\r\n\r\n class ABC069:\r\n pass\r\n\r\n class ABC070:\r\n pass\r\n\r\n class ABC071:\r\n pass\r\n\r\n class ABC072:\r\n pass\r\n\r\n class ABC073:\r\n pass\r\n\r\n class ABC074:\r\n pass\r\n\r\n class ABC075:\r\n pass\r\n\r\n class ABC076:\r\n pass\r\n\r\n class ABC077:\r\n pass\r\n\r\n class ABC078:\r\n pass\r\n\r\n class ABC079:\r\n pass\r\n\r\n class ABC080:\r\n pass\r\n\r\n class ABC081:\r\n pass\r\n\r\n class ABC082:\r\n pass\r\n\r\n class ABC083:\r\n pass\r\n\r\n class ABC084:\r\n pass\r\n\r\n class ABC085:\r\n pass\r\n\r\n class ABC086:\r\n pass\r\n\r\n class ABC087:\r\n pass\r\n\r\n class ABC088:\r\n pass\r\n\r\n class ABC089:\r\n pass\r\n\r\n class ABC090:\r\n pass\r\n\r\n class ABC091:\r\n pass\r\n\r\n class ABC092:\r\n pass\r\n\r\n class ABC093:\r\n pass\r\n\r\n class ABC094:\r\n pass\r\n\r\n class ABC095:\r\n pass\r\n\r\n class ABC096:\r\n pass\r\n\r\n class ABC097:\r\n pass\r\n\r\n class ABC098:\r\n pass\r\n\r\n class ABC099:\r\n pass\r\n\r\n class ABC100:\r\n pass\r\n\r\n class ABC101:\r\n pass\r\n\r\n class ABC102:\r\n pass\r\n\r\n class ABC103:\r\n pass\r\n\r\n class ABC104:\r\n pass\r\n\r\n class ABC105:\r\n pass\r\n\r\n class ABC106:\r\n pass\r\n\r\n class ABC107:\r\n pass\r\n\r\n class ABC108:\r\n pass\r\n\r\n class ABC109:\r\n pass\r\n\r\n class ABC110:\r\n pass\r\n\r\n class ABC111:\r\n pass\r\n\r\n class ABC112:\r\n pass\r\n\r\n class ABC113:\r\n pass\r\n\r\n class ABC114:\r\n pass\r\n\r\n class ABC115:\r\n pass\r\n\r\n class ABC116:\r\n pass\r\n\r\n class ABC117:\r\n pass\r\n\r\n class ABC118:\r\n pass\r\n\r\n class ABC119:\r\n pass\r\n\r\n class ABC120:\r\n pass\r\n\r\n class ABC121:\r\n pass\r\n\r\n class ABC122:\r\n pass\r\n\r\n class ABC123:\r\n pass\r\n\r\n class ABC124:\r\n pass\r\n\r\n class ABC125:\r\n pass\r\n\r\n class ABC126:\r\n pass\r\n\r\n class ABC127:\r\n pass\r\n\r\n class ABC128:\r\n pass\r\n\r\n class ABC129:\r\n pass\r\n\r\n class ABC130:\r\n pass\r\n\r\n class ABC131:\r\n pass\r\n\r\n class ABC132:\r\n pass\r\n\r\n class ABC133:\r\n pass\r\n\r\n class ABC134:\r\n pass\r\n\r\n class ABC135:\r\n pass\r\n\r\n class ABC136:\r\n pass\r\n\r\n class ABC137:\r\n pass\r\n\r\n class ABC138:\r\n pass\r\n\r\n class ABC139:\r\n pass\r\n\r\n class ABC140:\r\n pass\r\n\r\n class ABC141:\r\n pass\r\n\r\n class ABC142:\r\n pass\r\n\r\n class ABC143:\r\n pass\r\n\r\n class ABC144:\r\n pass\r\n\r\n class ABC145:\r\n pass\r\n\r\n class ABC146:\r\n pass\r\n\r\n class ABC147:\r\n pass\r\n\r\n class ABC148:\r\n pass\r\n\r\n class ABC149:\r\n pass\r\n\r\n class ABC150:\r\n pass\r\n\r\n class ABC151:\r\n pass\r\n\r\n class ABC152:\r\n pass\r\n\r\n class ABC153:\r\n pass\r\n\r\n class ABC154:\r\n pass\r\n\r\n class ABC155:\r\n pass\r\n\r\n class ABC156:\r\n pass\r\n\r\n class ABC157:\r\n pass\r\n\r\n class ABC158:\r\n pass\r\n\r\n class ABC159:\r\n pass\r\n\r\n class ABC160:\r\n pass\r\n\r\n class ABC161:\r\n pass\r\n\r\n class ABC162:\r\n pass\r\n\r\n class ABC163:\r\n pass\r\n\r\n class ABC164:\r\n pass\r\n\r\n class ABC165:\r\n pass\r\n\r\n class ABC166:\r\n pass\r\n\r\n class ABC167:\r\n pass\r\n\r\n class ABC168:\r\n pass\r\n\r\n class ABC169:\r\n pass\r\n\r\n class ABC170:\r\n @staticmethod\r\n def a():\r\n x = [int(x) for x in sys.stdin.readline().split()]\r\n for i in range(5):\r\n if x[i] != i + 1:\r\n print(i + 1)\r\n break\r\n\r\n @staticmethod\r\n def b():\r\n x, y = map(int, sys.stdin.readline().split())\r\n print(\"Yes\" if 2 * x <= y <= 4 * x and y % 2 == 0 else \"No\")\r\n\r\n 
@staticmethod\r\n def c():\r\n x, n, *p = map(int, sys.stdin.read().split())\r\n a = list(set(range(102)) - set(p))\r\n a = [(abs(y - x), y) for y in a]\r\n print(sorted(a)[0][1])\r\n\r\n @staticmethod\r\n def d():\r\n n, *a = map(int, sys.stdin.read().split())\r\n cand = set(a)\r\n cnt = 0\r\n for x, c in sorted(Counter(a).items()):\r\n cnt += c == 1 and x in cand\r\n cand -= set(range(x * 2, 10**6 + 1, x))\r\n print(cnt)\r\n\r\n @staticmethod\r\n def e():\r\n n, q = map(int, sys.stdin.readline().split())\r\n queue = []\r\n m = 2 * 10**5\r\n infants = [[] for _ in range(m)]\r\n highest_rate = [None] * m\r\n where = [None] * n\r\n rate = [None] * n\r\n\r\n def entry(i, k):\r\n where[i] = k\r\n while infants[k]:\r\n r, j = heappop(infants[k])\r\n if where[j] != k or j == i:\r\n continue\r\n if rate[i] >= -r:\r\n highest_rate[k] = rate[i]\r\n heappush(queue, (rate[i], k, i))\r\n heappush(infants[k], (r, j))\r\n break\r\n else:\r\n highest_rate[k] = rate[i]\r\n heappush(queue, (rate[i], k, i))\r\n heappush(infants[k], (-rate[i], i))\r\n\r\n def transfer(i, k):\r\n now = where[i]\r\n while infants[now]:\r\n r, j = heappop(infants[now])\r\n if where[j] != now or j == i:\r\n continue\r\n if highest_rate[now] != -r:\r\n highest_rate[now] = -r\r\n heappush(queue, (-r, now, j))\r\n heappush(infants[now], (r, j))\r\n break\r\n else:\r\n highest_rate[now] = None\r\n entry(i, k)\r\n\r\n def inquire():\r\n while True:\r\n r, k, i = heappop(queue)\r\n if where[i] != k or r != highest_rate[k]:\r\n continue\r\n heappush(queue, (r, k, i))\r\n return r\r\n\r\n for i in range(n):\r\n a, b = map(int, sys.stdin.readline().split())\r\n rate[i] = a\r\n entry(i, b - 1)\r\n for _ in range(q):\r\n c, d = map(int, sys.stdin.readline().split())\r\n transfer(c - 1, d - 1)\r\n print(inquire())\r\n\r\n class ABC171:\r\n @staticmethod\r\n def a():\r\n c = sys.stdin.readline().rstrip()\r\n print(\"A\" if c < \"a\" else \"a\")\r\n\r\n @staticmethod\r\n def b():\r\n n, k, *p = map(int, sys.stdin.read().split())\r\n print(sum(sorted(p)[:k]))\r\n\r\n @staticmethod\r\n def c():\r\n n = int(sys.stdin.readline().rstrip())\r\n n -= 1\r\n l = 1\r\n while True:\r\n if n < pow(26, l):\r\n break\r\n n -= pow(26, l)\r\n l += 1\r\n res = \"\".join(\r\n [chr(ord(\"a\") + d) for d in NumberTheory.base_convert(n, 26)][\r\n ::-1\r\n ]\r\n )\r\n res = \"a\" * (l - len(res)) + res\r\n print(res)\r\n\r\n @staticmethod\r\n def d():\r\n n = int(sys.stdin.readline().rstrip())\r\n a = [int(x) for x in sys.stdin.readline().split()]\r\n s = sum(a)\r\n cnt = Counter(a)\r\n q = int(sys.stdin.readline().rstrip())\r\n for _ in range(q):\r\n b, c = map(int, sys.stdin.readline().split())\r\n s += (c - b) * cnt[b]\r\n print(s)\r\n cnt[c] += cnt[b]\r\n cnt[b] = 0\r\n\r\n @staticmethod\r\n def e():\r\n n, *a = map(int, sys.stdin.read().split())\r\n s = 0\r\n for x in a:\r\n s ^= x\r\n b = map(lambda x: x ^ s, a)\r\n print(*b, sep=\" \")\r\n\r\n class ABC172:\r\n @staticmethod\r\n def a():\r\n a = int(sys.stdin.readline().rstrip())\r\n print(a * (1 + a + a**2))\r\n\r\n @staticmethod\r\n def b():\r\n s, t = sys.stdin.read().split()\r\n print(sum(s[i] != t[i] for i in range(len(s))))\r\n\r\n @staticmethod\r\n def c():\r\n n, m, k = map(int, sys.stdin.readline().split())\r\n a = [0] + [int(x) for x in sys.stdin.readline().split()]\r\n b = [int(x) for x in sys.stdin.readline().split()]\r\n (*sa,) = itertools.accumulate(a)\r\n (*sb,) = itertools.accumulate(b)\r\n res = 0\r\n for i in range(n + 1):\r\n r = k - sa[i]\r\n if r < 0:\r\n break\r\n res = max(res, i + 
bi_r(sb, r))\r\n print(res)\r\n\r\n @staticmethod\r\n def d():\r\n n = int(sys.stdin.readline().rstrip())\r\n f = np.zeros(n + 1, dtype=np.int64)\r\n for i in range(1, n + 1):\r\n f[i::i] += 1\r\n print((np.arange(1, n + 1) * f[1:]).sum())\r\n\r\n class ABC173:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n charge = (n + 999) // 1000 * 1000 - n\r\n print(charge)\r\n\r\n @staticmethod\r\n def b():\r\n n, *s = sys.stdin.read().split()\r\n c = Counter(s)\r\n for v in \"AC, WA, TLE, RE\".split(\", \"):\r\n print(f\"{v} x {c[v]}\")\r\n\r\n @staticmethod\r\n def c():\r\n h, w, k = map(int, sys.stdin.readline().split())\r\n c = [sys.stdin.readline().rstrip() for _ in range(h)]\r\n tot = 0\r\n for i in range(1 << h):\r\n for j in range(1 << w):\r\n cnt = 0\r\n for y in range(h):\r\n for x in range(w):\r\n if i >> y & 1 or j >> x & 1:\r\n continue\r\n cnt += c[y][x] == \"#\"\r\n tot += cnt == k\r\n print(tot)\r\n\r\n @staticmethod\r\n def d():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a.sort(reverse=True)\r\n res = (\r\n a[0]\r\n + sum(a[1 : 1 + (n - 2) // 2]) * 2\r\n + a[1 + (n - 2) // 2] * (n & 1)\r\n )\r\n print(res)\r\n\r\n @staticmethod\r\n def e():\r\n MOD = 10**9 + 7\r\n n, k, *a = map(int, sys.stdin.read().split())\r\n minus = [x for x in a if x < 0]\r\n plus = [x for x in a if x > 0]\r\n if len(plus) + len(minus) // 2 * 2 >= k: # plus\r\n (*minus,) = map(abs, minus)\r\n minus.sort(reverse=True)\r\n plus.sort(reverse=True)\r\n cand = []\r\n if len(minus) & 1:\r\n minus = minus[:-1]\r\n for i in range(0, len(minus) - 1, 2):\r\n cand.append(minus[i] * minus[i + 1] % MOD)\r\n if k & 1:\r\n res = plus[0]\r\n plus = plus[1:]\r\n else:\r\n res = 1\r\n if len(plus) & 1:\r\n plus = plus[:-1]\r\n for i in range(0, len(plus) - 1, 2):\r\n cand.append(plus[i] * plus[i + 1] % MOD)\r\n cand.sort(reverse=True)\r\n for x in cand[: k // 2]:\r\n res *= x\r\n res %= MOD\r\n print(res)\r\n elif 0 in a:\r\n print(0)\r\n else:\r\n cand = sorted(map(abs, a))\r\n res = 1\r\n for i in range(k):\r\n res *= cand[i]\r\n res %= MOD\r\n res = MOD - res\r\n print(res)\r\n pass\r\n\r\n class ABC174:\r\n @staticmethod\r\n def a():\r\n print(\"Yes\" if int(sys.stdin.readline().rstrip()) >= 30 else \"No\")\r\n\r\n class ABC178:\r\n @staticmethod\r\n def a():\r\n pass\r\n\r\n @staticmethod\r\n def b():\r\n pass\r\n\r\n @staticmethod\r\n def c():\r\n pass\r\n\r\n @staticmethod\r\n def d():\r\n s = int(sys.stdin.readline().rstrip())\r\n if s == 0:\r\n print(1)\r\n return\r\n elif s == 1:\r\n print(0)\r\n return\r\n c = np.eye(3, k=-1, dtype=np.int64)\r\n c[0, 0] = c[0, 2] = 1\r\n a = np.array([0, 0, 1])\r\n print(Algebra.dot(Algebra.matrix_pow(c, s - 2), a)[0])\r\n\r\n class ABC179:\r\n @staticmethod\r\n def a():\r\n s = sys.stdin.readline().rstrip()\r\n print(s + \"s\" if s[-1] != \"s\" else s + \"es\")\r\n\r\n @staticmethod\r\n def b():\r\n n, *d = map(int, sys.stdin.read().split())\r\n d = np.array(d).reshape(n, 2).T\r\n d = np.equal(d[0], d[1]).astype(int)\r\n dd = d.copy()\r\n dd[1:] += d[:-1]\r\n dd[:-1] += d[1:]\r\n print(\"Yes\" if (dd >= 3).any() else \"No\")\r\n\r\n @staticmethod\r\n def c():\r\n n = int(sys.stdin.readline().rstrip())\r\n res = (n // np.arange(1, n + 1)).sum() - len(\r\n NumberTheory.find_divisors(n)\r\n )\r\n print(res)\r\n\r\n @staticmethod\r\n def d():\r\n mod = 998244353\r\n n, k, *lr = map(int, sys.stdin.read().split())\r\n l, r = np.array(lr).reshape(k, -1).T\r\n\r\n @njit((i8, i8[:], i8[:]), cache=True)\r\n def solve(n, l, r):\r\n res = np.zeros(n * 2, 
dtype=np.int64)\r\n res[0], res[1] = 1, -1\r\n for i in range(n - 1):\r\n res[i + 1] = (res[i + 1] + res[i]) % mod\r\n res[i + l] = (res[i + l] + res[i]) % mod\r\n res[i + r + 1] = (res[i + r + 1] - res[i]) % mod\r\n print(res[n - 1])\r\n\r\n solve(n, l, r)\r\n\r\n @staticmethod\r\n def e():\r\n n, x, m = map(int, sys.stdin.readline().split())\r\n res = [-1 for _ in range(m)]\r\n s = 0\r\n loop = np.zeros(m, dtype=np.int64)\r\n for i in range(m + 1):\r\n if i == n:\r\n print(s)\r\n return\r\n if res[x] != -1:\r\n l, loop = i - res[x], loop[res[x] : i]\r\n q, r = divmod(n - i, l)\r\n print(s + q * loop.sum() + loop[:r].sum())\r\n return\r\n res[x], loop[i] = i, x\r\n s += x\r\n x = x**2 % m\r\n\r\n class ABC180:\r\n @staticmethod\r\n def a():\r\n n, a, b = map(int, sys.stdin.readline().split())\r\n print(n - a + b)\r\n\r\n @staticmethod\r\n def b():\r\n n, *x = map(int, sys.stdin.read().split())\r\n x = np.absolute(np.array(x))\r\n print(x.sum())\r\n print(np.sqrt((x**2).sum()))\r\n print(x.max())\r\n\r\n @staticmethod\r\n def c():\r\n n = int(sys.stdin.readline().rstrip())\r\n div = NumberTheory.find_divisors(n)\r\n print(*div, sep=\"\\n\")\r\n\r\n @staticmethod\r\n def d():\r\n x, y, a, b = map(int, sys.stdin.readline().split())\r\n cnt = 0\r\n while x * a <= x + b:\r\n x *= a\r\n if x >= y:\r\n print(cnt)\r\n return\r\n cnt += 1\r\n cnt += (y - x - 1) // b\r\n print(cnt)\r\n\r\n @staticmethod\r\n def e():\r\n n, *xyz = map(int, sys.stdin.read().split())\r\n\r\n xyz = list(zip(*[iter(xyz)] * 3))\r\n dist = [[0] * n for _ in range(n)]\r\n for i in range(n):\r\n a, b, c = xyz[i]\r\n for j in range(n):\r\n p, q, r = xyz[j]\r\n dist[i][j] = abs(p - a) + abs(q - b) + max(0, r - c)\r\n\r\n dp = [[inf] * n for _ in range(1 << n)]\r\n dp[0][0] = 0\r\n for s in range(1 << n):\r\n for i in range(n):\r\n t = s | (1 << i)\r\n for j in range(n):\r\n dp[t][i] = min(dp[t][i], dp[s][j] + dist[j][i])\r\n print(dp[-1][0])\r\n\r\n @staticmethod\r\n def f(): # rewrite with jit compiling later.\r\n n, m, l = map(int, sys.stdin.readline().split())\r\n c = Combinatorics.CombinationsMod(n, MOD)\r\n path = np.zeros(n + 1, dtype=np.int64)\r\n path[1] = path[2] = 1\r\n for i in range(3, n + 1):\r\n path[i] = path[i - 1] * i % MOD\r\n cycle = np.zeros(n + 1, dtype=np.int64)\r\n cycle[1:] = path[:-1]\r\n dp = np.zeros((n + 1, m + 1), dtype=np.int64)\r\n\r\n def f(l):\r\n dp[:, :] = 0\r\n dp[0, 0] = 1\r\n for i in range(n):\r\n for j in range(m + 1):\r\n k = np.arange(1, min(l, n - i, m - j + 1) + 1)\r\n dp[i + k, j + k - 1] += (\r\n dp[i, j]\r\n * c(n - i - 1, k - 1)\r\n % MOD\r\n * path[k]\r\n % MOD\r\n )\r\n dp[i + k, j + k - 1] %= MOD\r\n k = np.arange(2, min(l, n - i, m - j) + 1)\r\n dp[i + k, j + k] += (\r\n dp[i, j]\r\n * c(n - i - 1, k - 1)\r\n % MOD\r\n * cycle[k]\r\n % MOD\r\n )\r\n dp[i + k, j + k] %= MOD\r\n return dp[n, m]\r\n\r\n print((f(l) - f(l - 1)) % MOD)\r\n\r\n @staticmethod\r\n def f_2(): # PyPy\r\n n, m, l = map(int, sys.stdin.readline().split())\r\n c = Combinatorics.CombinationsMod(n, MOD)\r\n path = [0] * (n + 1)\r\n path[1] = path[2] = 1\r\n for i in range(3, n + 1):\r\n path[i] = path[i - 1] * i % MOD\r\n cycle = [0] + path[:-1]\r\n\r\n def f(l):\r\n dp = [[0] * (m + 1) for _ in range(n + 1)]\r\n dp[0][0] = 1\r\n for i in range(n):\r\n for j in range(m + 1):\r\n for k in range(1, min(l, n - i, m - j + 1) + 1):\r\n dp[i + k][j + k - 1] += (\r\n dp[i][j]\r\n * c(n - i - 1, k - 1)\r\n % MOD\r\n * path[k]\r\n % MOD\r\n )\r\n dp[i + k][j + k - 1] %= MOD\r\n for k in range(1, min(l, n - i, m - j) 
+ 1):\r\n dp[i + k][j + k] += (\r\n dp[i][j]\r\n * c(n - i - 1, k - 1)\r\n % MOD\r\n * cycle[k]\r\n % MOD\r\n )\r\n dp[i + k][j + k] %= MOD\r\n\r\n return dp[n][m]\r\n\r\n print((f(l) - f(l - 1)) % MOD)\r\n\r\n class ACL001:\r\n @staticmethod\r\n def a():\r\n n, *xy = map(int, sys.stdin.read().split())\r\n (*xy,) = zip(*[iter(xy)] * 2)\r\n print(xy)\r\n pass\r\n\r\n class TDPC:\r\n @staticmethod\r\n def t():\r\n pass\r\n\r\n class MSolutions2020:\r\n @staticmethod\r\n def a():\r\n x = int(sys.stdin.readline().rstrip())\r\n x -= 400\r\n print(8 - x // 200)\r\n\r\n @staticmethod\r\n def b():\r\n r, g, b, k = map(int, sys.stdin.read().split())\r\n while k and g <= r:\r\n g *= 2\r\n k -= 1\r\n while k and b <= g:\r\n b *= 2\r\n k -= 1\r\n print(\"Yes\" if r < g < b else \"No\")\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *a = map(int, sys.stdin.read().split())\r\n for i in range(k, n):\r\n print(\"Yes\" if a[i] > a[i - k] else \"No\")\r\n\r\n @staticmethod\r\n def d():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a += [-1]\r\n m = 1000\r\n s = 0\r\n for i in range(n):\r\n if a[i + 1] == a[i]:\r\n continue\r\n elif a[i + 1] > a[i]:\r\n cnt = m // a[i]\r\n m -= a[i] * cnt\r\n s += cnt\r\n else:\r\n m += a[i] * s\r\n s = 0\r\n print(m)\r\n\r\n\r\nclass Codeforces:\r\n class CR676div2:\r\n @staticmethod\r\n def a():\r\n t = int(sys.stdin.readline().rstrip())\r\n for _ in range(t):\r\n a, b = map(int, sys.stdin.readline().split())\r\n print(a ^ b)\r\n\r\n @staticmethod\r\n def b():\r\n t = int(sys.stdin.readline().rstrip())\r\n for _ in range(t):\r\n n = int(sys.stdin.readline().rstrip())\r\n s = [list(sys.stdin.readline().rstrip()) for _ in range(n)]\r\n s[0][0] = s[-1][-1] = \"0\"\r\n for i in range(n):\r\n for j in range(n):\r\n s[i][j] = int(s[i][j])\r\n\r\n def can_goal(g, c=0):\r\n visited = [0] * n\r\n stack = [(0, 0)]\r\n visited[0] |= 1 << 0\r\n while stack:\r\n y, x = stack.pop()\r\n for dy, dx in [(-1, 0), (0, -1), (1, 0), (0, 1)]:\r\n i, j = y + dy, x + dx\r\n if i < 0 or i >= n or j < 0 or j >= n:\r\n continue\r\n if i == j == n - 1:\r\n return True\r\n if visited[i] >> j & 1:\r\n continue\r\n visited[i] |= 1 << j\r\n if g[i][j] != c:\r\n continue\r\n stack.append((i, j))\r\n return False\r\n\r\n if not (can_goal(s, 0) or can_goal(s, 1)):\r\n print(0)\r\n continue\r\n\r\n flg = 0\r\n for i in range(n):\r\n for j in range(n):\r\n if i == j == 0 or i == j == n - 1:\r\n continue\r\n s[i][j] ^= 1\r\n if not (can_goal(s, 0) or can_goal(s, 1)):\r\n print(1)\r\n print(i + 1, j + 1)\r\n flg = 1\r\n break\r\n s[i][j] ^= 1\r\n if flg:\r\n break\r\n if flg:\r\n continue\r\n\r\n print(2)\r\n if s[0][1] == s[1][0]:\r\n print(n, n - 1)\r\n print(n - 1, n)\r\n continue\r\n\r\n if s[0][1] == s[-1][-2]:\r\n print(1, 2)\r\n print(n - 1, n)\r\n else:\r\n print(1, 2)\r\n print(n, n - 1)\r\n\r\n @staticmethod\r\n def c():\r\n pass\r\n\r\n\r\nclass ProjectEuler:\r\n @staticmethod\r\n def p1():\r\n def f(n, x):\r\n return (x + n // x * x) * (n // x) // 2\r\n\r\n n = 1000\r\n ans = f(n - 1, 3) + f(n - 1, 5) - f(n - 1, 15)\r\n print(ans)\r\n\r\n @staticmethod\r\n def p2():\r\n fib = [1, 2]\r\n while fib[-1] < 4 * 10**6:\r\n fib.append(fib[-1] + fib[-2])\r\n print(sum(fib[1:-1:3]))\r\n\r\n @staticmethod\r\n def p3():\r\n pn = NumberTheory.PrimeNumbers()\r\n res = pn.factorize(600851475143)\r\n print(max(res.keys()))\r\n\r\n @staticmethod\r\n def p4():\r\n def is_palindrome(n):\r\n n = str(n)\r\n return n == n[::-1]\r\n\r\n cand = []\r\n for a in range(100, 1000):\r\n for b in range(a, 1000):\r\n n = a * 
b\r\n if is_palindrome(n):\r\n cand.append(n)\r\n print(max(cand))\r\n\r\n @staticmethod\r\n def p5():\r\n pn = NumberTheory.PrimeNumbers()\r\n res = defaultdict(int)\r\n for i in range(1, 21):\r\n for p, c in pn.factorize(i).items():\r\n res[p] = max(res[p], c)\r\n ans = 1\r\n for p, c in res.items():\r\n ans *= pow(p, c)\r\n print(ans)\r\n\r\n @staticmethod\r\n def p6():\r\n a = np.arange(101)\r\n b = np.cumsum(a**2)\r\n a = a.cumsum()\r\n print(a[100] ** 2 - b[100])\r\n\r\n @staticmethod\r\n def p7():\r\n nt = NumberTheory.PrimeNumbers()\r\n print(sorted(nt)[10000])\r\n\r\n @staticmethod\r\n def p8():\r\n n = \"7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450\"\r\n n = [int(d) for d in list(n)]\r\n res = 0\r\n for i in range(988):\r\n x = 1\r\n for j in range(13):\r\n x *= n[i + j]\r\n res = max(res, x)\r\n print(res)\r\n\r\n @staticmethod\r\n def p9():\r\n for a in range(1, 997):\r\n for b in range(a, 998 - a):\r\n c = 1000 - a - b\r\n if a**2 + b**2 == c**2:\r\n print(a * b * c)\r\n return\r\n\r\n @staticmethod\r\n def p10():\r\n pn = NumberTheory.PrimeNumbers(2 * 10**6 + 1)\r\n print(sum(pn))\r\n\r\n @staticmethod\r\n def p11():\r\n grid = \"08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08 49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00 81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65 52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91 22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80 24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50 32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70 67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21 24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72 21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95 78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92 16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57 86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58 19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40 04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66 88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69 04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36 20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16 20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54 01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48\"\r\n print(grid)\r\n\r\n pass\r\n\r\n\r\nclass Yukicoder:\r\n def __init__(self):\r\n pass\r\n\r\n def __call__(self):\r\n print(1)\r\n\r\n\r\nclass AOJ:\r\n @staticmethod\r\n def ALDS1_12_A():\r\n n, *a = map(int, 
sys.stdin.read().split())\r\n g = GeometryTopology.Graph(n)\r\n for i in range(n - 1):\r\n for j in range(i + 1, n):\r\n if a[i * n + j] == -1:\r\n continue\r\n g.add_edge(i, j, weight=a[i * n + j])\r\n g.add_edge(j, i, weight=a[i * n + j])\r\n _, d = g.kruskal()\r\n # _, d = g.prim()\r\n # _, d = g.boruvka()\r\n print(d)\r\n\r\n @staticmethod\r\n def GRL_3_C(): # strongly connected components\r\n n, m = map(int, sys.stdin.readline().split())\r\n g = GeometryTopology.Graph(n)\r\n for _ in range(m):\r\n g.add_edge(*map(int, sys.stdin.readline().split()))\r\n r = g.scc()\r\n q, *uv = map(int, sys.stdin.read().split())\r\n for u, v in zip(*[iter(uv)] * 2):\r\n print(int(r[u] == r[v]))\r\n\r\n\r\nclass YosupoJudge:\r\n @staticmethod\r\n def Directed_MST():\r\n n, m, s, *abc = map(int, sys.stdin.read().split())\r\n g = GeometryTopology.Graph(n)\r\n for a, b, c in zip(*[iter(abc)] * 3):\r\n g.add_edge(a, b, weight=c)\r\n _, d, p = g.prim(src=s, return_parent=True)\r\n print(d)\r\n print(*p)\r\n\r\n @staticmethod\r\n def Manhattan_MST():\r\n n, *xy = map(int, sys.stdin.read().split())\r\n g = GeometryTopology.Graph(n)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n # AtCoder.ABC179.f()\r\n # AtCoder.ABC060.d()\r\n AtCoder.ABC068.d()\r\n # YosupoJudge.Directed_MST()\r\n pass\r\n", "import sys\r\n\r\nimport numpy as np\r\n\r\nMOD = 10 ** 9 + 7\r\n\r\nn, *a = map(int, sys.stdin.read().split())\r\na = np.array(a, dtype=np.int64)\r\n\r\ndef main():\r\n b = a >> np.arange(61).reshape(-1, 1) & 1\r\n x = np.count_nonzero(b, axis=1)\r\n y = n - x\r\n x *= y\r\n x %= MOD\r\n x *= 2 ** np.arange(61) % MOD\r\n x %= MOD\r\n\r\n ans = np.sum(x) % MOD\r\n return ans\r\n\r\nif __name__ == '__main__':\r\n ans = main()\r\n print(ans)\n", "# 2019-11-19 01:10:02(JST)\r\nimport collections\r\n\r\n# import math\r\n# from string import ascii_lowercase, ascii_uppercase, digits\r\n# from bisect import bisect_left as bi_l, bisect_right as bi_r\r\nimport itertools\r\nimport sys\r\n\r\n# from functools import reduce\r\n# import operator as op\r\n# import re\r\n# import heapq\r\n# import array\r\nfrom scipy.misc import comb # (default: exact=False)\r\n\r\n# import numpy as np\r\n\r\n# def nCr(n, r):\r\n# r = min(n, r)\r\n# return np.cumprod(range(n, n-r, -1)) // np.cumprod(range(r, 0, -1))\r\n\r\ndef main():\r\n n, *a = [int(x) for x in sys.stdin.read().split()]\r\n s = list(itertools.accumulate(a))\r\n\r\n\r\n res = s.count(0)\r\n for x in [x for x in collections.Counter(s).values() if x >= 2]:\r\n res += comb(x, 2, exact=True)\r\n\r\n print(res)\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n", "import typing\n\nimport numba as nb\nimport numpy as np\n\n\[email protected]\ndef find_divisors(\n n: int,\n) -> np.array:\n i = np.arange(int(n ** .5))\n i += 1\n i = i[n % i == 0]\n i = np.hstack((i, n // i))\n return np.unique(i)\n\n\n\[email protected]\ndef lpf(\n n: int = 1 << 20,\n) -> np.array:\n s = np.arange(n)\n s[:2] = -1\n i = 0\n while i * i < n - 1:\n i += 1\n if s[i] == i: s[i::i] = i\n return s\n\n\[email protected]\ndef spf(\n n: int = 1 << 20,\n) -> np.array:\n s = np.arange(n)\n s[:2] = -1\n i = 0\n while i * i < n - 1:\n i += 1\n j = np.arange(i, n, i)\n s[j][s[j] == j] = i\n return s\n\n\[email protected]\ndef sieve_of_eratosthenes(\n n: int = 1 << 20,\n) -> np.array:\n return lpf(n) == np.arange(n)\n\n\n\[email protected]\ndef prime_numbers(\n n: int = 1 << 20,\n) -> np.array:\n s = sieve_of_eratosthenes(n)\n return np.flatnonzero(s)\n\n\n\[email protected]\ndef euler_totient(\n n: int,\n prime_numbers: 
np.array,\n) -> int:\n c = n\n for p in prime_numbers:\n if p * p > n: break\n if n % p: continue\n c = c // p * (p - 1)\n while not n % p: n //= p\n if n > 1:\n c = c // n * (n - 1)\n return c\n\n\[email protected](\n (nb.i8, ),\n cache=True,\n)\ndef solve(\n p: int,\n) -> typing.NoReturn:\n n = p - 1\n divs = find_divisors(n)\n pn = prime_numbers(1 << 20)\n mod = 998244353\n c = 1\n for d in divs:\n d %= mod\n e = euler_totient(d, pn)\n e %= mod\n c += e * d % mod\n c %= mod\n print(c)\n\n\ndef main() -> typing.NoReturn:\n p = int(input())\n solve(p)\n\n\nmain()\n", "import math\r\nimport string\r\nimport sys\r\nfrom bisect import bisect_left as bi_l\r\nfrom bisect import bisect_right as bi_r\r\nfrom collections import Counter, defaultdict, deque\r\n\r\n# from numba import jit\r\nfrom heapq import heappop, heappush\r\nfrom itertools import accumulate, combinations, product\r\n\r\nimport numpy as np\r\nfrom scipy import optimize\r\nfrom scipy.sparse import csr_matrix\r\nfrom scipy.sparse.csgraph import shortest_path\r\n\r\ninf = float(\"inf\")\r\nfrom functools import lru_cache, reduce\r\n\r\nsys.setrecursionlimit(10**6)\r\nMOD = 10**9 + 7\r\n# MOD = 998244353\r\n\r\n\"\"\"*****************************************lib***************************************\"\"\"\r\n\"\"\"*****************************************lib***************************************\"\"\"\r\n\"\"\"*****************************************lib***************************************\"\"\"\r\n\"\"\"*****************************************lib***************************************\"\"\"\r\n\"\"\"*****************************************lib***************************************\"\"\"\r\n\"\"\"*****************************************lib***************************************\"\"\"\r\n\r\n\r\nclass NumberTheory:\r\n def __init__(self, n=2 * 10**6, numpy=True):\r\n self.n = n\r\n self.np_flg = numpy\r\n self.is_prime_number, self.prime_numbers = self.sieve_of_eratosthenes(\r\n n\r\n )\r\n\r\n def sieve_of_eratosthenes(self, n):\r\n if self.np_flg:\r\n sieve = np.ones(n + 1, dtype=np.int64)\r\n sieve[:2] = 0\r\n for i in range(2, int(n**0.5) + 1):\r\n if sieve[i]:\r\n sieve[i * 2 :: i] = 0\r\n prime_numbers = np.flatnonzero(sieve)\r\n else:\r\n sieve = [1] * (n + 1)\r\n sieve[0] = sieve[1] = 0\r\n for i in range(2, int(n**0.5) + 1):\r\n if not sieve[i]:\r\n continue\r\n for j in range(i * 2, n + 1, i):\r\n sieve[j] = 0\r\n prime_numbers = [i for i in range(2, n + 1) if sieve[i]]\r\n return sieve, prime_numbers\r\n\r\n def prime_factorize(self, n):\r\n res = dict()\r\n if n < 2:\r\n return res\r\n border = int(n**0.5)\r\n for p in self.prime_numbers:\r\n if p > border:\r\n break\r\n while n % p == 0:\r\n res[p] = res.get(p, 0) + 1\r\n n //= p\r\n if n == 1:\r\n return res\r\n res[n] = 1\r\n return res\r\n\r\n def prime_factorize_factorial(self, n):\r\n res = dict()\r\n for i in range(2, n + 1):\r\n for p, c in self.prime_factorize(i).items():\r\n res[p] = res.get(p, 0) + c\r\n return res\r\n\r\n @classmethod\r\n def gcd(cls, a, b):\r\n return cls.gcd(b, a % b) if b else abs(a)\r\n\r\n @classmethod\r\n def lcm(cls, a, b):\r\n return abs(a // cls.gcd(a, b) * b)\r\n\r\n @staticmethod\r\n def find_divisors(n):\r\n divisors = []\r\n for i in range(1, int(n**0.5) + 1):\r\n if n % i:\r\n continue\r\n divisors.append(i)\r\n j = n // i\r\n if j != i:\r\n divisors.append(j)\r\n return divisors\r\n\r\n @staticmethod\r\n def base_convert(n, b):\r\n if not n:\r\n return [0]\r\n res = []\r\n while n:\r\n n, r = divmod(n, b)\r\n if r < 
0:\r\n n += 1\r\n r -= b\r\n res.append(r)\r\n return res\r\n\r\n\r\nclass UnionFind:\r\n def __init__(self, n=10**6):\r\n self.root = list(range(n))\r\n self.height = [0] * n\r\n self.size = [1] * n\r\n\r\n def find_root(self, u):\r\n if self.root[u] == u:\r\n return u\r\n self.root[u] = self.find_root(self.root[u])\r\n return self.root[u]\r\n\r\n def unite(self, u, v):\r\n ru = self.find_root(u)\r\n rv = self.find_root(v)\r\n if ru == rv:\r\n return\r\n hu = self.height[ru]\r\n hv = self.height[rv]\r\n if hu >= hv:\r\n self.root[rv] = ru\r\n self.size[ru] += self.size[rv]\r\n self.height[ru] = max(hu, hv + 1)\r\n else:\r\n self.root[ru] = rv\r\n self.size[rv] += self.size[ru]\r\n\r\n\r\nclass Combinatorics:\r\n def __init__(self, N=10**9, n=10**6, mod=10**9 + 7, numpy=True):\r\n self.mod = mod\r\n self.nCr = dict()\r\n self.np_flg = numpy\r\n self.make_mod_tables(N, n)\r\n\r\n sys.setrecursionlimit(10**6)\r\n\r\n def choose(self, n, r, mod=None): # no mod, or mod ≠ prime\r\n if r > n or r < 0:\r\n return 0\r\n if r == 0:\r\n return 1\r\n if (n, r) in self.nCr:\r\n return self.nCr[(n, r)]\r\n if not mod:\r\n self.nCr[(n, r)] = self.choose(n - 1, r) + self.choose(\r\n n - 1, r - 1\r\n )\r\n else:\r\n self.nCr[(n, r)] = (\r\n self.choose(n - 1, r, mod) + self.choose(n - 1, r - 1, mod)\r\n ) % mod\r\n return self.nCr[(n, r)]\r\n\r\n def cumprod(self, a):\r\n p = self.mod\r\n l = len(a)\r\n sql = int(np.sqrt(l) + 1)\r\n a = np.resize(a, sql**2).reshape(sql, sql)\r\n for i in range(sql - 1):\r\n a[:, i + 1] *= a[:, i]\r\n a[:, i + 1] %= p\r\n for i in range(sql - 1):\r\n a[i + 1] *= a[i, -1]\r\n a[i + 1] %= p\r\n return np.ravel(a)[:l]\r\n\r\n def make_mod_tables(self, N, n):\r\n p = self.mod\r\n if self.np_flg:\r\n fac = np.arange(n + 1)\r\n fac[0] = 1\r\n fac = self.cumprod(fac)\r\n ifac = np.arange(n + 1, 0, -1)\r\n ifac[0] = pow(int(fac[-1]), p - 2, p)\r\n ifac = self.cumprod(ifac)[n::-1]\r\n n_choose = np.arange(N + 1, N - n, -1)\r\n n_choose[0] = 1\r\n n_choose[1:] = self.cumprod(n_choose[1:]) * ifac[1 : n + 1] % p\r\n else:\r\n fac = [None] * (n + 1)\r\n fac[0] = 1\r\n for i in range(n):\r\n fac[i + 1] = fac[i] * (i + 1) % p\r\n ifac = [None] * (n + 1)\r\n ifac[n] = pow(fac[n], p - 2, p)\r\n for i in range(n, 0, -1):\r\n ifac[i - 1] = ifac[i] * i % p\r\n n_choose = [None] * (n + 1)\r\n n_choose[0] = 1\r\n for i in range(n):\r\n n_choose[i + 1] = n_choose[i] * (N - i) % p\r\n for i in range(n + 1):\r\n n_choose[i] = n_choose[i] * ifac[i] % p\r\n self.fac, self.ifac, self.mod_n_choose = fac, ifac, n_choose\r\n\r\n def mod_choose(self, n, r):\r\n return (\r\n self.fac[n] * self.ifac[r] % self.mod * self.ifac[n - r] % self.mod\r\n )\r\n\r\n\r\ndef z_algorithm(s):\r\n n = len(s)\r\n a = [0] * n\r\n a[0] = n\r\n l = r = -1\r\n for i in range(1, n):\r\n if r >= i:\r\n a[i] = min(a[i - l], r - i)\r\n while i + a[i] < n and s[i + a[i]] == s[a[i]]:\r\n a[i] += 1\r\n if i + a[i] >= r:\r\n l, r = i, i + a[i]\r\n return a\r\n\r\n\r\n\"\"\"******************************lib************************************\"\"\"\r\n\"\"\"******************************lib************************************\"\"\"\r\n\"\"\"******************************lib************************************\"\"\"\r\n\"\"\"******************************lib************************************\"\"\"\r\n\"\"\"******************************lib************************************\"\"\"\r\n\r\n\r\nclass AtCoder:\r\n class ABC001:\r\n @staticmethod\r\n def a():\r\n h1, h2 = map(int, sys.stdin.read().split())\r\n print(h1 - h2)\r\n\r\n class 
ABC002:\r\n @staticmethod\r\n def a():\r\n print(max(map(int, sys.stdin.readline().split())))\r\n\r\n @staticmethod\r\n def b():\r\n vowels = set(\"aeiou\")\r\n print(\r\n \"\".join(\r\n [\r\n c\r\n for c in sys.stdin.readline().rstrip()\r\n if c not in vowels\r\n ]\r\n )\r\n )\r\n\r\n @staticmethod\r\n def c():\r\n def triangle_area(x0, y0, x1, y1, x2, y2):\r\n x1 -= x0\r\n x2 -= x0\r\n y1 -= y0\r\n y2 -= y0\r\n return abs(x1 * y2 - x2 * y1) / 2\r\n\r\n print(triangle_area(*map(int, sys.stdin.readline().split())))\r\n\r\n @staticmethod\r\n def d():\r\n n, m = map(int, sys.stdin.readline().split())\r\n edges = set(\r\n (x - 1, y - 1)\r\n for x, y in zip(*[map(int, sys.stdin.read().split())] * 2)\r\n )\r\n print(\r\n max(\r\n len(s)\r\n for i in range(1, 1 << n)\r\n for s in [[j for j in range(n) if i >> j & 1]]\r\n if all((x, y) in edges for x, y in combinations(s, 2))\r\n )\r\n )\r\n\r\n @staticmethod\r\n def d_2():\r\n n, m = map(int, sys.stdin.readline().split())\r\n relations = [1 << i for i in range(n)]\r\n for x, y in zip(*[map(int, sys.stdin.read().split())] * 2):\r\n x -= 1\r\n y -= 1\r\n relations[x] |= 1 << y\r\n relations[y] |= 1 << x\r\n res = 0\r\n for i in range(1 << n):\r\n cnt = 0\r\n s = 0\r\n t = (1 << n) - 1\r\n for j in range(n):\r\n if i >> j & 1:\r\n s |= 1 << j\r\n t &= relations[j]\r\n cnt += 1\r\n if t & s == s:\r\n res = max(res, cnt)\r\n print(res)\r\n\r\n class ABC003:\r\n @staticmethod\r\n def a():\r\n print((int(sys.stdin.readline().rstrip()) + 1) * 5000)\r\n\r\n @staticmethod\r\n def b():\r\n atcoder = set(\"atcoder\")\r\n s, t = sys.stdin.read().split()\r\n print(\r\n all(\r\n s[i] == t[i]\r\n or s[i] == \"@\"\r\n and t[i] in atcoder\r\n or t[i] == \"@\"\r\n and s[i] in atcoder\r\n for i in range(len(s))\r\n )\r\n and \"You can win\"\r\n or \"You will lose\"\r\n )\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *r = map(int, sys.stdin.read().split())\r\n print(reduce(lambda x, y: (x + y) / 2, sorted(r)[-k:], 0))\r\n\r\n class ABC004:\r\n @staticmethod\r\n def a():\r\n print(int(sys.stdin.readline().rstrip()) * 2)\r\n\r\n @staticmethod\r\n def b():\r\n for l in [sys.stdin.readline().rstrip() for _ in range(4)][::-1]:\r\n print(l[::-1])\r\n\r\n @staticmethod\r\n def c():\r\n n = int(sys.stdin.readline().rstrip()) % 30\r\n res = list(range(1, 7))\r\n for i in range(n):\r\n i %= 5\r\n res[i], res[i + 1] = res[i + 1], res[i]\r\n print(*res, sep=\"\")\r\n\r\n class ABC005:\r\n @staticmethod\r\n def a():\r\n x, y = map(int, sys.stdin.readline().split())\r\n print(y // x)\r\n\r\n @staticmethod\r\n def b():\r\n n, *t = map(int, sys.stdin.read().split())\r\n print(min(t))\r\n\r\n @staticmethod\r\n def c():\r\n t = int(sys.stdin.readline().rstrip())\r\n n = int(sys.stdin.readline().rstrip())\r\n a = [int(x) for x in sys.stdin.readline().split()]\r\n m = int(sys.stdin.readline().rstrip())\r\n b = [int(x) for x in sys.stdin.readline().split()]\r\n i = 0\r\n for p in b:\r\n if i == n:\r\n print(\"no\")\r\n return\r\n while p - a[i] > t:\r\n i += 1\r\n if i == n:\r\n print(\"no\")\r\n return\r\n if a[i] > p:\r\n print(\"no\")\r\n return\r\n i += 1\r\n print(\"yes\")\r\n\r\n @staticmethod\r\n def d():\r\n n = int(sys.stdin.readline().rstrip())\r\n d = np.array(\r\n [sys.stdin.readline().split() for _ in range(n)], np.int64\r\n )\r\n s = d.cumsum(axis=0).cumsum(axis=1)\r\n s = np.pad(s, 1)\r\n max_del = np.zeros((n + 1, n + 1), dtype=np.int64)\r\n for y in range(1, n + 1):\r\n for x in range(1, n + 1):\r\n max_del[y, x] = np.amax(\r\n s[y : n + 1, x : n + 1]\r\n - s[0 : n - y + 
1, x : n + 1]\r\n - s[y : n + 1, 0 : n - x + 1]\r\n + s[0 : n - y + 1, 0 : n - x + 1]\r\n )\r\n res = np.arange(n**2 + 1)[:, None]\r\n i = np.arange(1, n + 1)\r\n res = max_del[i, np.minimum(res // i, n)].max(axis=1)\r\n q = int(sys.stdin.readline().rstrip())\r\n p = np.array(sys.stdin.read().split(), dtype=np.int64)\r\n print(*res[p], sep=\"\\n\")\r\n\r\n class ABC006:\r\n @staticmethod\r\n def a():\r\n n = sys.stdin.readline().rstrip()\r\n if \"3\" in n:\r\n print(\"YES\")\r\n elif int(n) % 3 == 0:\r\n print(\"YES\")\r\n else:\r\n print(\"NO\")\r\n\r\n @staticmethod\r\n def b():\r\n mod = 10007\r\n t = [0, 0, 1]\r\n for _ in range(1001001):\r\n t.append(t[-1] + t[-2] + t[-3])\r\n t[-1] %= mod\r\n n = int(sys.stdin.readline().rstrip())\r\n print(t[n - 1])\r\n\r\n @staticmethod\r\n def c():\r\n n, m = map(int, sys.stdin.readline().split())\r\n cnt = [0, 0, 0]\r\n if m == 1:\r\n cnt = [-1, -1, -1]\r\n else:\r\n if m & 1:\r\n m -= 3\r\n cnt[1] += 1\r\n n -= 1\r\n cnt[2] = m // 2 - n\r\n cnt[0] = n - cnt[2]\r\n if cnt[0] < 0 or cnt[1] < 0 or cnt[2] < 0:\r\n print(-1, -1, -1)\r\n else:\r\n print(*cnt, sep=\" \")\r\n\r\n @staticmethod\r\n def d():\r\n n, *c = map(int, sys.stdin.read().split())\r\n lis = [inf] * n\r\n for x in c:\r\n lis[bi_l(lis, x)] = x\r\n print(n - bi_l(lis, inf))\r\n\r\n class ABC007:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n print(n - 1)\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip()\r\n if s == \"a\":\r\n print(-1)\r\n else:\r\n print(\"a\")\r\n\r\n @staticmethod\r\n def c():\r\n r, c = map(int, sys.stdin.readline().split())\r\n sy, sx = map(int, sys.stdin.readline().split())\r\n gy, gx = map(int, sys.stdin.readline().split())\r\n sy -= 1\r\n sx -= 1\r\n gy -= 1\r\n gx -= 1\r\n maze = [sys.stdin.readline().rstrip() for _ in range(r)]\r\n queue = deque([(sy, sx)])\r\n dist = np.full((r, c), np.inf)\r\n dist[sy, sx] = 0\r\n while queue:\r\n y, x = queue.popleft()\r\n for i, j in [(-1, 0), (1, 0), (0, -1), (0, 1)]:\r\n i += y\r\n j += x\r\n if maze[i][j] == \"#\" or dist[i, j] != np.inf:\r\n continue\r\n dist[i, j] = dist[y, x] + 1\r\n queue.append((i, j))\r\n print(int(dist[gy, gx]))\r\n\r\n @staticmethod\r\n def d():\r\n ng = set([4, 9])\r\n\r\n def count(d):\r\n return d if d <= 4 else d - 1\r\n\r\n def f(n):\r\n x = [int(d) for d in str(n)]\r\n flg = True\r\n dp = 0\r\n for d in x:\r\n dp = dp * 8 + flg * count(d)\r\n if d in ng:\r\n flg = False\r\n return n - (dp + flg)\r\n\r\n a, b = map(int, sys.stdin.readline().split())\r\n print(f(b) - f(a - 1))\r\n\r\n class ABC008:\r\n @staticmethod\r\n def a():\r\n s, t = map(int, sys.stdin.readline().split())\r\n print(t - s + 1)\r\n\r\n @staticmethod\r\n def b():\r\n n, *s = sys.stdin.read().split()\r\n res = defaultdict(int)\r\n for name in s:\r\n res[name] += 1\r\n print(sorted(res.items(), key=lambda x: x[1])[-1][0])\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n c = n - np.count_nonzero(a[:, None] % a, axis=1)\r\n print(np.sum((c + 1) // 2 / c))\r\n\r\n @staticmethod\r\n def d():\r\n w, h, n, *xy = map(int, sys.stdin.read().split())\r\n (*xy,) = zip(*([iter(xy)] * 2))\r\n\r\n @lru_cache(maxsize=None)\r\n def count(x1, y1, x2, y2):\r\n res = 0\r\n for x, y in xy:\r\n if not (x1 <= x <= x2 and y1 <= y <= y2):\r\n continue\r\n cnt = (x2 - x1) + (y2 - y1) + 1\r\n cnt += count(x1, y1, x - 1, y - 1)\r\n cnt += count(x1, y + 1, x - 1, y2)\r\n cnt += count(x + 1, y1, x2, y - 1)\r\n cnt += count(x + 1, y + 1, x2, 
y2)\r\n res = max(res, cnt)\r\n return res\r\n\r\n print(count(1, 1, w, h))\r\n\r\n class ABC009:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n print((n + 1) // 2)\r\n\r\n @staticmethod\r\n def b():\r\n n, *a = map(int, sys.stdin.read().split())\r\n print(sorted(set(a))[-2])\r\n\r\n @staticmethod\r\n def c():\r\n n, k = map(int, sys.stdin.readline().split())\r\n s = list(sys.stdin.readline().rstrip())\r\n cost = [1] * n\r\n r = k\r\n for i in range(n - 1):\r\n q = []\r\n for j in range(i + 1, n):\r\n if s[j] < s[i] and cost[i] + cost[j] <= r:\r\n heappush(q, (s[j], cost[i] + cost[j], -j))\r\n if not q:\r\n continue\r\n _, c, j = heappop(q)\r\n j = -j\r\n s[i], s[j] = s[j], s[i]\r\n r -= c\r\n cost[i] = cost[j] = 0\r\n print(\"\".join(s))\r\n\r\n @staticmethod\r\n def d():\r\n k, m = map(int, sys.stdin.readline().split())\r\n a = np.array([int(x) for x in sys.stdin.readline().split()])[::-1]\r\n\r\n c = [int(x) for x in sys.stdin.readline().split()]\r\n mask = (1 << 32) - 1\r\n d = np.eye(k, k, -1, dtype=np.uint32) * mask\r\n d[0] = c\r\n\r\n def bitwise_dot(a, b):\r\n return np.bitwise_xor.reduce(\r\n a[:, :, None] & b[None, :, :], axis=1\r\n )\r\n\r\n def bitwise_mat_pow(a, n):\r\n if n == 0:\r\n return np.eye(k, dtype=np.uint32) * mask\r\n res = bitwise_mat_pow(a, n // 2)\r\n res = bitwise_dot(res, res)\r\n return bitwise_dot(res, a) if n & 1 else res\r\n\r\n res = bitwise_dot(bitwise_mat_pow(d, m - k), a.reshape(-1, 1))\r\n print(res[0].item())\r\n\r\n class ABC010:\r\n @staticmethod\r\n def a():\r\n print(sys.stdin.readline().rstrip() + \"pp\")\r\n\r\n @staticmethod\r\n def b():\r\n n, *a = map(int, sys.stdin.read().split())\r\n tot = 0\r\n for x in a:\r\n c = 0\r\n while x % 2 == 0 or x % 3 == 2:\r\n x -= 1\r\n c += 1\r\n tot += c\r\n print(tot)\r\n\r\n @staticmethod\r\n def c():\r\n sx, sy, gx, gy, t, v, n, *xy = map(int, sys.stdin.read().split())\r\n x, y = np.array(xy).reshape(-1, 2).T\r\n\r\n def dist(x1, y1, x2, y2):\r\n return np.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)\r\n\r\n ans = (\r\n \"YES\"\r\n if (dist(sx, sy, x, y) + dist(x, y, gx, gy) <= v * t).any()\r\n else \"NO\"\r\n )\r\n print(ans)\r\n\r\n class ABC011:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n print(n % 12 + 1)\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip()\r\n print(s[0].upper() + s[1:].lower())\r\n\r\n @staticmethod\r\n def c():\r\n n, *ng = map(int, sys.stdin.read().split())\r\n ng = set(ng)\r\n if n in ng:\r\n print(\"NO\")\r\n else:\r\n r = 100\r\n while n > 0:\r\n if r == 0:\r\n print(\"NO\")\r\n return\r\n for i in range(3, 0, -1):\r\n if (n - i) in ng:\r\n continue\r\n n -= i\r\n r -= 1\r\n break\r\n else:\r\n print(\"NO\")\r\n return\r\n print(\"YES\")\r\n\r\n class ABC032:\r\n @staticmethod\r\n def a():\r\n a, b, n = map(int, sys.stdin.read().split())\r\n l = NumberTheory().lcm(a, b)\r\n print((n + l - 1) // l * l)\r\n\r\n @staticmethod\r\n def b():\r\n s, k = sys.stdin.read().split()\r\n k = int(k)\r\n res = set()\r\n for i in range(len(s) - k + 1):\r\n res.add(s[i : i + k])\r\n print(len(res))\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *s = map(int, sys.stdin.read().split())\r\n if 0 in s:\r\n print(n)\r\n return\r\n s += [inf]\r\n res = 0\r\n l = r = 0\r\n tmp = 1\r\n while r <= n:\r\n tmp *= s[r]\r\n while tmp > k:\r\n res = max(res, r - l)\r\n tmp //= s[l]\r\n l += 1\r\n r += 1\r\n print(res)\r\n\r\n class ABC033:\r\n @staticmethod\r\n def a():\r\n n = set(sys.stdin.readline().rstrip())\r\n print(\"SAME\" 
if len(n) == 1 else \"DIFFERENT\")\r\n\r\n @staticmethod\r\n def b():\r\n n = int(sys.stdin.readline().rstrip())\r\n res = dict()\r\n for _ in range(n):\r\n s, p = sys.stdin.readline().split()\r\n p = int(p)\r\n res[s] = p\r\n tot = sum(res.values())\r\n for s, p in res.items():\r\n if p > tot / 2:\r\n print(s)\r\n return\r\n print(\"atcoder\")\r\n\r\n @staticmethod\r\n def c():\r\n s = sys.stdin.readline().rstrip()\r\n res = sum(not \"0\" in f for f in s.split(\"+\"))\r\n print(res)\r\n\r\n class ABC034:\r\n @staticmethod\r\n def a():\r\n x, y = map(int, sys.stdin.readline().split())\r\n print(\"Better\" if y > x else \"Worse\")\r\n\r\n @staticmethod\r\n def b():\r\n n = int(sys.stdin.readline().rstrip())\r\n print(n + 1 if n & 1 else n - 1)\r\n\r\n @staticmethod\r\n def c():\r\n h, w = map(int, sys.stdin.read().split())\r\n combinatorics = Combinatorics(n=2 * 10**5, numpy=True, mod=MOD)\r\n print(combinatorics.mod_choose(h + w - 2, h - 1))\r\n\r\n @staticmethod\r\n def d():\r\n n, k, *wp = map(int, sys.stdin.read().split())\r\n w, p = np.array(wp).reshape(-1, 2).T\r\n\r\n def f(x):\r\n return np.sort(w * (p - x))[-k:].sum()\r\n\r\n print(optimize.bisect(f, 0, 100))\r\n\r\n class ABC035:\r\n @staticmethod\r\n def a():\r\n w, h = map(int, sys.stdin.readline().split())\r\n print(\"4:3\" if 4 * h == 3 * w else \"16:9\")\r\n\r\n @staticmethod\r\n def b():\r\n s, t = sys.stdin.read().split()\r\n y = 0\r\n x = 0\r\n z = 0\r\n for c in s:\r\n if c == \"?\":\r\n z += 1\r\n elif c == \"L\":\r\n x -= 1\r\n elif c == \"R\":\r\n x += 1\r\n elif c == \"D\":\r\n y -= 1\r\n elif c == \"U\":\r\n y += 1\r\n d = abs(y) + abs(x)\r\n if t == \"1\":\r\n print(d + z)\r\n else:\r\n print(max(d - z, (d - z) & 1))\r\n\r\n @staticmethod\r\n def c():\r\n n, q, *lr = map(int, sys.stdin.read().split())\r\n l, r = np.array(lr).reshape(q, 2).T\r\n res = np.zeros(n + 1, dtype=int)\r\n np.add.at(res, l - 1, 1)\r\n np.subtract.at(res, r, 1)\r\n np.cumsum(res, out=res)\r\n res = res & 1\r\n print(\"\".join(map(str, res[:-1])))\r\n\r\n @staticmethod\r\n def d():\r\n n, m, t = map(int, sys.stdin.readline().split())\r\n point = np.array(sys.stdin.readline().split(), dtype=int)\r\n a, b, c = (\r\n np.array(sys.stdin.read().split(), dtype=np.int64)\r\n .reshape(m, 3)\r\n .T\r\n )\r\n a -= 1\r\n b -= 1\r\n d_1 = shortest_path(\r\n csr_matrix((c, (a, b)), (n, n)),\r\n method=\"D\",\r\n directed=True,\r\n indices=0,\r\n )\r\n d_2 = shortest_path(\r\n csr_matrix((c, (b, a)), (n, n)),\r\n method=\"D\",\r\n directed=True,\r\n indices=0,\r\n )\r\n print(int(np.amax((t - (d_1 + d_2)) * point)))\r\n\r\n class ABC036:\r\n @staticmethod\r\n def a():\r\n a, b = map(int, sys.stdin.readline().split())\r\n print((b + a - 1) // a)\r\n\r\n @staticmethod\r\n def b():\r\n n, *s = sys.stdin.read().split()\r\n n = int(n)\r\n for j in range(n):\r\n row = \"\"\r\n for i in range(n - 1, -1, -1):\r\n row += s[i][j]\r\n print(row)\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n b = [None] * n\r\n prev = None\r\n j = -1\r\n for i, x in sorted(enumerate(a), key=lambda x: x[1]):\r\n if x != prev:\r\n j += 1\r\n b[i] = j\r\n prev = x\r\n print(*b, sep=\"\\n\")\r\n\r\n @staticmethod\r\n def d():\r\n n, *ab = map(int, sys.stdin.read().split())\r\n edges = [[] for _ in range(n)]\r\n for a, b in zip(*[iter(ab)] * 2):\r\n a -= 1\r\n b -= 1\r\n edges[a].append(b)\r\n edges[b].append(a)\r\n parent = [None] * n\r\n\r\n def count(u):\r\n black, white = 1, 1\r\n for v in edges[u]:\r\n if v == parent[u]:\r\n continue\r\n parent[v] 
= u\r\n b, w = count(v)\r\n black *= w\r\n black %= MOD\r\n white *= (b + w) % MOD\r\n white %= MOD\r\n return black, white\r\n\r\n print(sum(count(0)) % MOD)\r\n\r\n class ABC037:\r\n @staticmethod\r\n def a():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n print(c // min(a, b))\r\n\r\n @staticmethod\r\n def b():\r\n n, q, *lrt = map(int, sys.stdin.read().split())\r\n a = np.zeros(n, dtype=int)\r\n for l, r, t in zip(*[iter(lrt)] * 3):\r\n a[l - 1 : r] = t\r\n print(*a, sep=\"\\n\")\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *a = map(int, sys.stdin.read().split())\r\n a = np.array([0] + a)\r\n np.cumsum(a, out=a)\r\n s = (a[k:] - a[:-k]).sum()\r\n print(s)\r\n\r\n @staticmethod\r\n def d():\r\n h, w = map(int, sys.stdin.readline().split())\r\n a = [\r\n [int(x) for x in sys.stdin.readline().split()]\r\n for _ in range(h)\r\n ]\r\n dyx = [(-1, 0), (0, -1), (1, 0), (0, 1)]\r\n path = [[None] * w for _ in range(h)]\r\n\r\n def paths(i, j):\r\n if path[i][j]:\r\n return path[i][j]\r\n val = a[i][j]\r\n cnt = 1\r\n for dy, dx in dyx:\r\n y = i + dy\r\n x = j + dx\r\n if 0 <= y < h and 0 <= x < w and a[y][x] < val:\r\n cnt += paths(y, x)\r\n cnt %= MOD\r\n path[i][j] = cnt\r\n return cnt\r\n\r\n tot = 0\r\n for i in range(h):\r\n for j in range(w):\r\n tot += paths(i, j)\r\n tot %= MOD\r\n print(tot)\r\n\r\n class ABC038:\r\n @staticmethod\r\n def a():\r\n s = sys.stdin.readline().rstrip()\r\n print(\"YES\" if s[-1] == \"T\" else \"NO\")\r\n\r\n @staticmethod\r\n def b():\r\n a, b, c, d = map(int, sys.stdin.read().split())\r\n print(\"YES\" if a == c or b == c or a == d or b == d else \"NO\")\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a += [-1]\r\n cnt = n\r\n tmp = 1\r\n for i in range(n):\r\n if a[i + 1] > a[i]:\r\n tmp += 1\r\n else:\r\n cnt += tmp * (tmp - 1) // 2\r\n tmp = 1\r\n print(cnt)\r\n\r\n @staticmethod\r\n def d():\r\n n, *wh = map(int, sys.stdin.read().split())\r\n wh = sorted(zip(*[iter(wh)] * 2), key=lambda x: (-x[0], x[1]))\r\n w = [x[1] for x in wh][::-1]\r\n res = [inf] * n\r\n for x in w:\r\n res[bi_l(res, x)] = x\r\n print(bi_l(res, inf))\r\n\r\n class ABC039:\r\n @staticmethod\r\n def a():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n print((a * b + b * c + c * a) * 2)\r\n\r\n @staticmethod\r\n def b():\r\n x = int(sys.stdin.readline().rstrip())\r\n for n in range(1, int(x**0.5) + 1):\r\n if pow(n, 4) == x:\r\n print(n)\r\n return\r\n\r\n @staticmethod\r\n def c():\r\n board = \"WBWBWWBWBWBW\" * 3\r\n convert = \"Do, *, Re, *, Mi, Fa, *, So, *, La, *, Si\".split(\", \")\r\n s = sys.stdin.readline().rstrip()\r\n print(convert[board.index(s)])\r\n\r\n @staticmethod\r\n def d():\r\n h, w = map(int, sys.stdin.readline().split())\r\n s = sys.stdin.read().split()\r\n dyx = list(product((-1, 0, 1), repeat=2))\r\n black_certain = set()\r\n black_before = set()\r\n for i in range(h):\r\n for j in range(w):\r\n black_cand = set()\r\n for dy, dx in dyx:\r\n y = i + dy\r\n x = j + dx\r\n if y < 0 or y >= h or x < 0 or x >= w:\r\n continue\r\n if s[y][x] == \".\":\r\n break\r\n black_cand.add((y, x))\r\n else:\r\n black_before.add((i, j))\r\n black_certain |= black_cand\r\n for i in range(h):\r\n for j in range(w):\r\n if s[i][j] == \"#\" and not (i, j) in black_certain:\r\n print(\"impossible\")\r\n return\r\n print(\"possible\")\r\n for i in range(h):\r\n row = \"\"\r\n for j in range(w):\r\n row += \"#\" if (i, j) in black_before else \".\"\r\n print(\"\".join(row))\r\n\r\n class ABC040:\r\n @staticmethod\r\n def 
a():\r\n n, x = map(int, sys.stdin.readline().split())\r\n print(min(x - 1, n - x))\r\n\r\n @staticmethod\r\n def b():\r\n n = int(sys.stdin.readline().rstrip())\r\n res = inf\r\n for i in range(1, int(n**0.5) + 1):\r\n res = min(res, n // i - i + n % i)\r\n print(res)\r\n\r\n @staticmethod\r\n def c():\r\n n, *h = map(int, sys.stdin.read().split())\r\n h = [h[0]] + h\r\n cost = [None] * (n + 1)\r\n cost[0] = cost[1] = 0\r\n for i in range(2, n + 1):\r\n cost[i] = min(\r\n cost[i - 2] + abs(h[i] - h[i - 2]),\r\n cost[i - 1] + abs(h[i] - h[i - 1]),\r\n )\r\n print(cost[n])\r\n\r\n @staticmethod\r\n def d():\r\n n, m = map(int, sys.stdin.readline().split())\r\n uf = UnionFind(n=n)\r\n queue = []\r\n for _ in range(m):\r\n a, b, y = map(int, sys.stdin.readline().split())\r\n heappush(queue, (-(2 * y), a - 1, b - 1))\r\n q = int(sys.stdin.readline().rstrip())\r\n for i in range(q):\r\n v, y = map(int, sys.stdin.readline().split())\r\n heappush(queue, (-(2 * y + 1), v - 1, i))\r\n res = [None] * q\r\n while queue:\r\n y, i, j = heappop(queue)\r\n if y & 1:\r\n res[j] = uf.size[uf.find_root(i)]\r\n else:\r\n uf.unite(i, j)\r\n print(*res, sep=\"\\n\")\r\n\r\n class ABC041:\r\n @staticmethod\r\n def a():\r\n s, i = sys.stdin.read().split()\r\n i = int(i)\r\n print(s[i - 1])\r\n\r\n @staticmethod\r\n def b():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n ans = a * b % MOD * c % MOD\r\n print(ans)\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n for i, h in sorted(enumerate(a), key=lambda x: -x[1]):\r\n print(i + 1)\r\n\r\n @staticmethod\r\n def d():\r\n n, m, *xy = map(int, sys.stdin.read().split())\r\n (*xy,) = zip(*[iter(xy)] * 2)\r\n edges = [0] * n\r\n for x, y in xy:\r\n x -= 1\r\n y -= 1\r\n edges[x] |= 1 << y\r\n comb = [None] * (1 << n)\r\n comb[0] = 1\r\n\r\n def count(edges, bit):\r\n if comb[bit] is not None:\r\n return comb[bit]\r\n comb[bit] = 0\r\n for i in range(n):\r\n if (bit >> i) & 1 and not edges[i]:\r\n nxt_bit = bit & ~(1 << i)\r\n nxt_edges = edges.copy()\r\n for j in range(n):\r\n nxt_edges[j] &= ~(1 << i)\r\n cnt = count(nxt_edges, nxt_bit)\r\n comb[bit] += cnt\r\n return comb[bit]\r\n\r\n print(count(edges, (1 << n) - 1))\r\n\r\n class ABC042:\r\n @staticmethod\r\n def a():\r\n a = [int(x) for x in sys.stdin.readline().split()]\r\n c = Counter(a)\r\n print(\"YES\" if c[5] == 2 and c[7] == 1 else \"NO\")\r\n\r\n @staticmethod\r\n def b():\r\n n, l, *s = sys.stdin.read().split()\r\n print(\"\".join(sorted(s)))\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *d = sys.stdin.read().split()\r\n l = len(n)\r\n ok = sorted(set(string.digits) - set(d))\r\n cand = [int(\"\".join(p)) for p in product(ok, repeat=l)] + [\r\n int(min(x for x in ok if x > \"0\") + min(ok) * l)\r\n ]\r\n print(cand[bi_l(cand, int(n))])\r\n\r\n @staticmethod\r\n def d():\r\n h, w, a, b = map(int, sys.stdin.read().split())\r\n combinatorics = Combinatorics(n=2 * 10**5, mod=MOD, numpy=True)\r\n tot = combinatorics.mod_choose(h + w - 2, h - 1)\r\n i = np.arange(h - a, h)\r\n ng = np.sum(\r\n combinatorics.mod_choose(i + b - 1, i)\r\n * combinatorics.mod_choose(h - i + w - b - 2, h - 1 - i)\r\n % MOD\r\n )\r\n tot -= ng\r\n tot %= MOD\r\n print(tot)\r\n\r\n class ABC043:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n print((1 + n) * n // 2)\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip()\r\n t = \"\"\r\n for c in s:\r\n if c == \"B\":\r\n t = t[:-1]\r\n else:\r\n t += c\r\n print(t)\r\n\r\n @staticmethod\r\n 
def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n x = np.around(a.sum() / n).astype(int)\r\n print(np.sum((a - x) ** 2))\r\n\r\n @staticmethod\r\n def d():\r\n s = sys.stdin.readline().rstrip()\r\n n = len(s)\r\n for i in range(n - 1):\r\n if s[i] == s[i + 1]:\r\n print(i + 1, i + 2)\r\n return\r\n for i in range(n - 2):\r\n if s[i] == s[i + 2]:\r\n print(i + 1, i + 3)\r\n return\r\n print(-1, -1)\r\n\r\n class ABC170:\r\n @staticmethod\r\n def a():\r\n x = [int(x) for x in sys.stdin.readline().split()]\r\n for i in range(5):\r\n if x[i] != i + 1:\r\n print(i + 1)\r\n break\r\n\r\n @staticmethod\r\n def b():\r\n x, y = map(int, sys.stdin.readline().split())\r\n print(\"Yes\" if 2 * x <= y <= 4 * x and y % 2 == 0 else \"No\")\r\n\r\n @staticmethod\r\n def c():\r\n x, n, *p = map(int, sys.stdin.read().split())\r\n a = list(set(range(102)) - set(p))\r\n a = [(abs(y - x), y) for y in a]\r\n print(sorted(a)[0][1])\r\n\r\n @staticmethod\r\n def d():\r\n n, *a = map(int, sys.stdin.read().split())\r\n cand = set(a)\r\n cnt = 0\r\n for x, c in sorted(Counter(a).items()):\r\n cnt += c == 1 and x in cand\r\n cand -= set(range(x * 2, 10**6 + 1, x))\r\n print(cnt)\r\n\r\n @staticmethod\r\n def e():\r\n n, q = map(int, sys.stdin.readline().split())\r\n queue = []\r\n m = 2 * 10**5\r\n infants = [[] for _ in range(m)]\r\n highest_rate = [None] * m\r\n where = [None] * n\r\n rate = [None] * n\r\n\r\n def entry(i, k):\r\n where[i] = k\r\n while infants[k]:\r\n r, j = heappop(infants[k])\r\n if where[j] != k or j == i:\r\n continue\r\n if rate[i] >= -r:\r\n highest_rate[k] = rate[i]\r\n heappush(queue, (rate[i], k, i))\r\n heappush(infants[k], (r, j))\r\n break\r\n else:\r\n highest_rate[k] = rate[i]\r\n heappush(queue, (rate[i], k, i))\r\n heappush(infants[k], (-rate[i], i))\r\n\r\n def transfer(i, k):\r\n now = where[i]\r\n while infants[now]:\r\n r, j = heappop(infants[now])\r\n if where[j] != now or j == i:\r\n continue\r\n if highest_rate[now] != -r:\r\n highest_rate[now] = -r\r\n heappush(queue, (-r, now, j))\r\n heappush(infants[now], (r, j))\r\n break\r\n else:\r\n highest_rate[now] = None\r\n entry(i, k)\r\n\r\n def inquire():\r\n while True:\r\n r, k, i = heappop(queue)\r\n if where[i] != k or r != highest_rate[k]:\r\n continue\r\n heappush(queue, (r, k, i))\r\n return r\r\n\r\n for i in range(n):\r\n a, b = map(int, sys.stdin.readline().split())\r\n rate[i] = a\r\n entry(i, b - 1)\r\n for _ in range(q):\r\n c, d = map(int, sys.stdin.readline().split())\r\n transfer(c - 1, d - 1)\r\n print(inquire())\r\n\r\n class ABC171:\r\n @staticmethod\r\n def a():\r\n c = sys.stdin.readline().rstrip()\r\n print(\"A\" if c < \"a\" else \"a\")\r\n\r\n @staticmethod\r\n def b():\r\n n, k, *p = map(int, sys.stdin.read().split())\r\n print(sum(sorted(p)[:k]))\r\n\r\n @staticmethod\r\n def c():\r\n n = int(sys.stdin.readline().rstrip())\r\n n -= 1\r\n l = 1\r\n while True:\r\n if n < pow(26, l):\r\n break\r\n n -= pow(26, l)\r\n l += 1\r\n res = \"\".join(\r\n [chr(ord(\"a\") + d) for d in NumberTheory.base_convert(n, 26)][\r\n ::-1\r\n ]\r\n )\r\n res = \"a\" * (l - len(res)) + res\r\n print(res)\r\n\r\n @staticmethod\r\n def d():\r\n n = int(sys.stdin.readline().rstrip())\r\n a = [int(x) for x in sys.stdin.readline().split()]\r\n s = sum(a)\r\n cnt = Counter(a)\r\n q = int(sys.stdin.readline().rstrip())\r\n for _ in range(q):\r\n b, c = map(int, sys.stdin.readline().split())\r\n s += (c - b) * cnt[b]\r\n print(s)\r\n cnt[c] += cnt[b]\r\n cnt[b] = 0\r\n\r\n @staticmethod\r\n def 
e():\r\n n, *a = map(int, sys.stdin.read().split())\r\n s = 0\r\n for x in a:\r\n s ^= x\r\n b = map(lambda x: x ^ s, a)\r\n print(*b, sep=\" \")\r\n\r\n class ABC172:\r\n @staticmethod\r\n def a():\r\n a = int(sys.stdin.readline().rstrip())\r\n print(a * (1 + a + a**2))\r\n\r\n @staticmethod\r\n def b():\r\n s, t = sys.stdin.read().split()\r\n print(sum(s[i] != t[i] for i in range(len(s))))\r\n\r\n @staticmethod\r\n def c():\r\n n, m, k = map(int, sys.stdin.readline().split())\r\n a = [0] + [int(x) for x in sys.stdin.readline().split()]\r\n b = [int(x) for x in sys.stdin.readline().split()]\r\n (*sa,) = accumulate(a)\r\n (*sb,) = accumulate(b)\r\n res = 0\r\n for i in range(n + 1):\r\n r = k - sa[i]\r\n if r < 0:\r\n break\r\n res = max(res, i + bi_r(sb, r))\r\n print(res)\r\n\r\n @staticmethod\r\n def d():\r\n n = int(sys.stdin.readline().rstrip())\r\n f = np.zeros(n + 1, dtype=np.int64)\r\n for i in range(1, n + 1):\r\n f[i::i] += 1\r\n print((np.arange(1, n + 1) * f[1:]).sum())\r\n\r\n class ABC173:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n charge = (n + 999) // 1000 * 1000 - n\r\n print(charge)\r\n\r\n @staticmethod\r\n def b():\r\n n, *s = sys.stdin.read().split()\r\n c = Counter(s)\r\n for v in \"AC, WA, TLE, RE\".split(\", \"):\r\n print(f\"{v} x {c[v]}\")\r\n\r\n @staticmethod\r\n def c():\r\n h, w, k = map(int, sys.stdin.readline().split())\r\n c = [sys.stdin.readline().rstrip() for _ in range(h)]\r\n tot = 0\r\n for i in range(1 << h):\r\n for j in range(1 << w):\r\n cnt = 0\r\n for y in range(h):\r\n for x in range(w):\r\n if i >> y & 1 or j >> x & 1:\r\n continue\r\n cnt += c[y][x] == \"#\"\r\n tot += cnt == k\r\n print(tot)\r\n\r\n @staticmethod\r\n def d():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a.sort(reverse=True)\r\n res = (\r\n a[0]\r\n + sum(a[1 : 1 + (n - 2) // 2]) * 2\r\n + a[1 + (n - 2) // 2] * (n & 1)\r\n )\r\n print(res)\r\n\r\n @staticmethod\r\n def e():\r\n MOD = 10**9 + 7\r\n n, k, *a = map(int, sys.stdin.read().split())\r\n minus = [x for x in a if x < 0]\r\n plus = [x for x in a if x > 0]\r\n if len(plus) + len(minus) // 2 * 2 >= k: # plus\r\n (*minus,) = map(abs, minus)\r\n minus.sort(reverse=True)\r\n plus.sort(reverse=True)\r\n cand = []\r\n if len(minus) & 1:\r\n minus = minus[:-1]\r\n for i in range(0, len(minus) - 1, 2):\r\n cand.append(minus[i] * minus[i + 1] % MOD)\r\n if k & 1:\r\n res = plus[0]\r\n plus = plus[1:]\r\n else:\r\n res = 1\r\n if len(plus) & 1:\r\n plus = plus[:-1]\r\n for i in range(0, len(plus) - 1, 2):\r\n cand.append(plus[i] * plus[i + 1] % MOD)\r\n cand.sort(reverse=True)\r\n for x in cand[: k // 2]:\r\n res *= x\r\n res %= MOD\r\n print(res)\r\n elif 0 in a:\r\n print(0)\r\n else:\r\n cand = sorted(map(abs, a))\r\n res = 1\r\n for i in range(k):\r\n res *= cand[i]\r\n res %= MOD\r\n res = MOD - res\r\n print(res)\r\n pass\r\n\r\n class ABC174:\r\n @staticmethod\r\n def a():\r\n print(\"Yes\" if int(sys.stdin.readline().rstrip()) >= 30 else \"No\")\r\n\r\n class MSolutions2020:\r\n @staticmethod\r\n def a():\r\n x = int(sys.stdin.readline().rstrip())\r\n x -= 400\r\n print(8 - x // 200)\r\n\r\n @staticmethod\r\n def b():\r\n r, g, b, k = map(int, sys.stdin.read().split())\r\n while k and g <= r:\r\n g *= 2\r\n k -= 1\r\n while k and b <= g:\r\n b *= 2\r\n k -= 1\r\n print(\"Yes\" if r < g < b else \"No\")\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *a = map(int, sys.stdin.read().split())\r\n for i in range(k, n):\r\n print(\"Yes\" if a[i] > a[i - k] else \"No\")\r\n\r\n 
@staticmethod\r\n def d():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a += [-1]\r\n m = 1000\r\n s = 0\r\n for i in range(n):\r\n if a[i + 1] == a[i]:\r\n continue\r\n elif a[i + 1] > a[i]:\r\n cnt = m // a[i]\r\n m -= a[i] * cnt\r\n s += cnt\r\n else:\r\n m += a[i] * s\r\n s = 0\r\n print(m)\r\n\r\n\r\nclass Codeforces:\r\n pass\r\n\r\n\r\nclass ProjectEuler:\r\n @staticmethod\r\n def p1():\r\n def f(n, x):\r\n return (x + n // x * x) * (n // x) // 2\r\n\r\n n = 1000\r\n ans = f(n - 1, 3) + f(n - 1, 5) - f(n - 1, 15)\r\n print(ans)\r\n\r\n @staticmethod\r\n def p2():\r\n fib = [1, 2]\r\n while fib[-1] < 4 * 10**6:\r\n fib.append(fib[-1] + fib[-2])\r\n print(sum(fib[1:-1:3]))\r\n\r\n @staticmethod\r\n def p3():\r\n number_theory = NumberTheory()\r\n res = number_theory.prime_factorize(600851475143)\r\n print(max(res.keys()))\r\n\r\n @staticmethod\r\n def p4():\r\n def is_palindrome(n):\r\n n = str(n)\r\n return n == n[::-1]\r\n\r\n cand = []\r\n for a in range(100, 1000):\r\n for b in range(a, 1000):\r\n n = a * b\r\n if is_palindrome(n):\r\n cand.append(n)\r\n print(max(cand))\r\n\r\n @staticmethod\r\n def p5():\r\n number_theory = NumberTheory()\r\n res = defaultdict(int)\r\n for i in range(1, 21):\r\n for p, c in number_theory.prime_factorize(i).items():\r\n res[p] = max(res[p], c)\r\n ans = 1\r\n for p, c in res.items():\r\n ans *= pow(p, c)\r\n print(ans)\r\n\r\n @staticmethod\r\n def p6():\r\n a = np.arange(101)\r\n b = np.cumsum(a**2)\r\n a = a.cumsum()\r\n print(a[100] ** 2 - b[100])\r\n\r\n @staticmethod\r\n def p7():\r\n number_theory = NumberTheory()\r\n print(sorted(number_theory.prime_numbers)[10000])\r\n\r\n @staticmethod\r\n def p8():\r\n n = \"7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450\"\r\n n = [int(d) for d in list(n)]\r\n res = 0\r\n for i in range(988):\r\n x = 1\r\n for j in range(13):\r\n x *= n[i + j]\r\n res = max(res, x)\r\n print(res)\r\n\r\n @staticmethod\r\n def p9():\r\n for a in range(1, 997):\r\n for b in range(a, 998 - a):\r\n c = 1000 - a - b\r\n if a**2 + b**2 == c**2:\r\n print(a * b * c)\r\n return\r\n\r\n @staticmethod\r\n def p10():\r\n number_theory = NumberTheory(2 * 10**6 - 1)\r\n print(sum(number_theory.prime_numbers))\r\n\r\n @staticmethod\r\n def p11():\r\n grid = \"08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08 49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00 81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65 52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91 22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80 24 47 32 60 99 03 45 02 44 75 33 53 78 
36 84 20 35 17 12 50 32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70 67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21 24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72 21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95 78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92 16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57 86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58 19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40 04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66 88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69 04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36 20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16 20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54 01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48\"\r\n # grid = np.array(grid.split(), dtype=np.int64).reshape(20, -1)\r\n # cand = []\r\n # for i in range(20):\r\n # bl1 = i+3 < 20\r\n # for j in range(20):\r\n # bl2 = j+3 < 20\r\n # if bl1:\r\n # np.prod\r\n # tmp = 1\r\n # for d in range(4):\r\n # tmp *= grid[i+d, j]\r\n print(grid)\r\n\r\n pass\r\n\r\n\r\nclass Yukicoder:\r\n pass\r\n\r\n\r\nif __name__ == \"__main__\":\r\n AtCoder.ABC009.d()\r\n pass\r\n", "import sys\r\n\r\nimport numpy as np\r\n\r\nn = int(sys.stdin.readline().rstrip())\r\ngraph = [None] * n\r\nfor i in range(n):\r\n a = int(sys.stdin.readline().rstrip())\r\n if not a: continue\r\n graph[i] = np.array([sys.stdin.readline().split() for _ in range(a)], dtype=np.int16).T\r\n\r\ndef main():\r\n comb = (np.arange(1 << n)[:, None] >> np.arange(n) & 1).astype(np.bool)\r\n ok = np.full(1 << n, True, dtype=np.bool)\r\n for i in range(n):\r\n if graph[i] is None: continue\r\n x, y = graph[i]\r\n ok &= ~comb[:, i] | np.all(comb[:, x-1] == y.astype(np.bool), axis=1)\r\n\r\n print(np.amax(comb[ok].sum(axis=1)))\r\n\r\nif __name__ == '__main__':\r\n main()\n", "import sys\r\nimport typing\r\n\r\nimport numba as nb\r\nimport numpy as np\r\n\r\n\r\[email protected]\r\ndef pow(mod: int, x: int, n: int) -> int:\r\n y = 1\r\n while n:\r\n if n & 1: y = y * x % mod\r\n x = x * x % mod\r\n n >>= 1\r\n return y\r\n\r\n\r\[email protected]((nb.i8[:], ), cache=True)\r\ndef solve(a: np.ndarray) -> typing.NoReturn:\r\n n = len(a)\r\n b = np.bincount(a)\r\n if b[0] != 1 or a[0] != 0:\r\n print(0)\r\n return\r\n mod = 10 ** 9 + 7\r\n k = 1 << 17\r\n pow2 = np.ones(n, np.int64)\r\n for i in range(n - 1): pow2[i + 1] = pow2[i] * 2 % mod\r\n m = len(b)\r\n c = np.zeros(m, np.int64)\r\n c[0] = 1\r\n def choose2(n):\r\n return n * (n - 1) // 2\r\n\r\n for i in range(m - 1):\r\n c[i + 1] = c[i] * pow(mod, (pow2[b[i]] - 1) % mod, b[i + 1]) % mod * pow(mod, 2, choose2(b[i + 1])) % mod\r\n print(c[-1])\r\n\r\n\r\n\r\ndef main() -> typing.NoReturn:\r\n n = int(input())\r\n a = np.array(sys.stdin.readline().split(), dtype=np.int64)\r\n solve(a)\r\n\r\n\r\nmain()\n", "import sys\r\nimport typing\r\n\r\nimport numpy as np\r\n\r\n\r\ndef sa_doubling(\r\n a: np.array,\r\n) -> np.array:\r\n n = a.size\r\n a = np.searchsorted(\r\n np.unique(a),\r\n a,\r\n )\r\n cnt = np.zeros(n + 1, dtype=np.int32)\r\n\r\n def count_sort(a):\r\n for x in a: cnt[x + 1] += 1\r\n for i in range(n): cnt[i + 1] += cnt[i]\r\n idx = np.empty(n, dtype=np.int32)\r\n for i in range(n):\r\n x = a[i]\r\n idx[cnt[x]] = i\r\n cnt[x] += 1\r\n cnt[:] = 0\r\n return idx\r\n\r\n k = 1\r\n rank = a\r\n while 1:\r\n b = np.zeros(n, dtype=np.int64)\r\n for i in range(n - k):\r\n b[i] = rank[i + k] + 
1\r\n ord_b = count_sort(b)\r\n a = rank[ord_b]\r\n ord_a = count_sort(a)\r\n sa = ord_b[ord_a]\r\n c = a[ord_a] << 30 | b[sa]\r\n rank = np.empty(n, dtype=np.int64)\r\n rank[sa[0]] = 0\r\n for i in range(n - 1):\r\n rank[sa[i + 1]] = rank[sa[i]] + (c[i + 1] > c[i])\r\n k *= 2\r\n if k >= n: break\r\n return sa\r\n\r\n\r\n\r\ndef kasai(\r\n a: np.array,\r\n sa: np.array,\r\n) -> np.array:\r\n n = a.size\r\n assert n > 0 and sa.size == n\r\n\r\n rank = np.empty(n, np.int32)\r\n rank[sa] = np.arange(n)\r\n h, l = np.empty(n - 1, np.int32), 0\r\n for i in range(n):\r\n if l: l -= 1\r\n r = rank[i]\r\n if r == n - 1: continue\r\n j = sa[r + 1]\r\n while i + l < n and j + l < n:\r\n if a[i + l] != a[j + l]: break\r\n l += 1\r\n h[r] = l\r\n return h\r\n\r\n\r\n\r\ndef solve(\r\n a: np.array,\r\n) -> typing.NoReturn:\r\n n = a.size\r\n sa = sa_doubling(a)\r\n lcp = kasai(a, sa)\r\n\r\n a = np.arange(n, 0, -1)\r\n for _ in range(2):\r\n st = []\r\n s = 0\r\n for i in range(n - 1):\r\n h = lcp[i]\r\n l = 1\r\n while st and st[-1][0] >= h:\r\n x = st.pop()\r\n l += x[1]\r\n s -= x[0] * x[1]\r\n s += h * l\r\n st.append((h, l))\r\n a[sa[i + 1]] += s\r\n sa = sa[::-1]\r\n lcp = lcp[::-1]\r\n\r\n for i in range(n):\r\n print(a[i])\r\n\r\n\r\n\r\ndef main() -> typing.NoReturn:\r\n n = int(sys.stdin.buffer.readline().rstrip())\r\n s = np.frombuffer(\r\n sys.stdin.buffer.readline().rstrip(),\r\n dtype='b',\r\n ).astype(np.int64)\r\n solve(s)\r\n\r\n\r\n\r\nif sys.argv[-1] == 'ONLINE_JUDGE':\r\n import numba as nb\r\n from numba.pycc import CC\r\n cc = CC('my_module')\r\n sa_doubling = nb.njit(sa_doubling)\r\n kasai = nb.njit(kasai)\r\n fn = solve\r\n sig = (nb.i8[:], )\r\n cc.export(\r\n fn.__name__,\r\n sig,\r\n )(fn)\r\n cc.compile()\r\n exit(0)\r\n\r\n\r\nfrom my_module import solve\r\n\r\nmain()\r\n", "import sys\r\nimport typing\r\n\r\nimport numba as nb\r\nimport numpy as np\r\n\r\n\r\n# dense graph $O(V^2)$\r\[email protected]\r\ndef maximum_flow_dinic(\r\n g: np.ndarray,\r\n src: int,\r\n sink: int,\r\n) -> int:\r\n n = len(g)\r\n inf = 1 << 60\r\n level = np.full(n, -1, np.int32)\r\n\r\n def _update_level():\r\n level[:] = -1\r\n level[src] = 0\r\n fifo_que = [src]\r\n for u in fifo_que:\r\n for v in range(n):\r\n if level[v] != -1 or g[u, v] <= 0:\r\n continue\r\n level[v] = level[u] + 1\r\n fifo_que.append(v)\r\n\r\n flow_in = np.zeros(n + 1, np.int64)\r\n flow_out = np.zeros(n + 1, np.int64)\r\n prev = np.full(n, -1, np.int64)\r\n\r\n def _compute_flow():\r\n flow_in[:] = 0\r\n flow_in[src] = inf\r\n flow_out[:] = 0\r\n prev[:] = -1\r\n st = [src]\r\n while st:\r\n u = st.pop()\r\n if u < 0:\r\n u = -u - 1\r\n if u == src:\r\n return flow_out[src]\r\n p = prev[u]\r\n f = flow_out[u]\r\n flow_out[p] += f\r\n flow_in[p] -= f\r\n g[p, u] -= f\r\n g[u, p] += f\r\n flow_in[u] = flow_out[u] = 0\r\n continue\r\n st.append(-u - 1)\r\n p = prev[u]\r\n if u != src:\r\n flow_in[u] = min(flow_in[p], g[p, u])\r\n if u == sink:\r\n flow_out[u] = flow_in[u]\r\n continue\r\n if flow_in[u] == 0:\r\n continue\r\n for v in range(n):\r\n if g[u, v] == 0 or level[v] <= level[u]:\r\n continue\r\n prev[v] = u\r\n st.append(v)\r\n\r\n flow = 0\r\n while 1:\r\n _update_level()\r\n if level[sink] == -1:\r\n return flow\r\n flow += _compute_flow()\r\n\r\n\r\[email protected]((nb.i8, nb.i8[:], nb.i8[:, :]), cache=True)\r\ndef solve(\r\n n: int,\r\n p: np.ndarray,\r\n ab: np.ndarray,\r\n) -> typing.NoReturn:\r\n n += 1\r\n g = np.zeros((n, n), np.int64)\r\n for i in range(len(ab)):\r\n a, b = ab[i]\r\n g[a, b] = g[b, a] 
= 1\r\n for i in p:\r\n g[i, n - 1] = 1\r\n v = maximum_flow_dinic(g, 0, n - 1)\r\n print(v)\r\n\r\n\r\ndef main() -> typing.NoReturn:\r\n n, g, e = map(int, input().split())\r\n p = np.array(\r\n sys.stdin.readline().split(),\r\n dtype=np.int64,\r\n )\r\n ab = np.array(\r\n sys.stdin.read().split(),\r\n dtype=np.int64,\r\n ).reshape(e, 2)\r\n solve(n, p, ab)\r\n\r\n\r\nmain()\r\n", "\r\n\r\nimport typing\r\n\r\nimport numba as nb\r\nimport numpy as np\r\n\r\n\r\[email protected]\r\ndef extgcd(a: int, b: int) -> typing.Tuple[int, int, int]:\r\n if b == 0: return a, 1, 0\r\n g, s, t = extgcd(b, a % b)\r\n return g, t, s - a // b * t\r\n\r\n\r\[email protected]\r\ndef crt(r: np.ndarray, m: np.ndarray) -> typing.Tuple[int, int]:\r\n r0, m0 = 0, 1\r\n assert r.size == m.size\r\n for i in range(r.size):\r\n r1, m1 = r[i], m[i]\r\n assert m1 >= 1\r\n r1 %= m1\r\n if m0 < m1:\r\n r0, r1 = r1, r0\r\n m0, m1 = m1, m0\r\n if m0 % m1 == 0:\r\n if r0 % m1 != r1: return 0, 0\r\n continue\r\n g, p, q = extgcd(m0, m1)\r\n if (r1 - r0) % g: return 0, 0\r\n u1 = m1 // g\r\n r0 += (r1 - r0) // g % u1 * p % u1 * m0\r\n m0 *= u1\r\n r0 %= m0\r\n return r0, m0\r\n\r\n\r\[email protected]\r\ndef find_divisors(n: int) -> np.ndarray:\r\n i = np.arange(int(n ** .5) + 1) + 1\r\n i = i[n % i == 0]\r\n return np.unique(np.hstack((i, n // i)))\r\n\r\n\r\[email protected]((nb.i8, ), cache=True)\r\ndef solve(n: int) -> typing.NoReturn:\r\n n *= 2\r\n divs = find_divisors(n)\r\n\r\n mn = 1 << 60\r\n for a in divs:\r\n b = n // a\r\n # g, p, q = extgcd(a, b)\r\n # if np.gcd(a, b) != 1 or b == 1: continue\r\n # k = a * (-p % b)\r\n # mn = min(mn, k)\r\n rs = np.array([0, -1])\r\n ms = np.array([a, b])\r\n r, l = crt(rs, ms)\r\n if l == 0 or r == 0: continue\r\n mn = min(mn, r)\r\n print(mn)\r\n\r\n\r\n\r\ndef main() -> typing.NoReturn:\r\n n = int(input())\r\n solve(n)\r\n\r\nmain()\r\n", "import sys\r\n\r\nimport numpy as np\r\n\r\nn, m, *lr = map(int, sys.stdin.read().split())\r\nl, r = np.array(lr).reshape(-1, 2).T\r\n\r\ndef main():\r\n res = max(0, r.min() - l.max() + 1)\r\n print(res)\r\n\r\nif __name__ == '__main__':\r\n main()\n", "import sys\r\n\r\nimport numpy as np\r\n\r\nMOD = 10 ** 9 + 7\r\n\r\nn, *a = map(int, sys.stdin.read().split())\r\na = np.array(a, dtype=np.int64)\r\n\r\ndef main():\r\n b = a >> np.arange(60).reshape(-1, 1) & 1\r\n x = np.count_nonzero(b, axis=1)\r\n x = x * (n - x)\r\n x *= np.power(2, np.arange(60)) % MOD\r\n x %= MOD\r\n ans = np.sum(x) % MOD\r\n return ans\r\n\r\nif __name__ == '__main__':\r\n ans = main()\r\n print(ans)\n", "import sys\nimport typing\n\nimport numpy as np\n\n\ndef main():\n n = int(input())\n a = np.array(\n sys.stdin.readline()\n .split(),\n dtype=np.int64,\n )\n i = np.argsort(a)\n print(i[-2] + 1)\n\n\nmain()\n", "import itertools\r\nimport math\r\nimport string\r\nimport sys\r\nfrom bisect import bisect_left as bi_l\r\nfrom bisect import bisect_right as bi_r\r\nfrom collections import Counter, defaultdict, deque\r\nfrom heapq import heappop, heappush\r\nfrom operator import or_, xor\r\n\r\ninf = float(\"inf\")\r\nfrom functools import lru_cache, reduce\r\n\r\nsys.setrecursionlimit(10**6)\r\nMOD = 10**9 + 7\r\n# MOD = 998244353\r\n\r\nglobal using_numpy\r\nusing_numpy = 1\r\nimport networkx as nx\r\nimport numpy as np\r\nfrom numba import jit\r\nfrom scipy import optimize\r\nfrom scipy.ndimage import distance_transform_cdt\r\nfrom scipy.sparse import csr_matrix\r\nfrom scipy.sparse.csgraph import (\r\n csgraph_to_dense,\r\n maximum_flow,\r\n 
minimum_spanning_tree,\r\n shortest_path,\r\n)\r\nfrom scipy.spatial import ConvexHull\r\nfrom scipy.special import comb\r\n\r\n\r\nclass Algebra:\r\n class Mint(int):\r\n def __init__(self, n, mod=MOD):\r\n self.value = n\r\n self.mod = mod\r\n\r\n def __str__(self):\r\n return f\"{self.value}\"\r\n\r\n def __add__(self, x):\r\n return self.__class__((self.value + x.value) % self.mod)\r\n\r\n def __sub__(self, x):\r\n return self.__class__((self.value - x.value) % self.mod)\r\n\r\n def __mul__(self, x):\r\n return self.__class__((self.value * x.value) % self.mod)\r\n\r\n def __pow__(self, x):\r\n return self.__class__(pow(self.value, x.value, self.mod))\r\n\r\n def __lt__(self, x):\r\n return self.value < x.value\r\n\r\n def __le__(self, x):\r\n return self.value <= x.value\r\n\r\n def __eq__(self, x):\r\n return self.value == x.value\r\n\r\n def __ne__(self, x):\r\n return self.value != x.value\r\n\r\n def __gt__(self, x):\r\n return self.value > x.value\r\n\r\n def __ge__(self, x):\r\n return self.value >= x.value\r\n\r\n class SemiGroup:\r\n pass\r\n\r\n class Monoid:\r\n pass\r\n\r\n class Group:\r\n pass\r\n\r\n class SemiRing:\r\n pass\r\n\r\n class Ring:\r\n pass\r\n\r\n @staticmethod\r\n def identity(n):\r\n if using_numpy:\r\n return np.identity(n, dtype=np.int64)\r\n else:\r\n a = [[0] * n for _ in range(n)]\r\n for i in range(n):\r\n a[i][i] = 1\r\n return a\r\n\r\n @staticmethod\r\n def dot(a, b):\r\n if using_numpy:\r\n return np.dot(a, b)\r\n else:\r\n h, w, l = len(a), len(b[0]), len(b)\r\n assert len(a[0]) == l\r\n c = [[0] * w for _ in range(h)]\r\n for i in range(h):\r\n for j in range(w):\r\n for k in range(l):\r\n c[i][j] += a[i][k] * b[k][j]\r\n return c\r\n\r\n @classmethod\r\n def matrix_pow(cls, a, n, mod=10**9 + 7):\r\n m = len(a)\r\n b = cls.identity(m)\r\n while n:\r\n if n & 1:\r\n b = cls.dot(b, a)\r\n n >>= 1\r\n a = cls.dot(a, a)\r\n if using_numpy:\r\n a %= mod\r\n b %= mod\r\n else:\r\n for i in range(m):\r\n for j in range(m):\r\n a[i][j] %= mod\r\n b[i][j] %= mod\r\n return b\r\n\r\n @staticmethod\r\n def bitwise_dot(a, b):\r\n if using_numpy:\r\n return np.bitwise_xor.reduce(\r\n a[:, None, :] & b.T[None, :, :], axis=-1\r\n )\r\n else:\r\n h, w, l = len(a), len(b[0]), len(b)\r\n assert len(a[0]) == l\r\n c = [[0] * w for _ in range(h)]\r\n for i in range(h):\r\n for j in range(w):\r\n for k in range(l):\r\n c[i][j] ^= a[i][k] & b[k][j]\r\n return c\r\n\r\n @classmethod\r\n def bitwise_mat_pow(cls, a, n):\r\n if n == 0:\r\n return np.eye(len(a), dtype=np.uint32) * ((1 << 32) - 1)\r\n res = cls.bitwise_mat_pow(a, n // 2)\r\n res = cls.bitwise_dot(res, res)\r\n return cls.bitwise_dot(res, a) if n & 1 else res\r\n\r\n class Kitamasa:\r\n pass\r\n\r\n\r\nmint = Algebra.Mint\r\n\r\n\r\nclass NumberTheory:\r\n class PrimeNumbers: # pn\r\n def __init__(self, n=2 * 10**6):\r\n self.is_prime, self.prime_nums = self.find(n)\r\n\r\n def __call__(self, n):\r\n return self.is_prime[n]\r\n\r\n def __iter__(self):\r\n return iter(self.prime_nums)\r\n\r\n def __getitem__(self, key):\r\n return self.prime_nums[key]\r\n\r\n @staticmethod\r\n def find(n): # Sieve of eratosthenes\r\n if using_numpy:\r\n is_prime = np.ones(n + 1, dtype=np.bool)\r\n is_prime[:2] = 0\r\n for i in range(2, int(n**0.5) + 1):\r\n if is_prime[i]:\r\n is_prime[i * 2 :: i] = 0\r\n prime_nums = np.flatnonzero(is_prime)\r\n else:\r\n is_prime = [True] * (n + 1)\r\n is_prime[0] = is_prime[1] = 0\r\n for i in range(2, int(n**0.5) + 1):\r\n if not is_prime[i]:\r\n continue\r\n for j in range(i * 2, n + 
1, i):\r\n is_prime[j] = 0\r\n prime_nums = [i for i in range(2, n + 1) if is_prime[i]]\r\n return is_prime, prime_nums\r\n\r\n @lru_cache(maxsize=None)\r\n def factorize(self, n):\r\n res = defaultdict(int)\r\n if n < 2:\r\n return res\r\n for p in self:\r\n if p * p > n:\r\n break\r\n while n % p == 0:\r\n res[p] += 1\r\n n //= p\r\n if n == 1:\r\n return res\r\n res[n] = 1\r\n return res\r\n\r\n def factorize_factorial(self, n):\r\n res = defaultdict(int)\r\n for i in range(2, n + 1):\r\n for p, c in self.factorize(i).items():\r\n res[p] += c\r\n return res\r\n\r\n @classmethod\r\n @lru_cache(maxsize=None)\r\n def gcd(cls, a, b):\r\n return cls.gcd(b, a % b) if b else abs(a)\r\n\r\n @classmethod\r\n def lcm(cls, a, b):\r\n return abs(a // cls.gcd(a, b) * b)\r\n\r\n @staticmethod\r\n def find_divisors(n):\r\n divisors = []\r\n for i in range(1, int(n**0.5) + 1):\r\n if n % i:\r\n continue\r\n divisors.append(i)\r\n j = n // i\r\n if j != i:\r\n divisors.append(j)\r\n return sorted(divisors)\r\n\r\n @staticmethod\r\n def base_convert(n, b):\r\n if not n:\r\n return [0]\r\n res = []\r\n while n:\r\n n, r = divmod(n, b)\r\n if r < 0:\r\n n += 1\r\n r -= b\r\n res.append(r)\r\n return res\r\n\r\n\r\nclass Combinatorics:\r\n def __init__(self, N=10**9, n=10**6, mod=10**9 + 7):\r\n self.mod = mod\r\n self.make_mod_tables(N, n)\r\n\r\n @classmethod\r\n @lru_cache(maxsize=None)\r\n def choose(cls, n, r, mod=None): # no mod, or mod ≠ prime\r\n if r > n or r < 0:\r\n return 0\r\n if r == 0:\r\n return 1\r\n res = cls.choose(n - 1, r, mod) + cls.choose(n - 1, r - 1, mod)\r\n if mod:\r\n res %= mod\r\n return res\r\n\r\n def cumprod(self, a):\r\n p = self.mod\r\n l = len(a)\r\n sql = int(np.sqrt(l) + 1)\r\n a = np.resize(a, sql**2).reshape(sql, sql)\r\n for i in range(sql - 1):\r\n a[:, i + 1] *= a[:, i]\r\n a[:, i + 1] %= p\r\n for i in range(sql - 1):\r\n a[i + 1] *= a[i, -1]\r\n a[i + 1] %= p\r\n return np.ravel(a)[:l]\r\n\r\n def make_mod_tables(self, N, n):\r\n p = self.mod\r\n if using_numpy:\r\n fac = np.arange(n + 1)\r\n fac[0] = 1\r\n fac = self.cumprod(fac)\r\n ifac = np.arange(n + 1, 0, -1)\r\n ifac[0] = pow(int(fac[-1]), p - 2, p)\r\n ifac = self.cumprod(ifac)[n::-1]\r\n n_choose = np.arange(N + 1, N - n, -1)\r\n n_choose[0] = 1\r\n n_choose[1:] = self.cumprod(n_choose[1:]) * ifac[1 : n + 1] % p\r\n else:\r\n fac = [None] * (n + 1)\r\n fac[0] = 1\r\n for i in range(n):\r\n fac[i + 1] = fac[i] * (i + 1) % p\r\n ifac = [None] * (n + 1)\r\n ifac[n] = pow(fac[n], p - 2, p)\r\n for i in range(n, 0, -1):\r\n ifac[i - 1] = ifac[i] * i % p\r\n n_choose = [None] * (n + 1)\r\n n_choose[0] = 1\r\n for i in range(n):\r\n n_choose[i + 1] = n_choose[i] * (N - i) % p\r\n for i in range(n + 1):\r\n n_choose[i] = n_choose[i] * ifac[i] % p\r\n self.fac, self.ifac, self.mod_n_choose = fac, ifac, n_choose\r\n\r\n def mod_choose(self, n, r):\r\n p = self.mod\r\n return self.fac[n] * self.ifac[r] % p * self.ifac[n - r] % p\r\n\r\n @classmethod\r\n def permutations(cls, a, r=None, i=0):\r\n a = list(a)\r\n n = len(a)\r\n if r is None:\r\n r = n\r\n res = []\r\n if r > n or i > r:\r\n return res\r\n if i == r:\r\n return [tuple(a[:r])]\r\n for j in range(i, n):\r\n a[i], a[j] = a[j], a[i]\r\n res += cls.permutations(a, r, i + 1)\r\n return res\r\n\r\n @staticmethod\r\n def combinations(a, r):\r\n a = tuple(a)\r\n n = len(a)\r\n if r > n:\r\n return\r\n indices = list(range(r))\r\n yield a[:r]\r\n while True:\r\n for i in range(r - 1, -1, -1):\r\n if indices[i] != i + n - r:\r\n break\r\n else:\r\n return\r\n 
indices[i] += 1\r\n for j in range(i + 1, r):\r\n indices[j] = indices[j - 1] + 1\r\n yield tuple(a[i] for i in indices)\r\n\r\n\r\nclass String:\r\n @staticmethod\r\n def z_algorithm(s):\r\n n = len(s)\r\n a = [0] * n\r\n a[0] = n\r\n l = r = -1\r\n for i in range(1, n):\r\n if r >= i:\r\n a[i] = min(a[i - l], r - i)\r\n while i + a[i] < n and s[i + a[i]] == s[a[i]]:\r\n a[i] += 1\r\n if i + a[i] >= r:\r\n l, r = i, i + a[i]\r\n return a\r\n\r\n\r\nclass GeometryTopology:\r\n class Graph:\r\n class __Edge:\r\n def __init__(self, weight=1, capacity=1, **args):\r\n self.weight = weight\r\n self.capacity = capacity\r\n\r\n class __Node:\r\n def __init__(self, **args):\r\n pass\r\n\r\n def __init__(self, n=0):\r\n self.__N = n\r\n self.__nodes = [None] * n\r\n self.__edges = [{} for _ in range(n)]\r\n\r\n def add_node(self, v, **args):\r\n self.__nodes[v] = self.__Node(**args)\r\n\r\n def add_edge(self, u, v, **args):\r\n # self.add_node(u); self.add_node(v)\r\n self.__edges[u][v] = self.__Edge(**args)\r\n\r\n def get_size(self):\r\n return self.__N\r\n\r\n def bfs(self, src=0):\r\n assert 0 <= src < self.__N\r\n self.__depth = self.__lv = lv = [None] * self.__N\r\n lv[src] = 0 # depth in tree, or level in general graph.\r\n self.__dist = dist = [inf] * self.__N\r\n dist[src] = 0\r\n self.__parent = par = [None] * self.__N\r\n par[src] = src\r\n q = deque([src])\r\n while q:\r\n u = q.popleft()\r\n for v, e in self.__edges[u].items():\r\n if e.capacity == 0 or lv[v] is not None:\r\n continue\r\n lv[v] = lv[u] + 1\r\n dist[v] = dist[u] + e.weight # only tree\r\n par[v] = u\r\n q.append(v)\r\n return lv\r\n\r\n def dinic(self, src, sink):\r\n def flow_to_sink(u, flow_in):\r\n if u == sink:\r\n return flow_in\r\n flow = 0\r\n for v, e in self.__edges[u].items():\r\n if e.capacity == 0 or self.__lv[v] <= self.__lv[u]:\r\n continue\r\n f = flow_to_sink(v, min(flow_in, e.capacity))\r\n if not f:\r\n continue\r\n self.__edges[u][v].capacity -= f\r\n if u in self.__edges[v]:\r\n self.__edges[v][u].capacity += f\r\n else:\r\n self.add_edge(v, u, capacity=f)\r\n flow_in -= f\r\n flow += f\r\n return flow\r\n\r\n flow = 0\r\n while True:\r\n self.bfs(src)\r\n if self.__lv[sink] is None:\r\n return flow\r\n flow += flow_to_sink(src, inf)\r\n\r\n def ford_fulkerson(self):\r\n pass\r\n\r\n def push_relabel(self):\r\n pass\r\n\r\n def floyd_warshall(self):\r\n n = self.__N\r\n d = [[inf] * n for _ in range(n)]\r\n for u in range(n):\r\n d[u][u] = 0\r\n for v, e in self.__edges[u].items():\r\n d[u][v] = e.weight\r\n for w in range(n):\r\n for u in range(n):\r\n for v in range(n):\r\n d[u][v] = min(d[u][v], d[u][w] + d[w][v])\r\n return d\r\n\r\n def dijkstra(self, src, paths_cnt=False, mod=None):\r\n dist = [inf] * self.__N\r\n dist[src] = 0\r\n visited = [False] * self.__N\r\n paths = [0] * self.__N\r\n paths[src] = 1\r\n q = [(0, src)]\r\n while q:\r\n d, u = heappop(q)\r\n if visited[u]:\r\n continue\r\n visited[u] = True\r\n for v, e in self.__edges[u].items():\r\n dv = d + e.weight\r\n if dv > dist[v]:\r\n continue\r\n elif dv == dist[v]:\r\n paths[v] += paths[u]\r\n if mod:\r\n paths[v] %= mod\r\n continue\r\n paths[v] = paths[u]\r\n dist[v] = dv\r\n heappush(q, (dv, v))\r\n if paths_cnt:\r\n return dist, paths\r\n else:\r\n return dist\r\n\r\n def astar(self, src, tgt, heuristic_func):\r\n cost = [inf] * self.__N\r\n q = [(heuristic_func(src, tgt), 0, src)]\r\n while q:\r\n _, c, u = heappop(q)\r\n if u == tgt:\r\n return c\r\n if cost[u] != inf:\r\n continue\r\n cost[u] = c\r\n for v, e in 
self.__edges[u].items():\r\n if cost[v] != inf:\r\n continue\r\n h = heuristic_func(v, tgt)\r\n nc = c + e.weight\r\n heappush(q, (h + nc, nc, v))\r\n return inf\r\n\r\n def find_ancestors(self): # tree doubling.\r\n self.__ancestors = ancestors = [self.__parent]\r\n for _ in range(max(self.__depth).bit_length()):\r\n ancestors.append([ancestors[-1][u] for u in ancestors[-1]])\r\n\r\n def find_dist(self, u, v):\r\n return (\r\n self.__dist[u]\r\n + self.__dist[v]\r\n - 2 * self.__dist[self.__find_lca(u, v)]\r\n )\r\n\r\n def __find_lca(self, u, v):\r\n du, dv = self.__depth[u], self.__depth[v]\r\n if du > dv:\r\n u, v = v, u\r\n du, dv = dv, du\r\n\r\n d = dv - du\r\n for i in range(d.bit_length()): # up-stream\r\n if d >> i & 1:\r\n v = self.__ancestors[i][v]\r\n if v == u:\r\n return v\r\n\r\n for i in range(\r\n du.bit_length() - 1, -1, -1\r\n ): # find direct child of LCA.\r\n nu, nv = self.__ancestors[i][u], self.__ancestors[i][v]\r\n if nu == nv:\r\n continue\r\n u, v = nu, nv\r\n\r\n return self.__ancestors[0][u]\r\n\r\n @staticmethod\r\n def triangle_area(p0, p1, p2, signed=False):\r\n x1, y1, x2, y2 = (\r\n p1[0] - p0[0],\r\n p1[1] - p0[1],\r\n p2[0] - p0[0],\r\n p2[1] - p0[1],\r\n )\r\n return (\r\n (x1 * y2 - x2 * y1) / 2 if signed else abs(x1 * y2 - x2 * y1) / 2\r\n )\r\n\r\n @classmethod\r\n def intersect(cls, seg1, seg2):\r\n (p1, p2), (p3, p4) = seg1, seg2\r\n t1 = cls.triangle_area(p1, p2, p3, signed=True)\r\n t2 = cls.triangle_area(p1, p2, p4, signed=True)\r\n t3 = cls.triangle_area(p3, p4, p1, signed=True)\r\n t4 = cls.triangle_area(p3, p4, p2, signed=True)\r\n return (t1 * t2 < 0) & (t3 * t4 < 0)\r\n\r\n class UnionFind:\r\n def __init__(self, n=10**6):\r\n self.root = list(range(n))\r\n self.height = [0] * n\r\n self.size = [1] * n\r\n\r\n def find_root(self, u):\r\n if self.root[u] == u:\r\n return u\r\n self.root[u] = self.find_root(self.root[u])\r\n return self.root[u]\r\n\r\n def unite(self, u, v):\r\n ru = self.find_root(u)\r\n rv = self.find_root(v)\r\n if ru == rv:\r\n return\r\n hu = self.height[ru]\r\n hv = self.height[rv]\r\n if hu >= hv:\r\n self.root[rv] = ru\r\n self.size[ru] += self.size[rv]\r\n self.height[ru] = max(hu, hv + 1)\r\n else:\r\n self.root[ru] = rv\r\n self.size[rv] += self.size[ru]\r\n\r\n\r\ndef cumxor(a):\r\n return reduce(xor, a, 0)\r\n\r\n\r\ndef cumor(a):\r\n return reduce(or_, a, 0)\r\n\r\n\r\ndef bit_count(n):\r\n cnt = 0\r\n while n:\r\n cnt += n & 1\r\n n >>= 1\r\n return cnt\r\n\r\n\r\nclass AtCoder:\r\n class ABC001:\r\n @staticmethod\r\n def a():\r\n h1, h2 = map(int, sys.stdin.read().split())\r\n print(h1 - h2)\r\n\r\n @staticmethod\r\n def d():\r\n def to_minuites(x):\r\n q, r = divmod(x, 100)\r\n return 60 * q + r\r\n\r\n def to_hmform(x):\r\n q, r = divmod(x, 60)\r\n return 100 * q + r\r\n\r\n n = int(sys.stdin.readline().rstrip())\r\n term = [0] * 2001\r\n for _ in range(n):\r\n s, e = map(\r\n to_minuites,\r\n map(int, sys.stdin.readline().rstrip().split(\"-\")),\r\n )\r\n s = s // 5 * 5\r\n e = (e + 4) // 5 * 5\r\n term[s] += 1\r\n term[e + 1] -= 1\r\n for i in range(2000):\r\n term[i + 1] += term[i]\r\n\r\n res = []\r\n raining = False\r\n for i in range(2001):\r\n if term[i]:\r\n if not raining:\r\n s = i\r\n raining = True\r\n elif raining:\r\n res.append((s, i - 1))\r\n raining = False\r\n for s, e in res:\r\n print(f\"{to_hmform(s):04}-{to_hmform(e):04}\")\r\n\r\n class ABC002:\r\n @staticmethod\r\n def a():\r\n print(max(map(int, sys.stdin.readline().split())))\r\n\r\n @staticmethod\r\n def b():\r\n vowels = 
set(\"aeiou\")\r\n print(\r\n \"\".join(\r\n [\r\n c\r\n for c in sys.stdin.readline().rstrip()\r\n if c not in vowels\r\n ]\r\n )\r\n )\r\n\r\n @staticmethod\r\n def c():\r\n print(\r\n GeometryTopology.triangle_area(\r\n *map(int, sys.stdin.readline().split())\r\n )\r\n )\r\n\r\n @staticmethod\r\n def d():\r\n n, m = map(int, sys.stdin.readline().split())\r\n edges = set(\r\n (x - 1, y - 1)\r\n for x, y in zip(*[map(int, sys.stdin.read().split())] * 2)\r\n )\r\n print(\r\n max(\r\n len(s)\r\n for i in range(1, 1 << n)\r\n for s in [[j for j in range(n) if i >> j & 1]]\r\n if all(\r\n (x, y) in edges\r\n for x, y in itertools.combinations(s, 2)\r\n )\r\n )\r\n )\r\n\r\n @staticmethod\r\n def d_2():\r\n n, m = map(int, sys.stdin.readline().split())\r\n relations = [1 << i for i in range(n)]\r\n for x, y in zip(*[map(int, sys.stdin.read().split())] * 2):\r\n x -= 1\r\n y -= 1\r\n relations[x] |= 1 << y\r\n relations[y] |= 1 << x\r\n res = 0\r\n for i in range(1 << n):\r\n cnt = 0\r\n s = 0\r\n t = (1 << n) - 1\r\n for j in range(n):\r\n if i >> j & 1:\r\n s |= 1 << j\r\n t &= relations[j]\r\n cnt += 1\r\n if t & s == s:\r\n res = max(res, cnt)\r\n print(res)\r\n\r\n class ABC003:\r\n @staticmethod\r\n def a():\r\n print((int(sys.stdin.readline().rstrip()) + 1) * 5000)\r\n\r\n @staticmethod\r\n def b():\r\n atcoder = set(\"atcoder\")\r\n s, t = sys.stdin.read().split()\r\n print(\r\n all(\r\n s[i] == t[i]\r\n or s[i] == \"@\"\r\n and t[i] in atcoder\r\n or t[i] == \"@\"\r\n and s[i] in atcoder\r\n for i in range(len(s))\r\n )\r\n and \"You can win\"\r\n or \"You will lose\"\r\n )\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *r = map(int, sys.stdin.read().split())\r\n print(reduce(lambda x, y: (x + y) / 2, sorted(r)[-k:], 0))\r\n\r\n class ABC004:\r\n @staticmethod\r\n def a():\r\n print(int(sys.stdin.readline().rstrip()) * 2)\r\n\r\n @staticmethod\r\n def b():\r\n for l in [sys.stdin.readline().rstrip() for _ in range(4)][::-1]:\r\n print(l[::-1])\r\n\r\n @staticmethod\r\n def c():\r\n n = int(sys.stdin.readline().rstrip()) % 30\r\n res = list(range(1, 7))\r\n for i in range(n):\r\n i %= 5\r\n res[i], res[i + 1] = res[i + 1], res[i]\r\n print(*res, sep=\"\")\r\n\r\n class ABC005:\r\n @staticmethod\r\n def a():\r\n x, y = map(int, sys.stdin.readline().split())\r\n print(y // x)\r\n\r\n @staticmethod\r\n def b():\r\n n, *t = map(int, sys.stdin.read().split())\r\n print(min(t))\r\n\r\n @staticmethod\r\n def c():\r\n t = int(sys.stdin.readline().rstrip())\r\n n = int(sys.stdin.readline().rstrip())\r\n a = [int(x) for x in sys.stdin.readline().split()]\r\n m = int(sys.stdin.readline().rstrip())\r\n b = [int(x) for x in sys.stdin.readline().split()]\r\n i = 0\r\n for p in b:\r\n if i == n:\r\n print(\"no\")\r\n return\r\n while p - a[i] > t:\r\n i += 1\r\n if i == n:\r\n print(\"no\")\r\n return\r\n if a[i] > p:\r\n print(\"no\")\r\n return\r\n i += 1\r\n print(\"yes\")\r\n\r\n @staticmethod\r\n def d():\r\n n = int(sys.stdin.readline().rstrip())\r\n d = np.array(\r\n [sys.stdin.readline().split() for _ in range(n)], np.int64\r\n )\r\n s = d.cumsum(axis=0).cumsum(axis=1)\r\n s = np.pad(s, 1)\r\n max_del = np.zeros((n + 1, n + 1), dtype=np.int64)\r\n for y in range(1, n + 1):\r\n for x in range(1, n + 1):\r\n max_del[y, x] = np.amax(\r\n s[y : n + 1, x : n + 1]\r\n - s[0 : n - y + 1, x : n + 1]\r\n - s[y : n + 1, 0 : n - x + 1]\r\n + s[0 : n - y + 1, 0 : n - x + 1]\r\n )\r\n res = np.arange(n**2 + 1)[:, None]\r\n i = np.arange(1, n + 1)\r\n res = max_del[i, np.minimum(res // i, n)].max(axis=1)\r\n 
q = int(sys.stdin.readline().rstrip())\r\n p = np.array(sys.stdin.read().split(), dtype=np.int64)\r\n print(*res[p], sep=\"\\n\")\r\n\r\n class ABC006:\r\n @staticmethod\r\n def a():\r\n n = sys.stdin.readline().rstrip()\r\n if \"3\" in n:\r\n print(\"YES\")\r\n elif int(n) % 3 == 0:\r\n print(\"YES\")\r\n else:\r\n print(\"NO\")\r\n\r\n @staticmethod\r\n def b():\r\n mod = 10007\r\n a = np.eye(N=3, k=-1, dtype=np.int64)\r\n a[0] = 1\r\n n = int(sys.stdin.readline().rstrip())\r\n a = Algebra.matrix_pow(a, n - 1, mod)\r\n print(a[2][0])\r\n\r\n @staticmethod\r\n def c():\r\n n, m = map(int, sys.stdin.readline().split())\r\n cnt = [0, 0, 0]\r\n if m == 1:\r\n cnt = [-1, -1, -1]\r\n else:\r\n if m & 1:\r\n m -= 3\r\n cnt[1] += 1\r\n n -= 1\r\n cnt[2] = m // 2 - n\r\n cnt[0] = n - cnt[2]\r\n if cnt[0] < 0 or cnt[1] < 0 or cnt[2] < 0:\r\n print(-1, -1, -1)\r\n else:\r\n print(*cnt, sep=\" \")\r\n\r\n @staticmethod\r\n def d():\r\n n, *c = map(int, sys.stdin.read().split())\r\n lis = [inf] * n\r\n for x in c:\r\n lis[bi_l(lis, x)] = x\r\n print(n - bi_l(lis, inf))\r\n\r\n class ABC007:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n print(n - 1)\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip()\r\n if s == \"a\":\r\n print(-1)\r\n else:\r\n print(\"a\")\r\n\r\n @staticmethod\r\n def c():\r\n r, c = map(int, sys.stdin.readline().split())\r\n sy, sx = map(int, sys.stdin.readline().split())\r\n gy, gx = map(int, sys.stdin.readline().split())\r\n sy -= 1\r\n sx -= 1\r\n gy -= 1\r\n gx -= 1\r\n maze = [sys.stdin.readline().rstrip() for _ in range(r)]\r\n queue = deque([(sy, sx)])\r\n dist = np.full((r, c), np.inf)\r\n dist[sy, sx] = 0\r\n while queue:\r\n y, x = queue.popleft()\r\n for i, j in [(-1, 0), (1, 0), (0, -1), (0, 1)]:\r\n i += y\r\n j += x\r\n if maze[i][j] == \"#\" or dist[i, j] != np.inf:\r\n continue\r\n dist[i, j] = dist[y, x] + 1\r\n queue.append((i, j))\r\n print(int(dist[gy, gx]))\r\n\r\n @staticmethod\r\n def d():\r\n ng = set([4, 9])\r\n\r\n def count(d):\r\n return d if d <= 4 else d - 1\r\n\r\n def f(n):\r\n x = [int(d) for d in str(n)]\r\n flg = True\r\n dp = 0\r\n for d in x:\r\n dp = dp * 8 + flg * count(d)\r\n if d in ng:\r\n flg = False\r\n return n - (dp + flg)\r\n\r\n a, b = map(int, sys.stdin.readline().split())\r\n print(f(b) - f(a - 1))\r\n\r\n class ABC008:\r\n @staticmethod\r\n def a():\r\n s, t = map(int, sys.stdin.readline().split())\r\n print(t - s + 1)\r\n\r\n @staticmethod\r\n def b():\r\n n, *s = sys.stdin.read().split()\r\n res = defaultdict(int)\r\n for name in s:\r\n res[name] += 1\r\n print(sorted(res.items(), key=lambda x: x[1])[-1][0])\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n c = n - np.count_nonzero(a[:, None] % a, axis=1)\r\n print(np.sum((c + 1) // 2 / c))\r\n\r\n @staticmethod\r\n def d():\r\n w, h, n, *xy = map(int, sys.stdin.read().split())\r\n (*xy,) = zip(*([iter(xy)] * 2))\r\n\r\n @lru_cache(maxsize=None)\r\n def count(x1, y1, x2, y2):\r\n res = 0\r\n for x, y in xy:\r\n if not (x1 <= x <= x2 and y1 <= y <= y2):\r\n continue\r\n cnt = (x2 - x1) + (y2 - y1) + 1\r\n cnt += count(x1, y1, x - 1, y - 1)\r\n cnt += count(x1, y + 1, x - 1, y2)\r\n cnt += count(x + 1, y1, x2, y - 1)\r\n cnt += count(x + 1, y + 1, x2, y2)\r\n res = max(res, cnt)\r\n return res\r\n\r\n print(count(1, 1, w, h))\r\n\r\n class ABC009:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n print((n + 1) // 2)\r\n\r\n @staticmethod\r\n def 
b():\r\n n, *a = map(int, sys.stdin.read().split())\r\n print(sorted(set(a))[-2])\r\n\r\n @staticmethod\r\n def c():\r\n n, k = map(int, sys.stdin.readline().split())\r\n s = list(sys.stdin.readline().rstrip())\r\n cost = [1] * n\r\n r = k\r\n for i in range(n - 1):\r\n q = []\r\n for j in range(i + 1, n):\r\n if s[j] < s[i] and cost[i] + cost[j] <= r:\r\n heappush(q, (s[j], cost[i] + cost[j], -j))\r\n if not q:\r\n continue\r\n _, c, j = heappop(q)\r\n j = -j\r\n s[i], s[j] = s[j], s[i]\r\n r -= c\r\n cost[i] = cost[j] = 0\r\n print(\"\".join(s))\r\n\r\n @staticmethod\r\n def d():\r\n k, m = map(int, sys.stdin.readline().split())\r\n a = np.array([int(x) for x in sys.stdin.readline().split()])\r\n c = np.array([int(x) for x in sys.stdin.readline().split()])\r\n mask = (1 << 32) - 1\r\n d = np.eye(k, k, -1, dtype=np.uint32) * mask\r\n d[0] = c\r\n if m <= k:\r\n print(a[m - 1])\r\n return\r\n # print(Algebra.bitwise_mat_pow(d, m-k))\r\n # print(Algebra.bitwise_dot(Algebra.bitwise_mat_pow(d, m-k), a[::-1].reshape(-1, 1))[0].item())\r\n print(\r\n Algebra.bitwise_dot(\r\n Algebra.bitwise_mat_pow(d, m - k), a[::-1].reshape(-1, 1)\r\n )[0][0]\r\n )\r\n\r\n class ABC010:\r\n @staticmethod\r\n def a():\r\n print(sys.stdin.readline().rstrip() + \"pp\")\r\n\r\n @staticmethod\r\n def b():\r\n n, *a = map(int, sys.stdin.read().split())\r\n tot = 0\r\n for x in a:\r\n c = 0\r\n while x % 2 == 0 or x % 3 == 2:\r\n x -= 1\r\n c += 1\r\n tot += c\r\n print(tot)\r\n\r\n @staticmethod\r\n def c():\r\n sx, sy, gx, gy, t, v, n, *xy = map(int, sys.stdin.read().split())\r\n x, y = np.array(xy).reshape(-1, 2).T\r\n\r\n def dist(x1, y1, x2, y2):\r\n return np.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)\r\n\r\n ans = (\r\n \"YES\"\r\n if (dist(sx, sy, x, y) + dist(x, y, gx, gy) <= v * t).any()\r\n else \"NO\"\r\n )\r\n print(ans)\r\n\r\n @staticmethod\r\n def d():\r\n n, g, e = map(int, sys.stdin.readline().split())\r\n p = [int(x) for x in sys.stdin.readline().split()]\r\n x, y = [], []\r\n for _ in range(e):\r\n a, b = map(int, sys.stdin.readline().split())\r\n x.append(a)\r\n y.append(b)\r\n x.append(b)\r\n y.append(a)\r\n for a in p:\r\n x.append(a)\r\n y.append(n)\r\n if not x:\r\n print(0)\r\n return\r\n c = [1] * len(x)\r\n min_cut = maximum_flow(\r\n csr_matrix((c, (x, y)), (n + 1, n + 1)), source=0, sink=n\r\n ).flow_value\r\n print(min_cut)\r\n\r\n @staticmethod\r\n def d_2():\r\n n, g, e = map(int, sys.stdin.readline().split())\r\n graph = nx.DiGraph()\r\n graph.add_nodes_from(range(n + 1))\r\n for p in [int(x) for x in sys.stdin.readline().split()]:\r\n graph.add_edge(p, n, capacity=1)\r\n for _ in range(e):\r\n a, b = map(int, sys.stdin.readline().split())\r\n graph.add_edge(a, b, capacity=1)\r\n graph.add_edge(b, a, capacity=1)\r\n print(nx.minimum_cut_value(graph, 0, n))\r\n\r\n @staticmethod\r\n def d_3():\r\n n, q, m = map(int, sys.stdin.readline().split())\r\n g = GeometryTopology.Graph(n + 1)\r\n # for i in range(n+1): g.add_node(i)\r\n for p in [int(x) for x in sys.stdin.readline().split()]:\r\n g.add_edge(p, n, capacity=1)\r\n for a, b in zip(*[map(int, sys.stdin.read().split())] * 2):\r\n g.add_edge(a, b, capacity=1)\r\n g.add_edge(b, a, capacity=1)\r\n print(g.dinic(0, n))\r\n\r\n class ABC011:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n print(n % 12 + 1)\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip()\r\n print(s[0].upper() + s[1:].lower())\r\n\r\n @staticmethod\r\n def c():\r\n n, *ng = map(int, sys.stdin.read().split())\r\n ng = 
set(ng)\r\n if n in ng:\r\n print(\"NO\")\r\n else:\r\n r = 100\r\n while n > 0:\r\n if r == 0:\r\n print(\"NO\")\r\n return\r\n for i in range(3, 0, -1):\r\n if (n - i) in ng:\r\n continue\r\n n -= i\r\n r -= 1\r\n break\r\n else:\r\n print(\"NO\")\r\n return\r\n print(\"YES\")\r\n\r\n @staticmethod\r\n def d():\r\n n, d, x, y = map(int, sys.stdin.read().split())\r\n x, y = abs(x), abs(y)\r\n if x % d or y % d:\r\n print(0)\r\n return\r\n x, y = x // d, y // d\r\n r = n - (x + y)\r\n if r < 0 or r & 1:\r\n print(0)\r\n return\r\n\r\n res = 0\r\n half_p = pow(1 / 2, n)\r\n for d in range(r // 2 + 1): # 0 <= d <= r//2, south\r\n south, north = d, y + d\r\n west = (r - 2 * d) // 2\r\n res += (\r\n half_p\r\n * comb(n, south, exact=True)\r\n * comb(n - south, north, exact=True)\r\n * comb(n - south - north, west, exact=True)\r\n * half_p\r\n )\r\n print(res)\r\n\r\n class ABC012:\r\n @staticmethod\r\n def a():\r\n a, b = map(int, sys.stdin.readline().split())\r\n print(b, a)\r\n\r\n @staticmethod\r\n def b():\r\n n = int(sys.stdin.readline().rstrip())\r\n h, n = divmod(n, 3600)\r\n m, s = divmod(n, 60)\r\n print(f\"{h:02}:{m:02}:{s:02}\")\r\n\r\n @staticmethod\r\n def c():\r\n n = 2025 - int(sys.stdin.readline().rstrip())\r\n res = []\r\n for i in range(1, 10):\r\n if n % i != 0 or n // i > 9:\r\n continue\r\n res.append(f\"{i} x {n//i}\")\r\n print(*sorted(res), sep=\"\\n\")\r\n\r\n @staticmethod\r\n def d():\r\n n, m, *abt = map(int, sys.stdin.read().split())\r\n a, b, t = np.array(abt).reshape(m, 3).T\r\n res = shortest_path(\r\n csr_matrix((t, (a - 1, b - 1)), (n, n)),\r\n method=\"FW\",\r\n directed=False,\r\n )\r\n print(res.max(axis=-1).min().astype(np.int64))\r\n\r\n @staticmethod\r\n def d_2():\r\n n, m, *abt = map(int, sys.stdin.read().split())\r\n g = GeometryTopology.Graph(n)\r\n for a, b, t in zip(*[iter(abt)] * 3):\r\n a -= 1\r\n b -= 1\r\n g.add_edge(a, b, weight=t)\r\n g.add_edge(b, a, weight=t)\r\n\r\n print(min(max(d) for d in g.floyd_warshall()))\r\n\r\n class ABC013:\r\n @staticmethod\r\n def a():\r\n print(ord(sys.stdin.readline().rstrip()) - ord(\"A\") + 1)\r\n\r\n @staticmethod\r\n def b():\r\n a, b = map(int, sys.stdin.read().split())\r\n d = abs(a - b)\r\n print(min(d, 10 - d))\r\n\r\n @staticmethod\r\n def c():\r\n n, h, a, b, c, d, e = map(int, sys.stdin.read().split())\r\n y = np.arange(n + 1)\r\n x = (n * e - h - (d + e) * y) // (b + e) + 1\r\n np.maximum(x, 0, out=x)\r\n np.minimum(x, n - y, out=x)\r\n print(np.amin(a * x + c * y))\r\n\r\n @staticmethod\r\n def d():\r\n n, m, d, *a = map(int, sys.stdin.read().split())\r\n res = list(range(n))\r\n\r\n def swap(i, j):\r\n res[i], res[j] = res[j], res[i]\r\n\r\n for i in a[::-1]:\r\n swap(i - 1, i)\r\n\r\n group = [None] * n\r\n root = [None] * n\r\n index_in_group = [None] * n\r\n for i in range(n):\r\n if root[i] is not None:\r\n continue\r\n group[i] = []\r\n j = i\r\n for cnt in range(1, n + 1):\r\n index_in_group[j] = cnt - 1\r\n group[i].append(j)\r\n j = res[j]\r\n root[j] = i\r\n if j == i:\r\n break\r\n\r\n for i in range(n):\r\n g = group[root[i]]\r\n print(g[(index_in_group[i] + d) % len(g)] + 1)\r\n\r\n class ABC014:\r\n @staticmethod\r\n def a():\r\n a, b = map(int, sys.stdin.read().split())\r\n print((a + b - 1) // b * b - a)\r\n\r\n @staticmethod\r\n def b():\r\n n, x, *a = map(int, sys.stdin.read().split())\r\n print(sum(a[i] for i in range(n) if x >> i & 1))\r\n\r\n @staticmethod\r\n def c():\r\n n, *ab = map(int, sys.stdin.read().split())\r\n a, b = np.array(ab).reshape(n, 2).T\r\n res = 
np.zeros(10**6 + 2, dtype=np.int64)\r\n np.add.at(res, a, 1)\r\n np.subtract.at(res, b + 1, 1)\r\n np.cumsum(res, out=res)\r\n print(res.max())\r\n\r\n @staticmethod\r\n def d():\r\n n = int(sys.stdin.readline().rstrip())\r\n g = GeometryTopology.Graph(n)\r\n for _ in range(n - 1):\r\n x, y = map(int, sys.stdin.readline().split())\r\n x -= 1\r\n y -= 1\r\n g.add_edge(x, y, weight=1)\r\n g.add_edge(y, x, weight=1)\r\n\r\n g.bfs(0)\r\n g.find_ancestors()\r\n\r\n q, *ab = map(int, sys.stdin.read().split())\r\n for a, b in zip(*[iter(ab)] * 2):\r\n a -= 1\r\n b -= 1\r\n print(g.find_dist(a, b) + 1)\r\n\r\n class ABC015:\r\n @staticmethod\r\n def a():\r\n a, b = sys.stdin.read().split()\r\n print(a if len(a) > len(b) else b)\r\n\r\n @staticmethod\r\n def b():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n print(\r\n np.ceil(\r\n a[np.nonzero(a)[0]].sum() / np.count_nonzero(a)\r\n ).astype(np.int8)\r\n )\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *t = map(int, sys.stdin.read().split())\r\n t = np.array(t).reshape(n, k)\r\n x = np.zeros((1, 1), dtype=np.int8)\r\n for i in range(n):\r\n x = x.reshape(-1, 1) ^ t[i]\r\n print(\"Found\" if np.count_nonzero(x == 0) > 0 else \"Nothing\")\r\n\r\n @staticmethod\r\n def d():\r\n w, n, k, *ab = map(int, sys.stdin.read().split())\r\n dp = np.zeros((k + 1, w + 1), dtype=np.int32)\r\n for a, b in zip(*[iter(ab)] * 2):\r\n prev = dp.copy()\r\n np.maximum(dp[1:, a:], prev[:-1, :-a] + b, out=dp[1:, a:])\r\n print(dp[k][w])\r\n\r\n class ABC016:\r\n @staticmethod\r\n def a():\r\n m, d = map(int, sys.stdin.readline().split())\r\n print(\"YES\" if m % d == 0 else \"NO\")\r\n\r\n @staticmethod\r\n def b():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n f1, f2 = a + b == c, a - b == c\r\n if f1 & f2:\r\n print(\"?\")\r\n elif f1 & (~f2):\r\n print(\"+\")\r\n elif (~f1) & f2:\r\n print(\"-\")\r\n else:\r\n print(\"!\")\r\n\r\n @staticmethod\r\n def c():\r\n n, _, *ab = map(int, sys.stdin.read().split())\r\n friends = [0] * n\r\n for a, b in zip(*[iter(ab)] * 2):\r\n a -= 1\r\n b -= 1\r\n friends[a] |= 1 << b\r\n friends[b] |= 1 << a\r\n res = [\r\n bit_count(\r\n cumor(friends[j] for j in range(n) if friends[i] >> j & 1)\r\n & ~(friends[i] | 1 << i)\r\n )\r\n for i in range(n)\r\n ]\r\n print(*res, sep=\"\\n\")\r\n\r\n @staticmethod\r\n def d():\r\n sx, sy, gx, gy = map(int, sys.stdin.readline().split())\r\n seg1 = ((sx, sy), (gx, gy))\r\n n = int(sys.stdin.readline().rstrip())\r\n p1 = (\r\n np.array(sys.stdin.read().split(), dtype=np.int64)\r\n .reshape(n, 2)\r\n .T\r\n )\r\n p2 = np.hstack((p1[:, 1:], p1[:, :1]))\r\n seg2 = (p1, p2)\r\n print(\r\n np.count_nonzero(GeometryTopology.intersect(seg1, seg2)) // 2\r\n + 1\r\n )\r\n\r\n class ABC017:\r\n @staticmethod\r\n def a():\r\n s, e = (\r\n np.array(sys.stdin.read().split(), dtype=np.int16)\r\n .reshape(3, 2)\r\n .T\r\n )\r\n print((s // 10 * e).sum())\r\n\r\n @staticmethod\r\n def b():\r\n choku_tail = set(\"ch, o, k, u\".split(\", \"))\r\n\r\n def is_choku(s):\r\n if s == \"\":\r\n return True\r\n if len(s) >= 1 and (s[-1] in choku_tail) and is_choku(s[:-1]):\r\n return True\r\n if len(s) >= 2 and (s[-2:] in choku_tail) and is_choku(s[:-2]):\r\n return True\r\n return False\r\n\r\n print(\"YES\" if is_choku(sys.stdin.readline().rstrip()) else \"NO\")\r\n\r\n @staticmethod\r\n def c():\r\n n, m, *lrs = map(int, sys.stdin.read().split())\r\n l, r, s = np.array(lrs).reshape(n, 3).T\r\n score = np.zeros((m + 1,), dtype=np.int32)\r\n np.add.at(score, l - 1, s)\r\n 
np.subtract.at(score, r, s)\r\n np.cumsum(score, out=score)\r\n print(s.sum() - score[:m].min())\r\n\r\n @staticmethod\r\n def d():\r\n n, m, *f = map(int, sys.stdin.read().split())\r\n prev = [0] * (n + 1)\r\n tmp = defaultdict(int)\r\n for i in range(n):\r\n prev[i + 1] = tmp[f[i]]\r\n tmp[f[i]] = i + 1\r\n\r\n dp = [0] * (n + 1)\r\n dp[0] = 1\r\n l, s = 0, dp[0]\r\n for i in range(1, n + 1):\r\n while l < prev[i]:\r\n s = (s - dp[l]) % MOD\r\n l += 1\r\n dp[i] = s\r\n s = (s + dp[i]) % MOD\r\n print(dp[n])\r\n\r\n class ABC018:\r\n @staticmethod\r\n def a():\r\n (*a,) = map(int, sys.stdin.read().split())\r\n a = sorted(enumerate(a), key=lambda x: -x[1])\r\n res = [None] * 3\r\n for i in range(3):\r\n res[a[i][0]] = i + 1\r\n print(*res, sep=\"\\n\")\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip()\r\n n, *lr = map(int, sys.stdin.read().split())\r\n for l, r in zip(*[iter(lr)] * 2):\r\n l -= 1\r\n r -= 1\r\n s = s[:l] + s[l : r + 1][::-1] + s[r + 1 :]\r\n print(s)\r\n\r\n @staticmethod\r\n def c():\r\n r, c, k = map(int, sys.stdin.readline().split())\r\n s = np.array([list(s) for s in sys.stdin.read().split()])\r\n s = np.pad(s, 1, constant_values=\"x\")\r\n\r\n a = np.zeros_like(s, dtype=np.float64)\r\n a[s == \"o\"] = np.inf\r\n for i in range(1, r + 1):\r\n np.minimum(a[i - 1, :] + 1, a[i, :], out=a[i, :])\r\n for i in range(r, 0, -1):\r\n np.minimum(a[i + 1, :] + 1, a[i, :], out=a[i, :])\r\n for j in range(1, c + 1):\r\n np.minimum(a[:, j - 1] + 1, a[:, j], out=a[:, j])\r\n for j in range(c, 0, -1):\r\n np.minimum(a[:, j + 1] + 1, a[:, j], out=a[:, j])\r\n print(np.count_nonzero(a >= k))\r\n\r\n @staticmethod\r\n def c_2():\r\n r, c, k = map(int, sys.stdin.readline().split())\r\n s = np.array([list(s) for s in sys.stdin.read().split()])\r\n s = np.pad(s, 1, constant_values=\"x\")\r\n a = (s == \"o\").astype(np.int16)\r\n a = distance_transform_cdt(a, metric=\"taxicab\")\r\n print(np.count_nonzero(a >= k))\r\n\r\n @staticmethod\r\n def d():\r\n n, m, p, q, r, *xyz = map(int, sys.stdin.read().split())\r\n x, y, z = np.array(xyz).reshape(r, 3).T\r\n h = np.zeros((n, m), dtype=np.int32)\r\n h[x - 1, y - 1] = z\r\n g = np.array([*itertools.combinations(range(n), p)])\r\n print(np.sort(h[g].sum(axis=1), axis=1)[:, -q:].sum(axis=1).max())\r\n\r\n class ABC019:\r\n @staticmethod\r\n def a():\r\n (*a,) = map(int, sys.stdin.readline().split())\r\n print(sorted(a)[1])\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip() + \"$\"\r\n cnt = 0\r\n prev = \"$\"\r\n t = \"\"\r\n for c in s:\r\n if c == prev:\r\n cnt += 1\r\n continue\r\n t += prev + str(cnt)\r\n prev = c\r\n cnt = 1\r\n print(t[2:])\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n res = set()\r\n for x in a:\r\n while not x & 1:\r\n x >>= 1\r\n res.add(x)\r\n print(len(res))\r\n\r\n @staticmethod\r\n def d():\r\n def inquire(u, v):\r\n print(f\"? {u} {v}\".format(u, v), flush=True)\r\n return int(sys.stdin.readline().rstrip())\r\n\r\n n = int(sys.stdin.readline().rstrip())\r\n u = sorted([(inquire(1, v), v) for v in range(2, n + 1)])[-1][1]\r\n d = max((inquire(u, v)) for v in range(1, n + 1) if u != v)\r\n print(f\"! 
{d}\")\r\n\r\n class ABC020:\r\n @staticmethod\r\n def a():\r\n print(\r\n \"ABC\"\r\n if int(sys.stdin.readline().rstrip()) == 1\r\n else \"chokudai\"\r\n )\r\n\r\n @staticmethod\r\n def b():\r\n a, b = sys.stdin.readline().split()\r\n print(int(a + b) * 2)\r\n\r\n @staticmethod\r\n def c():\r\n h, w, t = map(int, sys.stdin.readline().split())\r\n s = [list(s) for s in sys.stdin.read().split()]\r\n for i in range(h):\r\n for j in range(w):\r\n if s[i][j] == \"S\":\r\n sy, sx = i, j\r\n if s[i][j] == \"G\":\r\n gy, gx = i, j\r\n s[sy][sx] = s[gy][gx] = \".\"\r\n source, target = sy * w + sx, gy * w + gx\r\n\r\n def heuristic_function(u, v=target):\r\n uy, ux = divmod(u, w)\r\n vy, vx = divmod(v, w)\r\n return abs(vy - uy) + abs(ux - vx)\r\n\r\n def min_time(x):\r\n g = GeometryTopology.Graph(h * w)\r\n # g = nx.DiGraph()\r\n\r\n for i in range(h):\r\n for j in range(w):\r\n u = i * w + j\r\n if i > 0:\r\n g.add_edge(\r\n u,\r\n (i - 1) * w + j,\r\n weight=(1 if s[i - 1][j] == \".\" else x),\r\n )\r\n if i < h - 1:\r\n g.add_edge(\r\n u,\r\n (i + 1) * w + j,\r\n weight=(1 if s[i + 1][j] == \".\" else x),\r\n )\r\n if j > 0:\r\n g.add_edge(\r\n u,\r\n i * w + j - 1,\r\n weight=(1 if s[i][j - 1] == \".\" else x),\r\n )\r\n if j < w - 1:\r\n g.add_edge(\r\n u,\r\n i * w + j + 1,\r\n weight=(1 if s[i][j + 1] == \".\" else x),\r\n )\r\n\r\n return g.dijkstra(source)[target]\r\n return g.astar(source, target, heuristic_function)\r\n # return nx.dijkstra_path_length(g, source, target)\r\n # return nx.astar_path_length(g, source, target, heuristic_function)\r\n\r\n def binary_search():\r\n lo, hi = 1, t + 1\r\n while lo + 1 < hi:\r\n x = (lo + hi) // 2\r\n if min_time(x) > t:\r\n hi = x\r\n else:\r\n lo = x\r\n return lo\r\n\r\n print(binary_search())\r\n\r\n @staticmethod\r\n def d():\r\n n, k = map(int, sys.stdin.readline().split())\r\n div = sorted(NumberTheory.find_divisors(k))\r\n l = len(div)\r\n s = [0] * l\r\n for i, d in enumerate(div):\r\n s[i] = (1 + n // d) * (n // d) // 2 * d % MOD\r\n for i in range(l - 1, -1, -1):\r\n for j in range(i + 1, l):\r\n if div[j] % div[i]:\r\n continue\r\n s[i] = (s[i] - s[j]) % MOD\r\n\r\n print(\r\n sum(s[i] * k // div[i] % MOD for i in range(l)) % MOD\r\n ) # ans is LCM.\r\n\r\n class ABC021:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n s = [1 << i for i in range(5) if n >> i & 1]\r\n print(len(s), *s, sep=\"\\n\")\r\n\r\n @staticmethod\r\n def b():\r\n n, a, b, k, *p = map(int, sys.stdin.read().split())\r\n print(\"YES\" if len(set(p) | set([a, b])) == k + 2 else \"NO\")\r\n\r\n @staticmethod\r\n def c():\r\n n, a, b, m, *xy = map(int, sys.stdin.read().split())\r\n x, y = np.array(xy).reshape(m, 2).T - 1\r\n a -= 1\r\n b -= 1\r\n g = csgraph_to_dense(\r\n csr_matrix((np.ones(m), (x, y)), (n, n), dtype=np.int8)\r\n )\r\n g = np.logical_or(g, g.T)\r\n paths = np.zeros(n, dtype=np.int64).reshape(-1, 1)\r\n paths[a, 0] = 1\r\n while not paths[b, 0]:\r\n paths = np.dot(g, paths) % MOD\r\n print(paths[b, 0])\r\n\r\n @staticmethod\r\n def c_2():\r\n n, a, b, m, *xy = map(int, sys.stdin.read().split())\r\n a -= 1\r\n b -= 1\r\n g = GeometryTopology.Graph()\r\n\r\n for x, y in zip(*[iter(xy)] * 2):\r\n x -= 1\r\n y -= 1\r\n g.add_edge(x, y, weight=1)\r\n g.add_edge(y, x, weight=1)\r\n\r\n dist, paths = g.dijkstra(a, paths_cnt=True, mod=MOD)\r\n print(paths[b])\r\n\r\n @staticmethod\r\n def d():\r\n n, k = map(int, sys.stdin.read().split())\r\n combinatorics = Combinatorics()\r\n print(combinatorics.mod_choose(n + k - 1, 
k))\r\n\r\n class ABC022:\r\n @staticmethod\r\n def a():\r\n n, s, t, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n np.cumsum(a, out=a)\r\n print(((s <= a) & (a <= t)).sum())\r\n\r\n @staticmethod\r\n def b():\r\n n, *a = map(int, sys.stdin.read().split())\r\n c = Counter(a)\r\n print(sum(c.values()) - len(c))\r\n\r\n @staticmethod\r\n def c():\r\n n, m, *uvl = map(int, sys.stdin.read().split())\r\n u, v, l = np.array(uvl).reshape(m, 3).T\r\n u -= 1\r\n v -= 1\r\n g = csgraph_to_dense(csr_matrix((l, (u, v)), (n, n)))\r\n g += g.T\r\n g[g == 0] = np.inf\r\n dist0 = g[0].copy()\r\n g[0] = 0\r\n g[:, 0] = 0\r\n dist = shortest_path(g, method=\"FW\", directed=False)\r\n u, v = np.array([*itertools.combinations(range(1, n), 2)]).T\r\n res = (dist0[u] + dist[u, v] + dist0[v]).min()\r\n print(-1 if res == np.inf else int(res))\r\n\r\n @staticmethod\r\n def d():\r\n n, *ab = map(int, sys.stdin.read().split())\r\n c = np.array(ab).reshape(2, n, 2)\r\n g = c.mean(axis=1)\r\n d = np.sqrt(((c - g[:, None, :]) ** 2).sum(axis=-1)).sum(axis=1)\r\n print(d[1] / d[0])\r\n\r\n class ABC023:\r\n @staticmethod\r\n def a():\r\n print(sum(divmod(int(sys.stdin.readline().rstrip()), 10)))\r\n\r\n @staticmethod\r\n def b():\r\n n, s = sys.stdin.read().split()\r\n n = int(n)\r\n t = \"b\"\r\n for i in range(n // 2):\r\n if i % 3 == 0:\r\n t = \"a\" + t + \"c\"\r\n elif i % 3 == 1:\r\n t = \"c\" + t + \"a\"\r\n else:\r\n t = \"b\" + t + \"b\"\r\n print(n // 2 if t == s else -1)\r\n\r\n @staticmethod\r\n def b_2():\r\n n, s = sys.stdin.read().split()\r\n n = int(n)\r\n if n & 1 ^ 1:\r\n print(-1)\r\n return\r\n a = list(\"abc\")\r\n i = (1 - n // 2) % 3\r\n for c in s:\r\n if c != a[i]:\r\n print(-1)\r\n return\r\n i = (i + 1) % 3\r\n print(n // 2)\r\n\r\n @staticmethod\r\n def c():\r\n h, w, k, n, *rc = map(int, sys.stdin.read().split())\r\n r, c = np.array(rc).reshape(n, 2).T - 1\r\n rb = np.bincount(r, minlength=h)\r\n cb = np.bincount(c, minlength=w)\r\n rbb = np.bincount(rb, minlength=k + 1)\r\n cbb = np.bincount(cb, minlength=k + 1)\r\n tot = (rbb[: k + 1] * cbb[k::-1]).sum()\r\n real = np.bincount(rb[r] + cb[c] - 1, minlength=k + 1)\r\n print(tot - real[k - 1] + real[k])\r\n\r\n @staticmethod\r\n def d():\r\n n, *hs = map(int, sys.stdin.read().split())\r\n h, s = np.array(hs).reshape(n, 2).T\r\n\r\n t = np.arange(n)\r\n\r\n def is_ok(x):\r\n t_lim = (x - h) // s\r\n t_lim.sort()\r\n return np.all(t_lim >= t)\r\n\r\n def binary_search():\r\n lo, hi = 0, 10**14\r\n while lo + 1 < hi:\r\n x = (lo + hi) // 2\r\n if is_ok(x):\r\n hi = x\r\n else:\r\n lo = x\r\n return hi\r\n\r\n print(binary_search())\r\n\r\n class ABC024:\r\n @staticmethod\r\n def a():\r\n a, b, c, k, s, t = map(int, sys.stdin.read().split())\r\n print(a * s + b * t - c * (s + t) * (s + t >= k))\r\n\r\n @staticmethod\r\n def b():\r\n n, t, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n print(np.minimum(a[1:] - a[:-1], t).sum() + t)\r\n\r\n @staticmethod\r\n def c():\r\n n, d, k, *lrst = map(int, sys.stdin.read().split())\r\n lrst = np.array(lrst)\r\n lr = lrst[: 2 * d].reshape(d, 2)\r\n s, t = lrst[2 * d :].reshape(k, 2).T\r\n day = np.zeros((k,), dtype=np.int32)\r\n for i in range(d):\r\n l, r = lr[i]\r\n move = (l <= s) & (s <= r) & (s != t)\r\n reach = move & (l <= t) & (t <= r)\r\n s[move & (s < t)] = r\r\n s[move & (s > t)] = l\r\n s[reach] = t[reach]\r\n day[reach] = i + 1\r\n print(*day, sep=\"\\n\")\r\n\r\n @staticmethod\r\n def d():\r\n a, b, c = map(int, sys.stdin.read().split())\r\n p = MOD\r\n denom = 
pow(a * b % p - b * c % p + c * a % p, p - 2, p)\r\n w = (b * c - a * b) % p * denom % p\r\n h = (b * c - a * c) % p * denom % p\r\n print(h, w)\r\n\r\n class ABC025:\r\n @staticmethod\r\n def a():\r\n s, n = sys.stdin.read().split()\r\n n = int(n)\r\n i, j = divmod(n - 1, 5)\r\n print(s[i] + s[j])\r\n\r\n @staticmethod\r\n def b():\r\n n, a, b = map(int, sys.stdin.readline().split())\r\n res = defaultdict(int)\r\n for _ in range(n):\r\n s, d = sys.stdin.readline().split()\r\n d = int(d)\r\n res[s] += min(max(d, a), b)\r\n res = res[\"East\"] - res[\"West\"]\r\n if res == 0:\r\n ans = 0\r\n elif res > 0:\r\n ans = f\"East {res}\"\r\n else:\r\n ans = f\"West {-res}\"\r\n print(ans)\r\n\r\n @staticmethod\r\n def c():\r\n b = [0] * 6\r\n for i in range(2):\r\n (*row,) = map(int, sys.stdin.readline().split())\r\n for j in range(3):\r\n b[i * 3 + j] = row[j]\r\n c = [0] * 8\r\n for i in range(3):\r\n (*row,) = map(int, sys.stdin.readline().split())\r\n for j in range(2):\r\n c[i * 3 + j] = row[j]\r\n tot = sum(b) + sum(c)\r\n\r\n @lru_cache(maxsize=None)\r\n def f(s=tuple(0 for _ in range(9))):\r\n if all(s):\r\n res = 0\r\n for i in range(6):\r\n res += (s[i] == s[i + 3]) * b[i]\r\n for i in range(8):\r\n res += (s[i] == s[i + 1]) * c[i]\r\n return res\r\n cand = [i for i in range(9) if not s[i]]\r\n flg = len(cand) & 1\r\n s = list(s)\r\n res = []\r\n for i in cand:\r\n s[i] = (flg ^ 1) + 1\r\n res.append(f(tuple(s)))\r\n s[i] = 0\r\n return sorted(res, reverse=flg)[0]\r\n\r\n a = f()\r\n b = tot - a\r\n print(a)\r\n print(b)\r\n\r\n class ABC026:\r\n @staticmethod\r\n def a():\r\n a = int(sys.stdin.readline().rstrip())\r\n print(a // 2 * (a - a // 2))\r\n\r\n @staticmethod\r\n def b():\r\n n, *r = map(int, sys.stdin.read().split())\r\n s = np.pi * np.array([0] + r) ** 2\r\n s.sort()\r\n res = s[n::-2].sum() - s[n - 1 :: -2].sum()\r\n print(res)\r\n\r\n @staticmethod\r\n def c():\r\n n, *b = map(int, sys.stdin.read().split())\r\n g = GeometryTopology.Graph()\r\n for i in range(1, n):\r\n g.add_edge(b[i - 1] - 1, i, weight=1)\r\n\r\n def f(u=0):\r\n if not g.edges[u]:\r\n return 1\r\n s = [f(v) for v in g.edges[u]]\r\n return max(s) + min(s) + 1\r\n\r\n print(f())\r\n\r\n @staticmethod\r\n def d():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n\r\n def f(t):\r\n return a * t + b * np.sin(c * t * np.pi) - 100\r\n\r\n print(optimize.brenth(f, 0, 200))\r\n\r\n class ABC027:\r\n @staticmethod\r\n def a():\r\n l = [int(l) for l in sys.stdin.readline().split()]\r\n l.sort()\r\n print(l[2] if l[0] == l[1] else l[0])\r\n\r\n @staticmethod\r\n def b():\r\n n, *a = map(int, sys.stdin.read().split())\r\n m, r = divmod(sum(a), n)\r\n if r:\r\n print(-1)\r\n return\r\n population = 0\r\n towns = 0\r\n cnt = 0\r\n for x in a:\r\n population += x\r\n towns += 1\r\n if population / towns != m:\r\n cnt += 1\r\n continue\r\n population, towns = 0, 0\r\n print(cnt)\r\n\r\n @staticmethod\r\n def c():\r\n n = int(sys.stdin.readline().rstrip())\r\n flg = n.bit_length() & 1 ^ 1\r\n t = 0\r\n x = 1\r\n while x <= n:\r\n t += 1\r\n x = 2 * x + 1 if t & 1 ^ flg else 2 * x\r\n print(\"Aoki\" if t & 1 else \"Takahashi\")\r\n\r\n class ABC028:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n print(\r\n \"Bad\"\r\n if n < 60\r\n else \"Good\"\r\n if n < 90\r\n else \"Great\"\r\n if n < 100\r\n else \"Perfect\"\r\n )\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip()\r\n cnt = Counter(s)\r\n print(*[cnt.get(c, 0) for c in \"ABCDEF\"])\r\n\r\n @staticmethod\r\n def 
c():\r\n a, b, c, d, e = map(int, sys.stdin.readline().split())\r\n print(max(b + c + e, a + d + e))\r\n\r\n @staticmethod\r\n def d():\r\n n, k = map(int, sys.stdin.readline().split())\r\n c = 3 * 2 * (n - k) * (k - 1) + 3 * (n - 1) + 1\r\n print(c / n**3)\r\n\r\n class ABC032:\r\n @staticmethod\r\n def a():\r\n a, b, n = map(int, sys.stdin.read().split())\r\n l = NumberTheory.lcm(a, b)\r\n print((n + l - 1) // l * l)\r\n\r\n @staticmethod\r\n def b():\r\n s, k = sys.stdin.read().split()\r\n k = int(k)\r\n res = set()\r\n for i in range(len(s) - k + 1):\r\n res.add(s[i : i + k])\r\n print(len(res))\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *s = map(int, sys.stdin.read().split())\r\n if 0 in s:\r\n print(n)\r\n return\r\n s += [inf]\r\n res = 0\r\n l = r = 0\r\n tmp = 1\r\n while r <= n:\r\n tmp *= s[r]\r\n while tmp > k:\r\n res = max(res, r - l)\r\n tmp //= s[l]\r\n l += 1\r\n r += 1\r\n print(res)\r\n\r\n class ABC033:\r\n @staticmethod\r\n def a():\r\n n = set(sys.stdin.readline().rstrip())\r\n print(\"SAME\" if len(n) == 1 else \"DIFFERENT\")\r\n\r\n @staticmethod\r\n def b():\r\n n = int(sys.stdin.readline().rstrip())\r\n res = dict()\r\n for _ in range(n):\r\n s, p = sys.stdin.readline().split()\r\n p = int(p)\r\n res[s] = p\r\n tot = sum(res.values())\r\n for s, p in res.items():\r\n if p > tot / 2:\r\n print(s)\r\n return\r\n print(\"atcoder\")\r\n\r\n @staticmethod\r\n def c():\r\n s = sys.stdin.readline().rstrip()\r\n res = sum(not \"0\" in f for f in s.split(\"+\"))\r\n print(res)\r\n\r\n class ABC034:\r\n @staticmethod\r\n def a():\r\n x, y = map(int, sys.stdin.readline().split())\r\n print(\"Better\" if y > x else \"Worse\")\r\n\r\n @staticmethod\r\n def b():\r\n n = int(sys.stdin.readline().rstrip())\r\n print(n + 1 if n & 1 else n - 1)\r\n\r\n @staticmethod\r\n def c():\r\n h, w = map(int, sys.stdin.read().split())\r\n combinatorics = Combinatorics(n=2 * 10**5, mod=MOD)\r\n print(combinatorics.mod_choose(h + w - 2, h - 1))\r\n\r\n @staticmethod\r\n def d():\r\n n, k, *wp = map(int, sys.stdin.read().split())\r\n w, p = np.array(wp).reshape(-1, 2).T\r\n\r\n def f(x):\r\n return np.sort(w * (p - x))[-k:].sum()\r\n\r\n print(optimize.bisect(f, 0, 100))\r\n\r\n class ABC035:\r\n @staticmethod\r\n def a():\r\n w, h = map(int, sys.stdin.readline().split())\r\n print(\"4:3\" if 4 * h == 3 * w else \"16:9\")\r\n\r\n @staticmethod\r\n def b():\r\n s, t = sys.stdin.read().split()\r\n y = 0\r\n x = 0\r\n z = 0\r\n for c in s:\r\n if c == \"?\":\r\n z += 1\r\n elif c == \"L\":\r\n x -= 1\r\n elif c == \"R\":\r\n x += 1\r\n elif c == \"D\":\r\n y -= 1\r\n elif c == \"U\":\r\n y += 1\r\n d = abs(y) + abs(x)\r\n if t == \"1\":\r\n print(d + z)\r\n else:\r\n print(max(d - z, (d - z) & 1))\r\n\r\n @staticmethod\r\n def c():\r\n n, q, *lr = map(int, sys.stdin.read().split())\r\n l, r = np.array(lr).reshape(q, 2).T\r\n res = np.zeros(n + 1, dtype=int)\r\n np.add.at(res, l - 1, 1)\r\n np.subtract.at(res, r, 1)\r\n np.cumsum(res, out=res)\r\n res = res & 1\r\n print(\"\".join(map(str, res[:-1])))\r\n\r\n @staticmethod\r\n def d():\r\n n, m, t = map(int, sys.stdin.readline().split())\r\n point = np.array(sys.stdin.readline().split(), dtype=int)\r\n a, b, c = (\r\n np.array(sys.stdin.read().split(), dtype=np.int64)\r\n .reshape(m, 3)\r\n .T\r\n )\r\n a -= 1\r\n b -= 1\r\n d_1 = shortest_path(\r\n csr_matrix((c, (a, b)), (n, n)),\r\n method=\"D\",\r\n directed=True,\r\n indices=0,\r\n )\r\n d_2 = shortest_path(\r\n csr_matrix((c, (b, a)), (n, n)),\r\n method=\"D\",\r\n directed=True,\r\n 
indices=0,\r\n )\r\n print(int(np.amax((t - (d_1 + d_2)) * point)))\r\n\r\n class ABC036:\r\n @staticmethod\r\n def a():\r\n a, b = map(int, sys.stdin.readline().split())\r\n print((b + a - 1) // a)\r\n\r\n @staticmethod\r\n def b():\r\n n, *s = sys.stdin.read().split()\r\n n = int(n)\r\n for j in range(n):\r\n row = \"\"\r\n for i in range(n - 1, -1, -1):\r\n row += s[i][j]\r\n print(row)\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n b = [None] * n\r\n prev = None\r\n j = -1\r\n for i, x in sorted(enumerate(a), key=lambda x: x[1]):\r\n if x != prev:\r\n j += 1\r\n b[i] = j\r\n prev = x\r\n print(*b, sep=\"\\n\")\r\n\r\n @staticmethod\r\n def d():\r\n n, *ab = map(int, sys.stdin.read().split())\r\n edges = [[] for _ in range(n)]\r\n for a, b in zip(*[iter(ab)] * 2):\r\n a -= 1\r\n b -= 1\r\n edges[a].append(b)\r\n edges[b].append(a)\r\n parent = [None] * n\r\n\r\n def count(u):\r\n black, white = 1, 1\r\n for v in edges[u]:\r\n if v == parent[u]:\r\n continue\r\n parent[v] = u\r\n b, w = count(v)\r\n black *= w\r\n black %= MOD\r\n white *= (b + w) % MOD\r\n white %= MOD\r\n return black, white\r\n\r\n print(sum(count(0)) % MOD)\r\n\r\n class ABC037:\r\n @staticmethod\r\n def a():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n print(c // min(a, b))\r\n\r\n @staticmethod\r\n def b():\r\n n, q, *lrt = map(int, sys.stdin.read().split())\r\n a = np.zeros(n, dtype=int)\r\n for l, r, t in zip(*[iter(lrt)] * 3):\r\n a[l - 1 : r] = t\r\n print(*a, sep=\"\\n\")\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *a = map(int, sys.stdin.read().split())\r\n a = np.array([0] + a)\r\n np.cumsum(a, out=a)\r\n s = (a[k:] - a[:-k]).sum()\r\n print(s)\r\n\r\n @staticmethod\r\n def d():\r\n h, w = map(int, sys.stdin.readline().split())\r\n a = [\r\n [int(x) for x in sys.stdin.readline().split()]\r\n for _ in range(h)\r\n ]\r\n dyx = [(-1, 0), (0, -1), (1, 0), (0, 1)]\r\n path = [[None] * w for _ in range(h)]\r\n\r\n def paths(i, j):\r\n if path[i][j]:\r\n return path[i][j]\r\n val = a[i][j]\r\n cnt = 1\r\n for dy, dx in dyx:\r\n y = i + dy\r\n x = j + dx\r\n if 0 <= y < h and 0 <= x < w and a[y][x] < val:\r\n cnt += paths(y, x)\r\n cnt %= MOD\r\n path[i][j] = cnt\r\n return cnt\r\n\r\n tot = 0\r\n for i in range(h):\r\n for j in range(w):\r\n tot += paths(i, j)\r\n tot %= MOD\r\n print(tot)\r\n\r\n class ABC038:\r\n @staticmethod\r\n def a():\r\n s = sys.stdin.readline().rstrip()\r\n print(\"YES\" if s[-1] == \"T\" else \"NO\")\r\n\r\n @staticmethod\r\n def b():\r\n a, b, c, d = map(int, sys.stdin.read().split())\r\n print(\"YES\" if a == c or b == c or a == d or b == d else \"NO\")\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a += [-1]\r\n cnt = n\r\n tmp = 1\r\n for i in range(n):\r\n if a[i + 1] > a[i]:\r\n tmp += 1\r\n else:\r\n cnt += tmp * (tmp - 1) // 2\r\n tmp = 1\r\n print(cnt)\r\n\r\n @staticmethod\r\n def d():\r\n n, *wh = map(int, sys.stdin.read().split())\r\n wh = sorted(zip(*[iter(wh)] * 2), key=lambda x: (-x[0], x[1]))\r\n w = [x[1] for x in wh][::-1]\r\n res = [inf] * n\r\n for x in w:\r\n res[bi_l(res, x)] = x\r\n print(bi_l(res, inf))\r\n\r\n class ABC039:\r\n @staticmethod\r\n def a():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n print((a * b + b * c + c * a) * 2)\r\n\r\n @staticmethod\r\n def b():\r\n x = int(sys.stdin.readline().rstrip())\r\n for n in range(1, int(x**0.5) + 1):\r\n if pow(n, 4) == x:\r\n print(n)\r\n return\r\n\r\n @staticmethod\r\n def c():\r\n board = \"WBWBWWBWBWBW\" * 
3\r\n convert = \"Do, *, Re, *, Mi, Fa, *, So, *, La, *, Si\".split(\", \")\r\n s = sys.stdin.readline().rstrip()\r\n print(convert[board.index(s)])\r\n\r\n @staticmethod\r\n def d():\r\n h, w = map(int, sys.stdin.readline().split())\r\n s = sys.stdin.read().split()\r\n dyx = list(itertools.product((-1, 0, 1), repeat=2))\r\n black_certain = set()\r\n black_before = set()\r\n for i in range(h):\r\n for j in range(w):\r\n black_cand = set()\r\n for dy, dx in dyx:\r\n y = i + dy\r\n x = j + dx\r\n if y < 0 or y >= h or x < 0 or x >= w:\r\n continue\r\n if s[y][x] == \".\":\r\n break\r\n black_cand.add((y, x))\r\n else:\r\n black_before.add((i, j))\r\n black_certain |= black_cand\r\n for i in range(h):\r\n for j in range(w):\r\n if s[i][j] == \"#\" and not (i, j) in black_certain:\r\n print(\"impossible\")\r\n return\r\n print(\"possible\")\r\n for i in range(h):\r\n row = \"\"\r\n for j in range(w):\r\n row += \"#\" if (i, j) in black_before else \".\"\r\n print(\"\".join(row))\r\n\r\n class ABC040:\r\n @staticmethod\r\n def a():\r\n n, x = map(int, sys.stdin.readline().split())\r\n print(min(x - 1, n - x))\r\n\r\n @staticmethod\r\n def b():\r\n n = int(sys.stdin.readline().rstrip())\r\n res = inf\r\n for i in range(1, int(n**0.5) + 1):\r\n res = min(res, n // i - i + n % i)\r\n print(res)\r\n\r\n @staticmethod\r\n def c():\r\n n, *h = map(int, sys.stdin.read().split())\r\n h = [h[0]] + h\r\n cost = [None] * (n + 1)\r\n cost[0] = cost[1] = 0\r\n for i in range(2, n + 1):\r\n cost[i] = min(\r\n cost[i - 2] + abs(h[i] - h[i - 2]),\r\n cost[i - 1] + abs(h[i] - h[i - 1]),\r\n )\r\n print(cost[n])\r\n\r\n @staticmethod\r\n def d():\r\n n, m = map(int, sys.stdin.readline().split())\r\n uf = GeometryTopology.UnionFind(n=n)\r\n queue = []\r\n for _ in range(m):\r\n a, b, y = map(int, sys.stdin.readline().split())\r\n heappush(queue, (-(2 * y), a - 1, b - 1))\r\n q = int(sys.stdin.readline().rstrip())\r\n for i in range(q):\r\n v, y = map(int, sys.stdin.readline().split())\r\n heappush(queue, (-(2 * y + 1), v - 1, i))\r\n res = [None] * q\r\n while queue:\r\n y, i, j = heappop(queue)\r\n if y & 1:\r\n res[j] = uf.size[uf.find_root(i)]\r\n else:\r\n uf.unite(i, j)\r\n print(*res, sep=\"\\n\")\r\n\r\n class ABC041:\r\n @staticmethod\r\n def a():\r\n s, i = sys.stdin.read().split()\r\n i = int(i)\r\n print(s[i - 1])\r\n\r\n @staticmethod\r\n def b():\r\n a, b, c = map(int, sys.stdin.readline().split())\r\n ans = a * b % MOD * c % MOD\r\n print(ans)\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n for i, h in sorted(enumerate(a), key=lambda x: -x[1]):\r\n print(i + 1)\r\n\r\n @staticmethod\r\n def d():\r\n n, m, *xy = map(int, sys.stdin.read().split())\r\n (*xy,) = zip(*[iter(xy)] * 2)\r\n edges = [0] * n\r\n for x, y in xy:\r\n x -= 1\r\n y -= 1\r\n edges[x] |= 1 << y\r\n comb = [None] * (1 << n)\r\n comb[0] = 1\r\n\r\n def count(edges, bit):\r\n if comb[bit] is not None:\r\n return comb[bit]\r\n comb[bit] = 0\r\n for i in range(n):\r\n if (bit >> i) & 1 and not edges[i]:\r\n nxt_bit = bit & ~(1 << i)\r\n nxt_edges = edges.copy()\r\n for j in range(n):\r\n nxt_edges[j] &= ~(1 << i)\r\n cnt = count(nxt_edges, nxt_bit)\r\n comb[bit] += cnt\r\n return comb[bit]\r\n\r\n print(count(edges, (1 << n) - 1))\r\n\r\n class ABC042:\r\n @staticmethod\r\n def a():\r\n a = [int(x) for x in sys.stdin.readline().split()]\r\n c = Counter(a)\r\n print(\"YES\" if c[5] == 2 and c[7] == 1 else \"NO\")\r\n\r\n @staticmethod\r\n def b():\r\n n, l, *s = sys.stdin.read().split()\r\n 
print(\"\".join(sorted(s)))\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *d = sys.stdin.read().split()\r\n l = len(n)\r\n ok = sorted(set(string.digits) - set(d))\r\n cand = [\r\n int(\"\".join(p)) for p in itertools.product(ok, repeat=l)\r\n ] + [int(min(x for x in ok if x > \"0\") + min(ok) * l)]\r\n print(cand[bi_l(cand, int(n))])\r\n\r\n @staticmethod\r\n def d():\r\n h, w, a, b = map(int, sys.stdin.read().split())\r\n combinatorics = Combinatorics(n=2 * 10**5, mod=MOD)\r\n tot = combinatorics.mod_choose(h + w - 2, h - 1)\r\n i = np.arange(h - a, h)\r\n ng = np.sum(\r\n combinatorics.mod_choose(i + b - 1, i)\r\n * combinatorics.mod_choose(h - i + w - b - 2, h - 1 - i)\r\n % MOD\r\n )\r\n tot -= ng\r\n tot %= MOD\r\n print(tot)\r\n\r\n class ABC043:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n print((1 + n) * n // 2)\r\n\r\n @staticmethod\r\n def b():\r\n s = sys.stdin.readline().rstrip()\r\n t = \"\"\r\n for c in s:\r\n if c == \"B\":\r\n t = t[:-1]\r\n else:\r\n t += c\r\n print(t)\r\n\r\n @staticmethod\r\n def c():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a = np.array(a)\r\n x = np.around(a.sum() / n).astype(int)\r\n print(np.sum((a - x) ** 2))\r\n\r\n @staticmethod\r\n def d():\r\n s = sys.stdin.readline().rstrip()\r\n n = len(s)\r\n for i in range(n - 1):\r\n if s[i] == s[i + 1]:\r\n print(i + 1, i + 2)\r\n return\r\n for i in range(n - 2):\r\n if s[i] == s[i + 2]:\r\n print(i + 1, i + 3)\r\n return\r\n print(-1, -1)\r\n\r\n class ABC170:\r\n @staticmethod\r\n def a():\r\n x = [int(x) for x in sys.stdin.readline().split()]\r\n for i in range(5):\r\n if x[i] != i + 1:\r\n print(i + 1)\r\n break\r\n\r\n @staticmethod\r\n def b():\r\n x, y = map(int, sys.stdin.readline().split())\r\n print(\"Yes\" if 2 * x <= y <= 4 * x and y % 2 == 0 else \"No\")\r\n\r\n @staticmethod\r\n def c():\r\n x, n, *p = map(int, sys.stdin.read().split())\r\n a = list(set(range(102)) - set(p))\r\n a = [(abs(y - x), y) for y in a]\r\n print(sorted(a)[0][1])\r\n\r\n @staticmethod\r\n def d():\r\n n, *a = map(int, sys.stdin.read().split())\r\n cand = set(a)\r\n cnt = 0\r\n for x, c in sorted(Counter(a).items()):\r\n cnt += c == 1 and x in cand\r\n cand -= set(range(x * 2, 10**6 + 1, x))\r\n print(cnt)\r\n\r\n @staticmethod\r\n def e():\r\n n, q = map(int, sys.stdin.readline().split())\r\n queue = []\r\n m = 2 * 10**5\r\n infants = [[] for _ in range(m)]\r\n highest_rate = [None] * m\r\n where = [None] * n\r\n rate = [None] * n\r\n\r\n def entry(i, k):\r\n where[i] = k\r\n while infants[k]:\r\n r, j = heappop(infants[k])\r\n if where[j] != k or j == i:\r\n continue\r\n if rate[i] >= -r:\r\n highest_rate[k] = rate[i]\r\n heappush(queue, (rate[i], k, i))\r\n heappush(infants[k], (r, j))\r\n break\r\n else:\r\n highest_rate[k] = rate[i]\r\n heappush(queue, (rate[i], k, i))\r\n heappush(infants[k], (-rate[i], i))\r\n\r\n def transfer(i, k):\r\n now = where[i]\r\n while infants[now]:\r\n r, j = heappop(infants[now])\r\n if where[j] != now or j == i:\r\n continue\r\n if highest_rate[now] != -r:\r\n highest_rate[now] = -r\r\n heappush(queue, (-r, now, j))\r\n heappush(infants[now], (r, j))\r\n break\r\n else:\r\n highest_rate[now] = None\r\n entry(i, k)\r\n\r\n def inquire():\r\n while True:\r\n r, k, i = heappop(queue)\r\n if where[i] != k or r != highest_rate[k]:\r\n continue\r\n heappush(queue, (r, k, i))\r\n return r\r\n\r\n for i in range(n):\r\n a, b = map(int, sys.stdin.readline().split())\r\n rate[i] = a\r\n entry(i, b - 1)\r\n for _ in range(q):\r\n c, d = map(int, 
sys.stdin.readline().split())\r\n transfer(c - 1, d - 1)\r\n print(inquire())\r\n\r\n class ABC171:\r\n @staticmethod\r\n def a():\r\n c = sys.stdin.readline().rstrip()\r\n print(\"A\" if c < \"a\" else \"a\")\r\n\r\n @staticmethod\r\n def b():\r\n n, k, *p = map(int, sys.stdin.read().split())\r\n print(sum(sorted(p)[:k]))\r\n\r\n @staticmethod\r\n def c():\r\n n = int(sys.stdin.readline().rstrip())\r\n n -= 1\r\n l = 1\r\n while True:\r\n if n < pow(26, l):\r\n break\r\n n -= pow(26, l)\r\n l += 1\r\n res = \"\".join(\r\n [chr(ord(\"a\") + d) for d in NumberTheory.base_convert(n, 26)][\r\n ::-1\r\n ]\r\n )\r\n res = \"a\" * (l - len(res)) + res\r\n print(res)\r\n\r\n @staticmethod\r\n def d():\r\n n = int(sys.stdin.readline().rstrip())\r\n a = [int(x) for x in sys.stdin.readline().split()]\r\n s = sum(a)\r\n cnt = Counter(a)\r\n q = int(sys.stdin.readline().rstrip())\r\n for _ in range(q):\r\n b, c = map(int, sys.stdin.readline().split())\r\n s += (c - b) * cnt[b]\r\n print(s)\r\n cnt[c] += cnt[b]\r\n cnt[b] = 0\r\n\r\n @staticmethod\r\n def e():\r\n n, *a = map(int, sys.stdin.read().split())\r\n s = 0\r\n for x in a:\r\n s ^= x\r\n b = map(lambda x: x ^ s, a)\r\n print(*b, sep=\" \")\r\n\r\n class ABC172:\r\n @staticmethod\r\n def a():\r\n a = int(sys.stdin.readline().rstrip())\r\n print(a * (1 + a + a**2))\r\n\r\n @staticmethod\r\n def b():\r\n s, t = sys.stdin.read().split()\r\n print(sum(s[i] != t[i] for i in range(len(s))))\r\n\r\n @staticmethod\r\n def c():\r\n n, m, k = map(int, sys.stdin.readline().split())\r\n a = [0] + [int(x) for x in sys.stdin.readline().split()]\r\n b = [int(x) for x in sys.stdin.readline().split()]\r\n (*sa,) = itertools.accumulate(a)\r\n (*sb,) = itertools.accumulate(b)\r\n res = 0\r\n for i in range(n + 1):\r\n r = k - sa[i]\r\n if r < 0:\r\n break\r\n res = max(res, i + bi_r(sb, r))\r\n print(res)\r\n\r\n @staticmethod\r\n def d():\r\n n = int(sys.stdin.readline().rstrip())\r\n f = np.zeros(n + 1, dtype=np.int64)\r\n for i in range(1, n + 1):\r\n f[i::i] += 1\r\n print((np.arange(1, n + 1) * f[1:]).sum())\r\n\r\n class ABC173:\r\n @staticmethod\r\n def a():\r\n n = int(sys.stdin.readline().rstrip())\r\n charge = (n + 999) // 1000 * 1000 - n\r\n print(charge)\r\n\r\n @staticmethod\r\n def b():\r\n n, *s = sys.stdin.read().split()\r\n c = Counter(s)\r\n for v in \"AC, WA, TLE, RE\".split(\", \"):\r\n print(f\"{v} x {c[v]}\")\r\n\r\n @staticmethod\r\n def c():\r\n h, w, k = map(int, sys.stdin.readline().split())\r\n c = [sys.stdin.readline().rstrip() for _ in range(h)]\r\n tot = 0\r\n for i in range(1 << h):\r\n for j in range(1 << w):\r\n cnt = 0\r\n for y in range(h):\r\n for x in range(w):\r\n if i >> y & 1 or j >> x & 1:\r\n continue\r\n cnt += c[y][x] == \"#\"\r\n tot += cnt == k\r\n print(tot)\r\n\r\n @staticmethod\r\n def d():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a.sort(reverse=True)\r\n res = (\r\n a[0]\r\n + sum(a[1 : 1 + (n - 2) // 2]) * 2\r\n + a[1 + (n - 2) // 2] * (n & 1)\r\n )\r\n print(res)\r\n\r\n @staticmethod\r\n def e():\r\n MOD = 10**9 + 7\r\n n, k, *a = map(int, sys.stdin.read().split())\r\n minus = [x for x in a if x < 0]\r\n plus = [x for x in a if x > 0]\r\n if len(plus) + len(minus) // 2 * 2 >= k: # plus\r\n (*minus,) = map(abs, minus)\r\n minus.sort(reverse=True)\r\n plus.sort(reverse=True)\r\n cand = []\r\n if len(minus) & 1:\r\n minus = minus[:-1]\r\n for i in range(0, len(minus) - 1, 2):\r\n cand.append(minus[i] * minus[i + 1] % MOD)\r\n if k & 1:\r\n res = plus[0]\r\n plus = plus[1:]\r\n else:\r\n res = 1\r\n if 
len(plus) & 1:\r\n plus = plus[:-1]\r\n for i in range(0, len(plus) - 1, 2):\r\n cand.append(plus[i] * plus[i + 1] % MOD)\r\n cand.sort(reverse=True)\r\n for x in cand[: k // 2]:\r\n res *= x\r\n res %= MOD\r\n print(res)\r\n elif 0 in a:\r\n print(0)\r\n else:\r\n cand = sorted(map(abs, a))\r\n res = 1\r\n for i in range(k):\r\n res *= cand[i]\r\n res %= MOD\r\n res = MOD - res\r\n print(res)\r\n pass\r\n\r\n class ABC174:\r\n @staticmethod\r\n def a():\r\n print(\"Yes\" if int(sys.stdin.readline().rstrip()) >= 30 else \"No\")\r\n\r\n class ACL001:\r\n @staticmethod\r\n def a():\r\n n, *xy = map(int, sys.stdin.read().split())\r\n (*xy,) = zip(*[iter(xy)] * 2)\r\n print(xy)\r\n pass\r\n\r\n class MSolutions2020:\r\n @staticmethod\r\n def a():\r\n x = int(sys.stdin.readline().rstrip())\r\n x -= 400\r\n print(8 - x // 200)\r\n\r\n @staticmethod\r\n def b():\r\n r, g, b, k = map(int, sys.stdin.read().split())\r\n while k and g <= r:\r\n g *= 2\r\n k -= 1\r\n while k and b <= g:\r\n b *= 2\r\n k -= 1\r\n print(\"Yes\" if r < g < b else \"No\")\r\n\r\n @staticmethod\r\n def c():\r\n n, k, *a = map(int, sys.stdin.read().split())\r\n for i in range(k, n):\r\n print(\"Yes\" if a[i] > a[i - k] else \"No\")\r\n\r\n @staticmethod\r\n def d():\r\n n, *a = map(int, sys.stdin.read().split())\r\n a += [-1]\r\n m = 1000\r\n s = 0\r\n for i in range(n):\r\n if a[i + 1] == a[i]:\r\n continue\r\n elif a[i + 1] > a[i]:\r\n cnt = m // a[i]\r\n m -= a[i] * cnt\r\n s += cnt\r\n else:\r\n m += a[i] * s\r\n s = 0\r\n print(m)\r\n\r\n\r\nclass Codeforces:\r\n pass\r\n\r\n\r\nclass ProjectEuler:\r\n @staticmethod\r\n def p1():\r\n def f(n, x):\r\n return (x + n // x * x) * (n // x) // 2\r\n\r\n n = 1000\r\n ans = f(n - 1, 3) + f(n - 1, 5) - f(n - 1, 15)\r\n print(ans)\r\n\r\n @staticmethod\r\n def p2():\r\n fib = [1, 2]\r\n while fib[-1] < 4 * 10**6:\r\n fib.append(fib[-1] + fib[-2])\r\n print(sum(fib[1:-1:3]))\r\n\r\n @staticmethod\r\n def p3():\r\n pn = NumberTheory.PrimeNumbers()\r\n res = pn.factorize(600851475143)\r\n print(max(res.keys()))\r\n\r\n @staticmethod\r\n def p4():\r\n def is_palindrome(n):\r\n n = str(n)\r\n return n == n[::-1]\r\n\r\n cand = []\r\n for a in range(100, 1000):\r\n for b in range(a, 1000):\r\n n = a * b\r\n if is_palindrome(n):\r\n cand.append(n)\r\n print(max(cand))\r\n\r\n @staticmethod\r\n def p5():\r\n pn = NumberTheory.PrimeNumbers()\r\n res = defaultdict(int)\r\n for i in range(1, 21):\r\n for p, c in pn.factorize(i).items():\r\n res[p] = max(res[p], c)\r\n ans = 1\r\n for p, c in res.items():\r\n ans *= pow(p, c)\r\n print(ans)\r\n\r\n @staticmethod\r\n def p6():\r\n a = np.arange(101)\r\n b = np.cumsum(a**2)\r\n a = a.cumsum()\r\n print(a[100] ** 2 - b[100])\r\n\r\n @staticmethod\r\n def p7():\r\n nt = NumberTheory.PrimeNumbers()\r\n print(sorted(nt)[10000])\r\n\r\n @staticmethod\r\n def p8():\r\n n = 
\"7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450\"\r\n n = [int(d) for d in list(n)]\r\n res = 0\r\n for i in range(988):\r\n x = 1\r\n for j in range(13):\r\n x *= n[i + j]\r\n res = max(res, x)\r\n print(res)\r\n\r\n @staticmethod\r\n def p9():\r\n for a in range(1, 997):\r\n for b in range(a, 998 - a):\r\n c = 1000 - a - b\r\n if a**2 + b**2 == c**2:\r\n print(a * b * c)\r\n return\r\n\r\n @staticmethod\r\n def p10():\r\n pn = NumberTheory.PrimeNumbers(2 * 10**6 + 1)\r\n print(sum(pn))\r\n\r\n @staticmethod\r\n def p11():\r\n grid = \"08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08 49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00 81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65 52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91 22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80 24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50 32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70 67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21 24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72 21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95 78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92 16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57 86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58 19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40 04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66 88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69 04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36 20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16 20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54 01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48\"\r\n # grid = np.array(grid.split(), dtype=np.int64).reshape(20, -1)\r\n # cand = []\r\n # for i in range(20):\r\n # bl1 = i+3 < 20\r\n # for j in range(20):\r\n # bl2 = j+3 < 20\r\n # if bl1:\r\n # np.prod\r\n # tmp = 1\r\n # for d in range(4):\r\n # tmp *= grid[i+d, j]\r\n print(grid)\r\n\r\n pass\r\n\r\n\r\nclass Yukicoder:\r\n def __init__(self):\r\n pass\r\n\r\n def __call__(self):\r\n print(1)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n AtCoder.ABC028.d()\r\n", "import sys\r\n\r\nimport numpy as np\r\nfrom scipy.sparse import csr_matrix\r\nfrom scipy.sparse.csgraph import floyd_warshall\r\n\r\nn = int(sys.stdin.readline().rstrip())\r\nA = np.array(sys.stdin.read().split(), dtype=np.float64).reshape(n, n)\r\n\r\n\r\ndef main():\r\n B = floyd_warshall(csr_matrix(A), directed=False).astype(np.int64)\r\n if np.any(B < A):\r\n return 
-1\r\n\r\n np.fill_diagonal(A, np.inf)\r\n\r\n total_length = 0\r\n for v in range(n - 1):\r\n for u in range(v + 1, n):\r\n detours = A[v] + A[u]\r\n if np.all(detours > B[v, u]):\r\n total_length += B[v, u]\r\n\r\n return total_length\r\n\r\n\r\nif __name__ == \"__main__\":\r\n ans = main()\r\n print(ans)\r\n", "import sys\nimport typing\n\nimport numpy as np\n\n\nclass CompressArray():\n def retrieve(\n self,\n i: int,\n ) -> int:\n return self.__v[i]\n\n\n def __call__(\n self,\n a: np.array,\n ) -> np.array:\n v = np.unique(a)\n self.__v = v\n i = np.searchsorted(v, a)\n return i\n\n\ndef main():\n h, w, n = map(\n int, input().split(),\n )\n a, b = np.array(\n sys.stdin.read().split(),\n dtype=np.int64,\n ).reshape(n, 2).T\n\n compress = CompressArray()\n a = compress(a) + 1\n b = compress(b) + 1\n for a, b in zip(a, b):\n print(a, b)\n\n\nmain()\n", "import sys\r\n\r\nimport numpy as np\r\n\r\nn, q, *lr = map(int, sys.stdin.read().split())\r\nl, r = np.array(lr).reshape(q, 2).T\r\nl -= 1\r\nr -= 1\r\n\r\n\r\ndef main():\r\n res = np.zeros(n + 1, dtype=np.int32)\r\n np.add.at(res, l, 1)\r\n np.subtract.at(res, r + 1, 1)\r\n np.cumsum(res, out=res)\r\n print(\"\".join((res[:-1] & 1).astype(str)))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n", "import sys\r\nimport typing\r\n\r\nimport numpy as np\r\n\r\n\r\ndef main() -> typing.NoReturn:\r\n a = np.array(\r\n sys.stdin.read().split(),\r\n dtype=np.int64,\r\n )\r\n rank = np.argsort(a)[::-1] + 1\r\n rank = rank.tolist()\r\n print(*rank, sep=\"\\n\")\r\n\r\n\r\nmain()\r\n" ]
[ [ "numpy.array", "scipy.sparse.csgraph.floyd_warshall", "scipy.sparse.csr_matrix" ], [ "numpy.dot", "numpy.amax", "numpy.resize", "numpy.minimum", "numpy.sqrt", "numpy.cumsum", "numpy.zeros_like", "numpy.fill_diagonal", "numpy.any", "numpy.logical_or.at", "scipy.optimize.bisect", "numpy.hstack", "scipy.sparse.csgraph.shortest_path", "numpy.pad", "numpy.unique", "numpy.arange", "numpy.eye", "numpy.full", "numpy.flatnonzero", "numpy.sin", "numpy.count_nonzero", "numpy.ravel", "numpy.zeros", "numpy.nonzero", "numpy.amin", "numpy.minimum.at", "numpy.bitwise_xor.reduce", "scipy.optimize", "scipy.sparse.csr_matrix", "numpy.logical_or", "numpy.append", "numpy.identity", "numpy.equal", "numpy.argsort", "numpy.array", "scipy.ndimage.distance_transform_cdt", "numpy.sum", "scipy.sparse.csgraph.connected_components", "numpy.add.at", "numpy.absolute", "numpy.maximum", "scipy.optimize.brenth", "numpy.subtract.at", "numpy.sort", "numpy.ones", "numpy.bincount" ], [ "numpy.negative", "numpy.sum" ], [ "numpy.zeros" ], [ "numpy.amin", "numpy.arange", "numpy.maximum", "numpy.minimum" ], [ "numpy.zeros", "numpy.empty" ], [ "numpy.dot", "numpy.amax", "numpy.resize", "numpy.minimum", "numpy.sqrt", "numpy.cumsum", "numpy.all", "numpy.zeros_like", "scipy.optimize.bisect", "numpy.hstack", "scipy.sparse.csgraph.shortest_path", "numpy.pad", "numpy.arange", "numpy.eye", "numpy.flatnonzero", "numpy.full", "numpy.sin", "numpy.count_nonzero", "numpy.ravel", "numpy.zeros", "numpy.nonzero", "numpy.amin", "numpy.bitwise_xor.reduce", "scipy.sparse.csr_matrix", "numpy.logical_or", "numpy.identity", "numpy.array", "scipy.ndimage.distance_transform_cdt", "numpy.sum", "numpy.add.at", "numpy.maximum", "scipy.optimize.brenth", "numpy.subtract.at", "numpy.sort", "numpy.ones", "scipy.special.comb", "numpy.bincount" ], [ "numpy.arange", "numpy.empty", "numpy.full", "numpy.argsort", "numpy.zeros", "numpy.vstack" ], [ "numpy.maximum", "numpy.zeros" ], [ "numpy.add.at", "numpy.zeros", "numpy.cumsum", "numpy.full" ], [ "numpy.empty" ], [ "numpy.amax", "numpy.resize", "numpy.minimum", "numpy.sqrt", "numpy.cumsum", "scipy.optimize.bisect", "numpy.pad", "numpy.arange", "numpy.eye", "numpy.flatnonzero", "numpy.full", "numpy.count_nonzero", "numpy.ravel", "numpy.zeros", "numpy.amin", "numpy.bitwise_xor.reduce", "scipy.sparse.csr_matrix", "numpy.array", "numpy.sum", "numpy.add.at", "numpy.maximum", "numpy.subtract.at", "numpy.sort", "numpy.ones", "scipy.special.comb" ], [ "numpy.zeros", "numpy.empty" ], [ "numpy.sum" ], [ "numpy.dot", "numpy.amax", "numpy.resize", "numpy.minimum", "numpy.sqrt", "numpy.cumsum", "numpy.zeros_like", "numpy.any", "numpy.logical_or.at", "scipy.optimize.bisect", "numpy.hstack", "scipy.sparse.csgraph.shortest_path", "numpy.pad", "numpy.unique", "numpy.arange", "numpy.eye", "numpy.full", "numpy.flatnonzero", "numpy.sin", "numpy.count_nonzero", "numpy.ravel", "numpy.zeros", "numpy.nonzero", "numpy.amin", "numpy.minimum.at", "numpy.bitwise_xor.reduce", "scipy.optimize", "scipy.sparse.csr_matrix", "numpy.logical_or", "numpy.append", "numpy.identity", "numpy.equal", "numpy.argsort", "numpy.array", "scipy.ndimage.distance_transform_cdt", "numpy.sum", "scipy.sparse.csgraph.connected_components", "numpy.add.at", "numpy.absolute", "numpy.maximum", "scipy.optimize.brenth", "numpy.subtract.at", "numpy.sort", "numpy.ones", "scipy.special.comb", "numpy.bincount" ], [ "numpy.amax", "numpy.zeros", "numpy.pad" ], [ "numpy.sum", "numpy.bincount" ], [ "numpy.argsort", "numpy.zeros", "numpy.full" ], [ "numpy.sum", "numpy.any", 
"scipy.sparse.csr_matrix" ], [ "numpy.dot", "numpy.amax", "numpy.resize", "numpy.minimum", "numpy.sqrt", "numpy.cumsum", "numpy.zeros_like", "numpy.fill_diagonal", "numpy.searchsorted", "numpy.any", "numpy.logical_or.at", "scipy.optimize.bisect", "numpy.hstack", "scipy.sparse.csgraph.shortest_path", "numpy.pad", "numpy.unique", "numpy.arange", "numpy.eye", "numpy.full", "numpy.flatnonzero", "numpy.sin", "numpy.diff", "numpy.count_nonzero", "numpy.ravel", "numpy.zeros", "numpy.nonzero", "numpy.amin", "numpy.minimum.at", "numpy.bitwise_xor.reduce", "scipy.optimize", "scipy.sparse.csr_matrix", "numpy.logical_or", "numpy.append", "numpy.identity", "numpy.equal", "numpy.argsort", "numpy.array", "scipy.ndimage.distance_transform_cdt", "numpy.sum", "scipy.sparse.csgraph.connected_components", "numpy.add.at", "numpy.absolute", "numpy.maximum", "numpy.abs", "scipy.optimize.brenth", "numpy.subtract.at", "numpy.sort", "numpy.ones", "numpy.argwhere", "numpy.bincount", "numpy.maximum.accumulate", "numpy.vstack" ], [ "numpy.unique", "numpy.arange", "numpy.full", "numpy.zeros", "numpy.empty" ], [ "numpy.all" ], [ "numpy.arange", "numpy.full" ], [ "numpy.zeros", "numpy.empty" ], [ "numpy.zeros", "numpy.full" ], [ "numpy.hstack", "numpy.unique" ], [ "numpy.zeros" ], [ "numpy.dot", "numpy.amax", "numpy.resize", "numpy.minimum", "numpy.sqrt", "numpy.cumsum", "numpy.zeros_like", "numpy.any", "numpy.logical_or.at", "scipy.optimize.bisect", "numpy.hstack", "scipy.sparse.csgraph.shortest_path", "numpy.pad", "numpy.unique", "numpy.arange", "numpy.eye", "numpy.full", "numpy.flatnonzero", "numpy.sin", "numpy.count_nonzero", "numpy.ravel", "numpy.zeros", "numpy.nonzero", "numpy.amin", "numpy.minimum.at", "numpy.bitwise_xor.reduce", "scipy.optimize", "scipy.sparse.csr_matrix", "numpy.logical_or", "numpy.append", "numpy.identity", "numpy.equal", "numpy.argsort", "numpy.array", "scipy.ndimage.distance_transform_cdt", "numpy.sum", "scipy.sparse.csgraph.connected_components", "numpy.add.at", "numpy.absolute", "numpy.maximum", "scipy.optimize.brenth", "numpy.subtract.at", "numpy.sort", "numpy.ones", "scipy.special.comb", "numpy.bincount" ], [ "numpy.arange", "numpy.array", "numpy.sum", "numpy.count_nonzero" ], [ "scipy.misc.comb" ], [ "numpy.hstack", "numpy.flatnonzero", "numpy.arange", "numpy.unique" ], [ "numpy.amax", "numpy.resize", "numpy.minimum", "numpy.sqrt", "numpy.cumsum", "scipy.optimize.bisect", "numpy.pad", "numpy.arange", "numpy.eye", "numpy.flatnonzero", "numpy.full", "numpy.count_nonzero", "numpy.ravel", "numpy.zeros", "numpy.bitwise_xor.reduce", "scipy.sparse.csr_matrix", "numpy.array", "numpy.sum", "numpy.add.at", "numpy.subtract.at", "numpy.sort", "numpy.ones" ], [ "numpy.arange", "numpy.full" ], [ "numpy.zeros", "numpy.bincount", "numpy.ones" ], [ "numpy.arange", "numpy.zeros", "numpy.empty", "numpy.unique" ], [ "numpy.zeros", "numpy.full" ], [ "numpy.hstack", "numpy.array" ], [ "numpy.array" ], [ "numpy.arange", "numpy.array", "numpy.sum", "numpy.count_nonzero" ], [ "numpy.argsort" ], [ "numpy.dot", "numpy.amax", "numpy.resize", "numpy.minimum", "numpy.sqrt", "numpy.cumsum", "numpy.all", "numpy.zeros_like", "scipy.optimize.bisect", "numpy.hstack", "scipy.sparse.csgraph.shortest_path", "numpy.pad", "numpy.arange", "numpy.eye", "numpy.full", "numpy.flatnonzero", "numpy.sin", "numpy.count_nonzero", "numpy.ravel", "numpy.zeros", "numpy.nonzero", "numpy.amin", "numpy.bitwise_xor.reduce", "scipy.sparse.csr_matrix", "numpy.logical_or", "numpy.identity", "numpy.array", "scipy.ndimage.distance_transform_cdt", 
"numpy.sum", "numpy.add.at", "numpy.maximum", "scipy.optimize.brenth", "numpy.subtract.at", "numpy.sort", "numpy.ones", "scipy.special.comb", "numpy.bincount" ], [ "numpy.all", "scipy.sparse.csr_matrix", "numpy.any", "numpy.fill_diagonal" ], [ "numpy.searchsorted", "numpy.unique" ], [ "numpy.add.at", "numpy.subtract.at", "numpy.cumsum", "numpy.array", "numpy.zeros" ], [ "numpy.argsort" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.14", "1.6", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.14", "1.6", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.14", "1.6", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.14", "1.6", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.14", "0.15", "0.19", "0.18", "1.2", "0.12", "1.0", "0.17", "0.16" ], "tensorflow": [] }, { "matplotlib": [], 
"numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.14", "1.6", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]