Dataset Viewer
Auto-converted to Parquet
Columns:
- repo_name: string (lengths 6–130)
- hexsha: sequence
- file_path: sequence
- code: sequence
- apis: sequence
- possible_versions: list
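Each row below pairs a repository with its files, their source code, the APIs each file calls, and candidate library versions. Since the viewer reports an auto-converted Parquet export, the rows can also be read programmatically. The snippet below is a minimal sketch, assuming the Hugging Face datasets library; the dataset identifier "namespace/dataset-name" is a placeholder, not an id taken from this page.

    # Minimal sketch: load the Parquet-backed dataset and inspect one row.
    # "namespace/dataset-name" is a hypothetical placeholder identifier.
    from datasets import load_dataset

    ds = load_dataset("namespace/dataset-name", split="train")

    row = ds[0]
    print(row["repo_name"])           # e.g. "adcrn/knest"
    print(row["file_path"])           # list of file paths within the repository
    print(row["apis"])                # per-file lists of library calls, e.g. ["numpy.shape"]
    print(row["possible_versions"])   # per-file candidate versions of matplotlib/numpy/pandas/scipy/tensorflow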
adcrn/knest
[ "a274dc9ddb642cc30f837e225f000bf33430eb43", "a274dc9ddb642cc30f837e225f000bf33430eb43" ]
[ "utils/compare.py", "utils/image_man.py" ]
[ "# UCF Senior Design 2017-18\n# Group 38\n\nfrom PIL import Image\nimport cv2\nimport imagehash\nimport math\nimport numpy as np\n\nDIFF_THRES = 20\nLIMIT = 2\nRESIZE = 1000\n\n\ndef calc_hash(img):\n \"\"\"\n Calculate the wavelet hash of the image\n img: (ndarray) image file\n \"\"\"\n # resize image if height > 1000\n img = resize(img)\n return imagehash.whash(Image.fromarray(img))\n\n\ndef compare(hash1, hash2):\n \"\"\"\n Calculate the difference between two images\n hash1: (array) first wavelet hash\n hash2: (array) second wavelet hash\n \"\"\"\n return hash1 - hash2\n\n\ndef limit(img, std_hash, count):\n \"\"\"\n Determine whether image should be removed from image dictionary in main.py\n img: (ndarray) image file\n std_hash: (array) wavelet hash of comparison standard\n count: (int) global count of images similar to comparison standard\n \"\"\"\n # calculate hash for given image\n cmp_hash = calc_hash(img)\n\n # compare to standard\n diff = compare(std_hash, cmp_hash)\n\n # image is similar to standard\n if diff <= DIFF_THRES:\n # if there are 3 similar images already, remove image\n if count >= LIMIT:\n return 'remove'\n\n # non-similar image found\n else:\n # update comparison standard\n return 'update_std'\n\n # else continue reading images with same standard\n return 'continue'\n\n\ndef resize(img):\n \"\"\"\n Resize an image\n img: (ndarray) RGB color image\n \"\"\"\n # get dimensions of image\n width = np.shape(img)[1]\n height = np.shape(img)[0]\n\n # if height of image is greater than 1000, resize it to 1000\n if width > RESIZE:\n # keep resize proportional\n scale = RESIZE / width\n resized_img = cv2.resize(\n img, (RESIZE, math.floor(height / scale)), cv2.INTER_AREA)\n # return resized image\n return resized_img\n\n # if height of image is less than 1000, return image unresized\n return img\n\n\ndef set_standard(images, filename):\n \"\"\"\n Set new comparison standard and update information\n images: (dictionary) dictionary containing all the image data\n filename: (String) name of the image file\n \"\"\"\n return filename, calc_hash(images[filename]), 0\n", "# UCF Senior Design 2017-18\n# Group 38\n\nfrom PIL import Image\nimport math\nimport numpy as np\nimport piexif\n\nSCALING_FACTOR = 3\n\n\ndef man(boxes, image_array, landscape=True, scaling_factor=SCALING_FACTOR):\n \"\"\"\n Crop and manipulate the image for final output.\n image_array: (Array) array representation of the image\n boxes: (Dict) bounding box around subject;\n form: (ymin, xmin, ymax, xmax)\n scaling_factor (Integer) the amount by which to scale the\n bounding box of the object; this is done as a way\n to include some of the environment in the final image\n \"\"\"\n image = Image.fromarray(image_array)\n width, height = image.size\n img_center_x = width / 2\n\n dist_face_center_x = math.inf\n central_face_x, central_face_y = 0, 0\n\n for i in boxes['faces']:\n\n # Get the bounding box coordinates of the face.\n fb_xmin, fb_ymin, fb_xmax, fb_ymax = i\n\n # Calculate the center of the face bounding box.\n fb_center_x, fb_center_y = (\n fb_xmax + fb_xmin) / 2, (fb_ymax + fb_ymin) / 2\n\n # Calculate the distance of the x-component of the\n # the face box centerfrom the center of the image.\n delta_center_x = math.fabs(fb_center_x - img_center_x)\n\n # Get the closest face box center coordinates over all face boxes.\n if delta_center_x < dist_face_center_x:\n dist_face_center_x = delta_center_x\n central_face_x, central_face_y = fb_center_x, fb_center_y\n\n # Initialize bounding box extrema.\n 
sm_xmin, sm_ymin, lar_xmax, lar_ymax = math.inf, math.inf, 0, 0\n\n # Calculate the factor by which we multiply the width and height of box.\n factor = math.sqrt(scaling_factor)\n\n # Get the extrema of the bounding boxes returned from the detection graph.\n for i in boxes['birds']:\n sm_xmin = min(sm_xmin, i[0])\n sm_ymin = min(sm_ymin, i[1])\n lar_xmax = max(lar_xmax, i[2])\n lar_ymax = max(lar_ymax, i[3])\n\n # Calculate the width and height of the final crop area.\n bb_width, bb_height = lar_xmax - sm_xmin, lar_ymax - sm_ymin\n new_width, new_height = round(\n bb_width * factor, 0), round(bb_height * factor, 0)\n\n if landscape:\n if new_width * 1.5 < new_height or math.fabs(new_width - new_height) < new_height * .5:\n new_width = new_height * 1.5\n\n if landscape:\n if new_width * 1.5 < new_height or math.fabs(new_width - new_height) < new_height * .5:\n new_width = new_height * 1.5\n\n # Calculate the amounts by which to adjust the face_box coordinates.\n width_diff, height_diff = new_width / 2, new_height / 2\n\n # Set the new dimensions for the final crop box.\n final_xmin, final_xmax = central_face_x - \\\n width_diff, central_face_x + width_diff\n final_ymin, final_ymax = central_face_y - \\\n height_diff, central_face_y + height_diff\n\n # Edge case handling.\n if final_xmin < 0: final_xmin = 0\n if final_xmax > width: final_xmax = width\n if final_ymin < 0: final_ymin = 0\n if final_ymax > height: final_ymax = height\n\n # Crop and attempt to save image.\n cropped_area = image.crop((final_xmin, final_ymin, final_xmax, final_ymax))\n\n try:\n final_image = np.asarray(cropped_area)\n return final_image, True\n except IOError:\n print(\"File could not be written properly.\")\n return False\n\n\ndef exif(filename, image_array):\n \"\"\"\n Transfer the EXIF metadata from the original photograph to the\n cropped version, along with some changes to a few values.\n filename: (String) filename from original photo\n new_array: (Array) array_representation of cropped image\n \"\"\"\n try:\n # Get the EXIF metadata from the original image.\n exif_dict = piexif.load(filename)\n except piexif._exceptions.InvalidImageDataError:\n return None\n\n # Some photos mysteriously do not contain EXIF data, so\n # check if it actually exists and then return properly.\n if len(exif_dict[\"Exif\"].items()) == 0:\n return None\n\n else:\n # Set the image height and width of new EXIF to the new\n # crop dimensions. Values come from the Piexif documentation.\n exif_dict[\"Exif\"][40963], exif_dict[\"Exif\"][40962], _ = image_array.shape\n\n # Convert EXIF dictionary to a bytes object for writing with PIL.\n exif_bytes = piexif.dump(exif_dict)\n\n return exif_bytes\n" ]
[ [ "numpy.shape" ], [ "numpy.asarray" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dongmengshi/easylearn
[ "df528aaa69c3cf61f5459a04671642eb49421dfb", "df528aaa69c3cf61f5459a04671642eb49421dfb", "df528aaa69c3cf61f5459a04671642eb49421dfb", "df528aaa69c3cf61f5459a04671642eb49421dfb" ]
[ "eslearn/utils/lc_featureSelection_variance.py", "eslearn/machine_learning/test/GCNNCourseCodes/metrics.py", "eslearn/machine_learning/test/gcn_test.py", "eslearn/utils/lc_cacl_MAD.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 24 14:38:20 2018\ndimension reduction with VarianceThreshold using sklearn.\nFeature selector that removes all low-variance features.\n@author: lenovo\n\"\"\"\nfrom sklearn.feature_selection import VarianceThreshold\nimport numpy as np\n#\nnp.random.seed(1)\nX = np.random.randn(100, 10)\nX = np.hstack([X, np.zeros([100, 5])])\n#\n\n\ndef featureSelection_variance(X, thrd):\n sel = VarianceThreshold(threshold=thrd)\n X_selected = sel.fit_transform(X)\n mask = sel.get_support()\n return X_selected, mask\n\n\nX = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]\nselector = VarianceThreshold()\nselector.fit_transform(X)\nselector.variances_\n", "import tensorflow as tf\n\n\ndef masked_softmax_cross_entropy(preds, labels, mask):\n \"\"\"Softmax cross-entropy loss with masking.\"\"\"\n loss = tf.nn.softmax_cross_entropy_with_logits(logits=preds, labels=labels) \n mask = tf.cast(mask, dtype=tf.float32)\n mask /= tf.reduce_mean(mask)\n loss *= mask\n return tf.reduce_mean(loss)\n\ndef sigmoid_cross_entropy(preds, labels):\n \"\"\"Softmax cross-entropy loss with masking.\"\"\"\n loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=preds, labels=labels) \n return tf.reduce_mean(loss)\n\ndef softmax_cross_entropy(preds, labels):\n \"\"\"Softmax cross-entropy loss with masking.\"\"\"\n loss = tf.nn.softmax_cross_entropy_with_logits(logits=preds, labels=labels) \n return tf.reduce_mean(loss)\n\ndef masked_accuracy(preds, labels, mask):\n \"\"\"Accuracy with masking.\"\"\"\n correct_prediction = tf.equal(tf.argmax(preds, 1), tf.argmax(labels, 1))\n accuracy_all = tf.cast(correct_prediction, tf.float32)\n mask = tf.cast(mask, dtype=tf.float32)\n mask /= tf.reduce_mean(mask)\n accuracy_all *= mask\n return tf.reduce_mean(accuracy_all)\n\ndef inductive_multiaccuracy(preds, labels):\n \"\"\"Accuracy with masking.\"\"\"\n\n correct_prediction = tf.equal(tf.argmax(preds, 1), tf.argmax(labels, 1)) \n return tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n\ndef inductive_accuracy(preds, labels):\n \"\"\"Accuracy with masking.\"\"\"\n\n predicted = tf.nn.sigmoid(preds)\n correct_pred = tf.equal(tf.round(predicted), labels)\n accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) \n return accuracy\n", "import torch\nfrom torch.nn import Linear\nimport torch.nn.functional as F\nfrom torch_geometric.nn.conv import MessagePassing\nfrom torch_geometric.data import Data\n\n\nclass CGConv(MessagePassing):\n r\"\"\"The crystal graph convolutional operator from the\n `\"Crystal Graph Convolutional Neural Networks for an\n Accurate and Interpretable Prediction of Material Properties\"\n <https://journals.aps.org/prl/abstract/10.1103/PhysRevLett.120.145301>`_\n paper\n\n .. 
math::\n \\mathbf{x}^{\\prime}_i = \\mathbf{x}_i + \\sum_{j \\in \\mathcal{N}(i)}\n \\sigma \\left( \\mathbf{z}_{i,j} \\mathbf{W}_f + \\mathbf{b}_f \\right)\n \\odot g \\left( \\mathbf{z}_{i,j} \\mathbf{W}_s + \\mathbf{b}_s \\right)\n\n where :math:`\\mathbf{z}_{i,j} = [ \\mathbf{x}_i, \\mathbf{x}_j,\n \\mathbf{e}_{i,j} ]` denotes the concatenation of central node features,\n neighboring node features and edge features.\n In addition, :math:`\\sigma` and :math:`g` denote the sigmoid and softplus\n functions, respectively.\n\n Args:\n channels (int): Size of each input sample.\n dim (int): Edge feature dimensionality.\n aggr (string, optional): The aggregation operator to use\n (:obj:`\"add\"`, :obj:`\"mean\"`, :obj:`\"max\"`).\n (default: :obj:`\"add\"`)\n bias (bool, optional): If set to :obj:`False`, the layer will not learn\n an additive bias. (default: :obj:`True`)\n **kwargs (optional): Additional arguments of\n :class:`torch_geometric.nn.conv.MessagePassing`.\n \"\"\"\n def __init__(self, channels, dim, aggr='add', bias=True, **kwargs):\n super(CGConv, self).__init__(aggr=aggr, **kwargs)\n self.in_channels = channels\n self.out_channels = channels\n self.dim = dim\n\n self.lin_f = Linear(2 * channels + dim, channels, bias=bias)\n self.lin_s = Linear(2 * channels + dim, channels, bias=bias)\n\n self.reset_parameters()\n\n def reset_parameters(self):\n self.lin_f.reset_parameters()\n self.lin_s.reset_parameters()\n\n\n def forward(self, data):\n \"\"\"\"\"\"\n x, edge_index, edge_attr = data.x, data.edge_index, data.edge_attr\n return self.propagate(edge_index, x=x, edge_attr=edge_attr)\n\n\n def message(self, x_i, x_j, edge_attr):\n z = torch.cat([x_i, x_j, edge_attr], dim=-1)\n return self.lin_f(z).sigmoid() * F.softplus(self.lin_s(z))\n\n def update(self, aggr_out, x):\n return aggr_out + x\n\n def __repr__(self):\n return '{}({}, {}, dim={})'.format(self.__class__.__name__,\n self.in_channels, self.out_channels,\n self.dim)\n\nif __name__ == \"__main__\":\n # Generate Data\n edge_index = torch.tensor([[0, 1, 1, 2],[1, 0, 2, 1]], dtype=torch.long)\n x = torch.tensor([[-1], [0], [1]], dtype=torch.float)\n y = torch.tensor([[-1], [1], [1]], dtype=torch.float)\n edge_attr = torch.tensor([[1], [0], [0]], dtype=torch.float)\n data = Data(x=x, edge_index=edge_index, edge_attr=edge_attr, y=y)\n\n # Training\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n model = CGConv(1, 1).to(device)\n data = data.to(device)\n optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)\n\n model.train()\n for epoch in range(20):\n optimizer.zero_grad()\n out = model(data)\n loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask])\n loss.backward()\n optimizer.step()\n\n # Evaluation\n model.eval()\n _, pred = model(data).max(dim=1)\n correct = float (pred[data.test_mask].eq(data.y[data.test_mask]).sum().item())\n acc = correct / data.test_mask.sum().item()\n print('Accuracy: {:.4f}'.format(acc))", "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug 6 11:18:08 2018\nMAD,median absolute deviation for dimension reduction\nMAD=median(|Xi−median(X)|)\nrefer to {Linked dimensions of psychopathology\nand connectivity in functional brain networks}\n@author: Li Chao\n\"\"\"\nimport numpy as np\n\n\ndef select_features_using_MAD(M, perc=0.1):\n # perc: how many percentages of feature\n # that have top MAD to be selected\n MAD = cacl_MAD(M)\n Ind_descendOrd = np.argsort(MAD)[::-1] # decend order\n Ind_select = Ind_descendOrd[0:int(len(Ind_descendOrd) * perc)]\n 
feature_selected = M[:, Ind_select]\n return feature_selected\n\n\ndef cacl_MAD(M):\n # caculate MAD\n # row is sample, col is feature\n my_median = np.median(M, 0)\n my_abs = np.abs(M - my_median)\n MAD = np.median(my_abs, 0)\n return MAD\n" ]
[ [ "sklearn.feature_selection.VarianceThreshold", "numpy.random.randn", "numpy.zeros", "numpy.random.seed" ], [ "tensorflow.nn.softmax_cross_entropy_with_logits", "tensorflow.nn.sigmoid", "tensorflow.reduce_mean", "tensorflow.cast", "tensorflow.nn.sigmoid_cross_entropy_with_logits", "tensorflow.argmax", "tensorflow.round" ], [ "torch.nn.functional.nll_loss", "torch.cat", "torch.tensor", "torch.nn.Linear", "torch.cuda.is_available" ], [ "numpy.argsort", "numpy.median", "numpy.abs" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
silent567/examples
[ "e9de12549125ecd93a4924f6b8e2bbf66d7635d9" ]
[ "mnist/my_multi_tune3.py" ]
[ "#!/usr/bin/env python\n# coding=utf-8\n\nfrom my_multi_main3 import main\nimport numpy as np\nimport argparse\nimport time\n\nparser = argparse.ArgumentParser(description='PyTorch MNIST Example')\nparser.add_argument('--batch-size', type=int, default=64, metavar='N',\n help='input batch size for training (default: 64)')\nparser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',\n help='input batch size for testing (default: 1000)')\nparser.add_argument('--epochs', type=int, default=10, metavar='N',\n help='number of epochs to train (default: 10)')\nparser.add_argument('--lr', type=float, default=0.01, metavar='LR',\n help='learning rate (default: 0.01)')\nparser.add_argument('--momentum', type=float, default=0.5, metavar='M',\n help='SGD momentum (default: 0.5)')\nparser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\nparser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\nparser.add_argument('--log-interval', type=int, default=10, metavar='N',\n help='how many batches to wait before logging training status')\nparser.add_argument('--save-model', action='store_true', default=False,\n help='For Saving the current Model')\nparser.add_argument('--norm-flag', type=bool, default=False,\n help='Triggering the Layer Normalization flag for attention scores')\nparser.add_argument('--gamma', type=float, default=None,\n help='Controlling the sparisty of gfusedmax/sparsemax, the smaller, the more sparse')\nparser.add_argument('--lam', type=float, default=1.0,\n help='Lambda: Controlling the smoothness of gfusedmax, the larger, the smoother')\nparser.add_argument('--max-type', type=str, default='softmax',choices=['softmax','sparsemax','gfusedmax'],\n help='mapping function in attention')\nparser.add_argument('--optim-type', type=str, default='SGD',choices=['SGD','Adam'],\n help='mapping function in attention')\nparser.add_argument('--head-cnt', type=int, default=2, metavar='S', choices=[1,2,4,5,10],\n help='Number of heads for attention (default: 1)')\n\nargs = parser.parse_args()\n\nhyperparameter_choices = {\n 'lr':list(10**np.arange(-4,-1,0.5)),\n 'norm_flag': [True,False],\n 'gamma':list(10**np.arange(-1,3,0.5))+[None,],\n 'lam':list(10**np.arange(-2,2,0.5)),\n 'max_type':['softmax','sparsemax','gfusedmax'],\n # 'max_type':['sparsemax'],\n 'optim_type':['SGD','Adam'],\n 'head_cnt':[1,2,4,5,10,20]\n}\n\nparam_num = 25\nrecord = np.zeros([param_num,len(hyperparameter_choices)+1])\nrecord_name = 'record3_multi_%s.csv'%time.strftime('%Y-%m-%d_%H-%M-%S',time.localtime())\nfor n in range(param_num):\n for param_index,(k,v) in enumerate(hyperparameter_choices.items()):\n print(param_index,k)\n value_index = np.random.choice(len(v))\n if isinstance(v[value_index],str) or isinstance(v[value_index],bool) or v[value_index] is None:\n record[n,param_index] = value_index\n else:\n record[n,param_index] = v[value_index]\n setattr(args,k,v[value_index])\n record[n,-1] = main(args)\n np.savetxt(record_name, record, delimiter=',')\n\n\n\n" ]
[ [ "numpy.savetxt", "numpy.arange" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
neonbjb/DL-Art-School
[ "a6f0f854b987ac724e258af8b042ea4459a571bc", "a6f0f854b987ac724e258af8b042ea4459a571bc", "a6f0f854b987ac724e258af8b042ea4459a571bc", "a6f0f854b987ac724e258af8b042ea4459a571bc" ]
[ "codes/data/image_corruptor.py", "codes/models/glean/stylegan2_latent_bank.py", "codes/models/spleeter/util.py", "codes/train.py" ]
[ "import functools\nimport random\nfrom math import cos, pi\n\nimport cv2\nimport kornia\nimport numpy as np\nimport torch\nfrom kornia.augmentation import ColorJitter\n\nfrom data.util import read_img\nfrom PIL import Image\nfrom io import BytesIO\n\n\n# Get a rough visualization of the above distribution. (Y-axis is meaningless, just spreads data)\nfrom utils.util import opt_get\n\n'''\nif __name__ == '__main__':\n import numpy as np\n import matplotlib.pyplot as plt\n data = np.asarray([get_rand() for _ in range(5000)])\n plt.plot(data, np.random.uniform(size=(5000,)), 'x')\n plt.show()\n'''\n\n\ndef kornia_color_jitter_numpy(img, setting):\n if setting * 255 > 1:\n # I'm using Kornia's ColorJitter, which requires pytorch arrays in b,c,h,w format.\n img = torch.from_numpy(img).permute(2,0,1).unsqueeze(0)\n img = ColorJitter(setting, setting, setting, setting)(img)\n img = img.squeeze(0).permute(1,2,0).numpy()\n return img\n\n\n# Performs image corruption on a list of images from a configurable set of corruption\n# options.\nclass ImageCorruptor:\n def __init__(self, opt):\n self.opt = opt\n self.reset_random()\n self.blur_scale = opt['corruption_blur_scale'] if 'corruption_blur_scale' in opt.keys() else 1\n self.fixed_corruptions = opt['fixed_corruptions'] if 'fixed_corruptions' in opt.keys() else []\n self.num_corrupts = opt['num_corrupts_per_image'] if 'num_corrupts_per_image' in opt.keys() else 0\n self.cosine_bias = opt_get(opt, ['cosine_bias'], True)\n if self.num_corrupts == 0:\n return\n else:\n self.random_corruptions = opt['random_corruptions'] if 'random_corruptions' in opt.keys() else []\n\n def reset_random(self):\n if 'random_seed' in self.opt.keys():\n self.rand = random.Random(self.opt['random_seed'])\n else:\n self.rand = random.Random()\n\n # Feeds a random uniform through a cosine distribution to slightly bias corruptions towards \"uncorrupted\".\n # Return is on [0,1] with a bias towards 0.\n def get_rand(self):\n r = self.rand.random()\n if self.cosine_bias:\n return 1 - cos(r * pi / 2)\n else:\n return r\n\n def corrupt_images(self, imgs, return_entropy=False):\n if self.num_corrupts == 0 and not self.fixed_corruptions:\n if return_entropy:\n return imgs, []\n else:\n return imgs\n\n if self.num_corrupts == 0:\n augmentations = []\n else:\n augmentations = random.choices(self.random_corruptions, k=self.num_corrupts)\n\n # Sources of entropy\n corrupted_imgs = []\n entropy = []\n undo_fns = []\n applied_augs = augmentations + self.fixed_corruptions\n for img in imgs:\n for aug in augmentations:\n r = self.get_rand()\n img, undo_fn = self.apply_corruption(img, aug, r, applied_augs)\n if undo_fn is not None:\n undo_fns.append(undo_fn)\n for aug in self.fixed_corruptions:\n r = self.get_rand()\n img, undo_fn = self.apply_corruption(img, aug, r, applied_augs)\n entropy.append(r)\n if undo_fn is not None:\n undo_fns.append(undo_fn)\n # Apply undo_fns after all corruptions are finished, in same order.\n for ufn in undo_fns:\n img = ufn(img)\n corrupted_imgs.append(img)\n\n\n if return_entropy:\n return corrupted_imgs, entropy\n else:\n return corrupted_imgs\n\n def apply_corruption(self, img, aug, rand_val, applied_augmentations):\n undo_fn = None\n if 'color_quantization' in aug:\n # Color quantization\n quant_div = 2 ** (int(rand_val * 10 / 3) + 2)\n img = img * 255\n img = (img // quant_div) * quant_div\n img = img / 255\n elif 'color_jitter' in aug:\n lo_end = 0\n hi_end = .2\n setting = rand_val * (hi_end - lo_end) + lo_end\n img = kornia_color_jitter_numpy(img, 
setting)\n elif 'gaussian_blur' in aug:\n img = cv2.GaussianBlur(img, (0,0), self.blur_scale*rand_val*1.5)\n elif 'motion_blur' in aug:\n # Motion blur\n intensity = self.blur_scale*rand_val * 3 + 1\n angle = random.randint(0,360)\n k = np.zeros((intensity, intensity), dtype=np.float32)\n k[(intensity - 1) // 2, :] = np.ones(intensity, dtype=np.float32)\n k = cv2.warpAffine(k, cv2.getRotationMatrix2D((intensity / 2 - 0.5, intensity / 2 - 0.5), angle, 1.0),\n (intensity, intensity))\n k = k * (1.0 / np.sum(k))\n img = cv2.filter2D(img, -1, k)\n elif 'block_noise' in aug:\n # Large distortion blocks in part of an img, such as is used to mask out a face.\n pass\n elif 'lq_resampling' in aug:\n # Random mode interpolation HR->LR->HR\n if 'lq_resampling4x' == aug:\n scale = 4\n else:\n if rand_val < .3:\n scale = 1\n elif rand_val < .7:\n scale = 2\n else:\n scale = 4\n if scale > 1:\n interpolation_modes = [cv2.INTER_NEAREST, cv2.INTER_CUBIC, cv2.INTER_LINEAR, cv2.INTER_LANCZOS4]\n mode = random.randint(0,4) % len(interpolation_modes)\n # Downsample first, then upsample using the random mode.\n img = cv2.resize(img, dsize=(img.shape[1]//scale, img.shape[0]//scale), interpolation=mode)\n def lq_resampling_undo_fn(scale, img):\n return cv2.resize(img, dsize=(img.shape[1]*scale, img.shape[0]*scale), interpolation=cv2.INTER_LINEAR)\n undo_fn = functools.partial(lq_resampling_undo_fn, scale)\n elif 'color_shift' in aug:\n # Color shift\n pass\n elif 'interlacing' in aug:\n # Interlacing distortion\n pass\n elif 'chromatic_aberration' in aug:\n # Chromatic aberration\n pass\n elif 'noise' in aug:\n # Random noise\n if 'noise-5' == aug:\n noise_intensity = 5 / 255.0\n else:\n noise_intensity = (rand_val*6) / 255.0\n img += np.random.rand(*img.shape) * noise_intensity\n elif 'jpeg' in aug:\n if 'noise' not in applied_augmentations and 'noise-5' not in applied_augmentations:\n if aug == 'jpeg':\n lo=10\n range=20\n elif aug == 'jpeg-low':\n lo=15\n range=10\n elif aug == 'jpeg-medium':\n lo=23\n range=25\n elif aug == 'jpeg-broad':\n lo=15\n range=60\n elif aug == 'jpeg-normal':\n lo=47\n range=35\n else:\n raise NotImplementedError(\"specified jpeg corruption doesn't exist\")\n # JPEG compression\n qf = (int((1-rand_val)*range) + lo)\n # Use PIL to perform a mock compression to a data buffer, then swap back to cv2.\n img = (img * 255).astype(np.uint8)\n img = Image.fromarray(img)\n buffer = BytesIO()\n img.save(buffer, \"JPEG\", quality=qf, optimize=True)\n buffer.seek(0)\n jpeg_img_bytes = np.asarray(bytearray(buffer.read()), dtype=\"uint8\")\n img = read_img(\"buffer\", jpeg_img_bytes, rgb=True)\n elif 'saturation' in aug:\n # Lightening / saturation\n saturation = rand_val * .3\n img = np.clip(img + saturation, a_max=1, a_min=0)\n elif 'greyscale' in aug:\n img = np.tile(np.mean(img, axis=2, keepdims=True), [1,1,3])\n elif 'none' not in aug:\n raise NotImplementedError(\"Augmentation doesn't exist\")\n\n return img, undo_fn\n", "import torch\nimport torch.nn as nn\n\nfrom models.arch_util import ConvGnLelu\nfrom models.stylegan.stylegan2_rosinality import Generator\n\n\nclass Stylegan2LatentBank(nn.Module):\n def __init__(self, pretrained_model_file, encoder_nf=64, encoder_max_nf=512, max_dim=1024, latent_dim=512, encoder_levels=4, decoder_levels=3):\n super().__init__()\n\n # Initialize the bank.\n self.bank = Generator(size=max_dim, style_dim=latent_dim, n_mlp=8, channel_multiplier=2) # Assumed using 'f' generators with mult=2.\n state_dict = torch.load(pretrained_model_file)\n 
self.bank.load_state_dict(state_dict, strict=True)\n\n # Shut off training of the latent bank.\n for p in self.bank.parameters():\n p.requires_grad = False\n p.DO_NOT_TRAIN = True\n\n # These are from `stylegan_rosinality.py`, search for `self.channels = {`.\n stylegan_encoder_dims = [512, 512, 512, 512, 512, 256, 128, 64, 32]\n\n # Initialize the fusion blocks. TODO: Try using the StyledConvs instead of regular ones.\n encoder_output_dims = reversed([min(encoder_nf * 2 ** i, encoder_max_nf) for i in range(encoder_levels)])\n input_dims_by_layer = [eod + sed for eod, sed in zip(encoder_output_dims, stylegan_encoder_dims)]\n self.fusion_blocks = nn.ModuleList([ConvGnLelu(in_filters, out_filters, kernel_size=3, activation=True, norm=False, bias=True)\n for in_filters, out_filters in zip(input_dims_by_layer, stylegan_encoder_dims)])\n\n self.decoder_levels = decoder_levels\n self.decoder_start = encoder_levels - 1\n self.total_levels = encoder_levels + decoder_levels - 1\n\n # This forward mirrors the forward() pass from the rosinality stylegan2 implementation, with the additions called\n # for from the GLEAN paper. GLEAN mods are annotated with comments.\n # Removed stuff:\n # - Support for split latents (we're spoonfeeding them)\n # - Support for fixed noise inputs\n # - RGB computations -> we only care about the latents\n # - Style MLP -> GLEAN computes the Style inputs directly.\n # - Later layers -> GLEAN terminates at 256 resolution.\n def forward(self, convolutional_features, latent_vectors):\n\n out = self.bank.input(latent_vectors[:, 0]) # The input here is only used to fetch the batch size.\n out = self.bank.conv1(out, latent_vectors[:, 0], noise=None)\n\n k = 0\n decoder_outputs = []\n for conv1, conv2 in zip(self.bank.convs[::2], self.bank.convs[1::2]):\n if k < len(self.fusion_blocks):\n out = torch.cat([convolutional_features[-k-1], out], dim=1)\n out = self.fusion_blocks[k](out)\n\n out = conv1(out, latent_vectors[:, k], noise=None)\n out = conv2(out, latent_vectors[:, k], noise=None)\n\n if k >= self.decoder_start:\n decoder_outputs.append(out)\n if k >= self.total_levels:\n break\n\n k += 1\n\n return decoder_outputs\n", "import numpy as np\nimport tensorflow as tf\n\nfrom .unet import UNet\n\n\ndef tf2pytorch(checkpoint_path, num_instrumments):\n tf_vars = {}\n init_vars = tf.train.list_variables(checkpoint_path)\n # print(init_vars)\n for name, shape in init_vars:\n try:\n # print('Loading TF Weight {} with shape {}'.format(name, shape))\n data = tf.train.load_variable(checkpoint_path, name)\n tf_vars[name] = data\n except Exception as e:\n print('Load error')\n conv_idx = 0\n tconv_idx = 0\n bn_idx = 0\n outputs = []\n for i in range(num_instrumments):\n output = {}\n outputs.append(output)\n\n for j in range(1,7):\n if conv_idx == 0:\n conv_suffix = \"\"\n else:\n conv_suffix = \"_\" + str(conv_idx)\n\n if bn_idx == 0:\n bn_suffix = \"\"\n else:\n bn_suffix = \"_\" + str(bn_idx)\n\n output['down{}_conv.weight'.format(j)] = np.transpose(\n tf_vars[\"conv2d{}/kernel\".format(conv_suffix)], (3, 2, 0, 1))\n # print('conv dtype: ',output['down{}.0.weight'.format(j)].dtype)\n output['down{}_conv.bias'.format(\n j)] = tf_vars[\"conv2d{}/bias\".format(conv_suffix)]\n\n output['down{}_act.0.weight'.format(\n j)] = tf_vars[\"batch_normalization{}/gamma\".format(bn_suffix)]\n output['down{}_act.0.bias'.format(\n j)] = tf_vars[\"batch_normalization{}/beta\".format(bn_suffix)]\n output['down{}_act.0.running_mean'.format(\n j)] = 
tf_vars['batch_normalization{}/moving_mean'.format(bn_suffix)]\n output['down{}_act.0.running_var'.format(\n j)] = tf_vars['batch_normalization{}/moving_variance'.format(bn_suffix)]\n\n conv_idx += 1\n bn_idx += 1\n\n # up blocks\n for j in range(1, 7):\n if tconv_idx == 0:\n tconv_suffix = \"\"\n else:\n tconv_suffix = \"_\" + str(tconv_idx)\n\n if bn_idx == 0:\n bn_suffix = \"\"\n else:\n bn_suffix= \"_\" + str(bn_idx)\n\n output['up{}.0.weight'.format(j)] = np.transpose(\n tf_vars[\"conv2d_transpose{}/kernel\".format(tconv_suffix)], (3,2,0, 1))\n output['up{}.0.bias'.format(\n j)] = tf_vars[\"conv2d_transpose{}/bias\".format(tconv_suffix)]\n output['up{}.2.weight'.format(\n j)] = tf_vars[\"batch_normalization{}/gamma\".format(bn_suffix)]\n output['up{}.2.bias'.format(\n j)] = tf_vars[\"batch_normalization{}/beta\".format(bn_suffix)]\n output['up{}.2.running_mean'.format(\n j)] = tf_vars['batch_normalization{}/moving_mean'.format(bn_suffix)]\n output['up{}.2.running_var'.format(\n j)] = tf_vars['batch_normalization{}/moving_variance'.format(bn_suffix)]\n tconv_idx += 1\n bn_idx += 1\n\n if conv_idx == 0:\n suffix = \"\"\n else:\n suffix = \"_\" + str(conv_idx)\n output['up7.0.weight'] = np.transpose(\n tf_vars['conv2d{}/kernel'.format(suffix)], (3, 2, 0, 1))\n output['up7.0.bias'] = tf_vars['conv2d{}/bias'.format(suffix)]\n conv_idx += 1\n\n return outputs", "import os\nimport math\nimport argparse\nimport random\nimport logging\nfrom tqdm import tqdm\n\nimport torch\nfrom data.data_sampler import DistIterSampler\nfrom trainer.eval.evaluator import create_evaluator\n\nfrom utils import util, options as option\nfrom data import create_dataloader, create_dataset\nfrom trainer.ExtensibleTrainer import ExtensibleTrainer\nfrom time import time\n\nfrom utils.util import opt_get\n\n\ndef init_dist(backend, **kwargs):\n # These packages have globals that screw with Windows, so only import them if needed.\n import torch.distributed as dist\n import torch.multiprocessing as mp\n\n \"\"\"initialization for distributed training\"\"\"\n if mp.get_start_method(allow_none=True) != 'spawn':\n mp.set_start_method('spawn')\n rank = int(os.environ['RANK'])\n num_gpus = torch.cuda.device_count()\n torch.cuda.set_device(rank % num_gpus)\n dist.init_process_group(backend=backend, **kwargs)\n\nclass Trainer:\n\n def init(self, opt, launcher, all_networks={}):\n self._profile = False\n self.val_compute_psnr = opt_get(opt, ['eval', 'compute_psnr'], False)\n self.val_compute_fea = opt_get(opt, ['eval', 'compute_fea'], False)\n\n #### loading resume state if exists\n if opt['path'].get('resume_state', None):\n # distributed resuming: all load into default GPU\n device_id = torch.cuda.current_device()\n resume_state = torch.load(opt['path']['resume_state'],\n map_location=lambda storage, loc: storage.cuda(device_id))\n option.check_resume(opt, resume_state['iter']) # check resume options\n else:\n resume_state = None\n\n #### mkdir and loggers\n if self.rank <= 0: # normal training (self.rank -1) OR distributed training (self.rank 0)\n if resume_state is None:\n util.mkdir_and_rename(\n opt['path']['experiments_root']) # rename experiment folder if exists\n util.mkdirs(\n (path for key, path in opt['path'].items() if not key == 'experiments_root' and path is not None\n and 'pretrain_model' not in key and 'resume' not in key))\n\n # config loggers. 
Before it, the log will not work\n util.setup_logger('base', opt['path']['log'], 'train_' + opt['name'], level=logging.INFO,\n screen=True, tofile=True)\n self.logger = logging.getLogger('base')\n self.logger.info(option.dict2str(opt))\n # tensorboard logger\n if opt['use_tb_logger'] and 'debug' not in opt['name']:\n self.tb_logger_path = os.path.join(opt['path']['experiments_root'], 'tb_logger')\n version = float(torch.__version__[0:3])\n if version >= 1.1: # PyTorch 1.1\n from torch.utils.tensorboard import SummaryWriter\n else:\n self.self.logger.info(\n 'You are using PyTorch {}. Tensorboard will use [tensorboardX]'.format(version))\n from tensorboardX import SummaryWriter\n self.tb_logger = SummaryWriter(log_dir=self.tb_logger_path)\n else:\n util.setup_logger('base', opt['path']['log'], 'train', level=logging.INFO, screen=True)\n self.logger = logging.getLogger('base')\n\n # convert to NoneDict, which returns None for missing keys\n opt = option.dict_to_nonedict(opt)\n self.opt = opt\n\n #### wandb init\n if opt['wandb'] and self.rank <= 0:\n import wandb\n os.makedirs(os.path.join(opt['path']['log'], 'wandb'), exist_ok=True)\n wandb.init(project=opt['name'], dir=opt['path']['log'])\n\n #### random seed\n seed = opt['train']['manual_seed']\n if seed is None:\n seed = random.randint(1, 10000)\n if self.rank <= 0:\n self.logger.info('Random seed: {}'.format(seed))\n seed += self.rank # Different multiprocessing instances should behave differently.\n util.set_random_seed(seed)\n\n torch.backends.cudnn.benchmark = opt_get(opt, ['cuda_benchmarking_enabled'], True)\n # torch.backends.cudnn.deterministic = True\n if opt_get(opt, ['anomaly_detection'], False):\n torch.autograd.set_detect_anomaly(True)\n\n # Save the compiled opt dict to the global loaded_options variable.\n util.loaded_options = opt\n\n #### create train and val dataloader\n dataset_ratio = 1 # enlarge the size of each epoch\n for phase, dataset_opt in opt['datasets'].items():\n if phase == 'train':\n self.train_set, collate_fn = create_dataset(dataset_opt, return_collate=True)\n train_size = int(math.ceil(len(self.train_set) / dataset_opt['batch_size']))\n total_iters = int(opt['train']['niter'])\n self.total_epochs = int(math.ceil(total_iters / train_size))\n if opt['dist']:\n self.train_sampler = DistIterSampler(self.train_set, self.world_size, self.rank, dataset_ratio)\n self.total_epochs = int(math.ceil(total_iters / (train_size * dataset_ratio)))\n shuffle = False\n else:\n self.train_sampler = None\n shuffle = True\n self.train_loader = create_dataloader(self.train_set, dataset_opt, opt, self.train_sampler, collate_fn=collate_fn, shuffle=shuffle)\n if self.rank <= 0:\n self.logger.info('Number of train images: {:,d}, iters: {:,d}'.format(\n len(self.train_set), train_size))\n self.logger.info('Total epochs needed: {:d} for iters {:,d}'.format(\n self.total_epochs, total_iters))\n elif phase == 'val':\n self.val_set, collate_fn = create_dataset(dataset_opt, return_collate=True)\n self.val_loader = create_dataloader(self.val_set, dataset_opt, opt, None, collate_fn=collate_fn)\n if self.rank <= 0:\n self.logger.info('Number of val images in [{:s}]: {:d}'.format(\n dataset_opt['name'], len(self.val_set)))\n else:\n raise NotImplementedError('Phase [{:s}] is not recognized.'.format(phase))\n assert self.train_loader is not None\n\n #### create model\n self.model = ExtensibleTrainer(opt, cached_networks=all_networks)\n\n ### Evaluators\n self.evaluators = []\n if 'eval' in opt.keys() and 'evaluators' in 
opt['eval'].keys():\n # In \"pure\" mode, we propagate through the normal training steps, but use validation data instead and average\n # the total loss. A validation dataloader is required.\n if opt_get(opt, ['eval', 'pure'], False):\n assert hasattr(self, 'val_loader')\n\n for ev_key, ev_opt in opt['eval']['evaluators'].items():\n self.evaluators.append(create_evaluator(self.model.networks[ev_opt['for']],\n ev_opt, self.model.env))\n\n #### resume training\n if resume_state:\n self.logger.info('Resuming training from epoch: {}, iter: {}.'.format(\n resume_state['epoch'], resume_state['iter']))\n\n self.start_epoch = resume_state['epoch']\n self.current_step = resume_state['iter']\n self.model.resume_training(resume_state, 'amp_opt_level' in opt.keys()) # handle optimizers and schedulers\n else:\n self.current_step = -1 if 'start_step' not in opt.keys() else opt['start_step']\n self.start_epoch = 0\n if 'force_start_step' in opt.keys():\n self.current_step = opt['force_start_step']\n opt['current_step'] = self.current_step\n\n def do_step(self, train_data):\n if self._profile:\n print(\"Data fetch: %f\" % (time() - _t))\n _t = time()\n\n opt = self.opt\n self.current_step += 1\n #### update learning rate\n self.model.update_learning_rate(self.current_step, warmup_iter=opt['train']['warmup_iter'])\n\n #### training\n if self._profile:\n print(\"Update LR: %f\" % (time() - _t))\n _t = time()\n self.model.feed_data(train_data, self.current_step)\n self.model.optimize_parameters(self.current_step)\n if self._profile:\n print(\"Model feed + step: %f\" % (time() - _t))\n _t = time()\n\n #### log\n if self.current_step % opt['logger']['print_freq'] == 0 and self.rank <= 0:\n logs = self.model.get_current_log(self.current_step)\n message = '[epoch:{:3d}, iter:{:8,d}, lr:('.format(self.epoch, self.current_step)\n for v in self.model.get_current_learning_rate():\n message += '{:.3e},'.format(v)\n message += ')] '\n for k, v in logs.items():\n if 'histogram' in k:\n self.tb_logger.add_histogram(k, v, self.current_step)\n elif isinstance(v, dict):\n self.tb_logger.add_scalars(k, v, self.current_step)\n else:\n message += '{:s}: {:.4e} '.format(k, v)\n # tensorboard logger\n if opt['use_tb_logger'] and 'debug' not in opt['name']:\n self.tb_logger.add_scalar(k, v, self.current_step)\n if opt['wandb'] and self.rank <= 0:\n import wandb\n wandb.log(logs)\n self.logger.info(message)\n\n #### save models and training states\n if self.current_step % opt['logger']['save_checkpoint_freq'] == 0:\n if self.rank <= 0:\n self.logger.info('Saving models and training states.')\n self.model.save(self.current_step)\n self.model.save_training_state(self.epoch, self.current_step)\n if 'alt_path' in opt['path'].keys():\n import shutil\n print(\"Synchronizing tb_logger to alt_path..\")\n alt_tblogger = os.path.join(opt['path']['alt_path'], \"tb_logger\")\n shutil.rmtree(alt_tblogger, ignore_errors=True)\n shutil.copytree(self.tb_logger_path, alt_tblogger)\n\n #### validation\n if opt_get(opt, ['eval', 'pure'], False) and self.current_step % opt['train']['val_freq'] == 0:\n metrics = []\n for val_data in tqdm(self.val_loader):\n self.model.feed_data(val_data, self.current_step, perform_micro_batching=False)\n metrics.append(self.model.test())\n reduced_metrics = {}\n for metric in metrics:\n for k, v in metric.as_dict().items():\n if isinstance(v, torch.Tensor) and len(v.shape) == 0:\n if k in reduced_metrics.keys():\n reduced_metrics[k].append(v)\n else:\n reduced_metrics[k] = [v]\n if self.rank <= 0:\n for k, v in 
reduced_metrics.items():\n val = torch.stack(v).mean().item()\n self.tb_logger.add_scalar(f'val_{k}', val, self.current_step)\n print(f\">>Eval {k}: {val}\")\n if opt['wandb']:\n import wandb\n wandb.log({f'eval_{k}': torch.stack(v).mean().item() for k,v in reduced_metrics.items()})\n\n if len(self.evaluators) != 0 and self.current_step % opt['train']['val_freq'] == 0:\n eval_dict = {}\n for eval in self.evaluators:\n if eval.uses_all_ddp or self.rank <= 0:\n eval_dict.update(eval.perform_eval())\n if self.rank <= 0:\n print(\"Evaluator results: \", eval_dict)\n for ek, ev in eval_dict.items():\n self.tb_logger.add_scalar(ek, ev, self.current_step)\n if opt['wandb']:\n import wandb\n wandb.log(eval_dict)\n\n\n def do_training(self):\n self.logger.info('Start training from epoch: {:d}, iter: {:d}'.format(self.start_epoch, self.current_step))\n for epoch in range(self.start_epoch, self.total_epochs + 1):\n self.epoch = epoch\n if opt['dist']:\n self.train_sampler.set_epoch(epoch)\n tq_ldr = tqdm(self.train_loader)\n\n _t = time()\n for train_data in tq_ldr:\n self.do_step(train_data)\n\n def create_training_generator(self, index):\n self.logger.info('Start training from epoch: {:d}, iter: {:d}'.format(self.start_epoch, self.current_step))\n for epoch in range(self.start_epoch, self.total_epochs + 1):\n self.epoch = epoch\n if self.opt['dist']:\n self.train_sampler.set_epoch(epoch)\n tq_ldr = tqdm(self.train_loader, position=index)\n\n _t = time()\n for train_data in tq_ldr:\n yield self.model\n self.do_step(train_data)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-opt', type=str, help='Path to option YAML file.', default='../options/train_diffusion_vocoder_clips.yml')\n parser.add_argument('--launcher', choices=['none', 'pytorch'], default='none', help='job launcher')\n parser.add_argument('--local_rank', type=int, default=0)\n args = parser.parse_args()\n opt = option.parse(args.opt, is_train=True)\n if args.launcher != 'none':\n # export CUDA_VISIBLE_DEVICES for running in distributed mode.\n if 'gpu_ids' in opt.keys():\n gpu_list = ','.join(str(x) for x in opt['gpu_ids'])\n os.environ['CUDA_VISIBLE_DEVICES'] = gpu_list\n print('export CUDA_VISIBLE_DEVICES=' + gpu_list)\n trainer = Trainer()\n\n #### distributed training settings\n if args.launcher == 'none': # disabled distributed training\n opt['dist'] = False\n trainer.rank = -1\n if len(opt['gpu_ids']) == 1:\n torch.cuda.set_device(opt['gpu_ids'][0])\n print('Disabled distributed training.')\n else:\n opt['dist'] = True\n init_dist('nccl')\n trainer.world_size = torch.distributed.get_world_size()\n trainer.rank = torch.distributed.get_rank()\n\n trainer.init(opt, args.launcher)\n trainer.do_training()\n" ]
[ [ "numpy.clip", "torch.from_numpy", "numpy.ones", "numpy.mean", "numpy.random.rand", "numpy.zeros", "numpy.sum" ], [ "torch.cat", "torch.load" ], [ "tensorflow.train.load_variable", "tensorflow.train.list_variables" ], [ "torch.multiprocessing.set_start_method", "torch.distributed.init_process_group", "torch.cuda.set_device", "torch.cuda.current_device", "torch.autograd.set_detect_anomaly", "torch.multiprocessing.get_start_method", "torch.stack", "torch.distributed.get_rank", "torch.cuda.device_count", "torch.distributed.get_world_size" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
pclucas14/continuum
[ "09034db1371e9646ca660fd4d4df73e61bf77067", "09034db1371e9646ca660fd4d4df73e61bf77067" ]
[ "tests/test_background_swap.py", "continuum/scenarios/base.py" ]
[ "import os\n\nfrom torch.utils.data import DataLoader\nfrom continuum.datasets import CIFAR10, InMemoryDataset\nfrom continuum.datasets import MNIST\nimport torchvision\nfrom continuum.scenarios import TransformationIncremental\nimport pytest\nimport numpy as np\n\nfrom continuum.transforms.bg_swap import BackgroundSwap\n\nDATA_PATH = os.environ.get(\"CONTINUUM_DATA_PATH\")\n\n# Uncomment for debugging via image output\n# import matplotlib.pyplot as plt\n\n\ndef test_bg_swap_fast():\n \"\"\"\n Fast test for background swap.\n \"\"\"\n bg_x = np.ones(shape=[2, 5, 5, 3]) * -1\n bg_y = np.random.rand(2)\n\n fg = np.random.normal(loc=.5, scale=.1, size=[5, 5])\n bg = InMemoryDataset(bg_x, bg_y)\n\n bg_swap = BackgroundSwap(bg, input_dim=(5, 5), normalize_bg=None)\n\n spliced_1_channel = bg_swap(fg)[:, :, 0]\n\n assert np.array_equal((spliced_1_channel <= -1), (fg <= .5))\n\n\[email protected]\ndef test_background_swap_numpy():\n \"\"\"\n Test background swap on a single ndarray input.\n \"\"\"\n mnist = MNIST(DATA_PATH, download=True, train=True)\n cifar = CIFAR10(DATA_PATH, download=True, train=True)\n\n bg_swap = BackgroundSwap(cifar, input_dim=(28, 28))\n\n im = mnist.get_data()[0][0]\n im = bg_swap(im)\n\n # Uncomment for debugging\n # plt.imshow(im, interpolation='nearest')\n # plt.show()\n\n\[email protected]\ndef test_background_swap_torch():\n \"\"\"\n Test background swap on a single tensor input.\n \"\"\"\n cifar = CIFAR10(DATA_PATH, download=True, train=True)\n\n mnist = torchvision.datasets.MNIST(DATA_PATH, train=True, download=True,\n transform=torchvision.transforms.Compose([\n torchvision.transforms.ToTensor()\n ]))\n\n bg_swap = BackgroundSwap(cifar, input_dim=(28, 28))\n im = mnist[0][0]\n\n im = bg_swap(im)\n\n # Uncomment for debugging\n # plt.imshow(im.permute(1, 2, 0), interpolation='nearest')\n # plt.show()\n\n\[email protected]\ndef test_background_tranformation():\n \"\"\"\n Example code using TransformationIncremental to create a setting with 3 tasks.\n \"\"\"\n cifar = CIFAR10(DATA_PATH, train=True)\n mnist = MNIST(DATA_PATH, download=False, train=True)\n nb_task = 3\n list_trsf = []\n for i in range(nb_task):\n list_trsf.append([torchvision.transforms.ToTensor(), BackgroundSwap(cifar, bg_label=i, input_dim=(28, 28)),\n torchvision.transforms.ToPILImage()])\n scenario = TransformationIncremental(mnist, base_transformations=[torchvision.transforms.ToTensor()],\n incremental_transformations=list_trsf)\n folder = \"tests/samples/background_trsf/\"\n if not os.path.exists(folder):\n os.makedirs(folder)\n for task_id, task_data in enumerate(scenario):\n task_data.plot(path=folder, title=f\"background_{task_id}.jpg\", nb_samples=100, shape=[28, 28, 3])\n loader = DataLoader(task_data)\n _, _, _ = next(iter(loader))\n", "import abc\nfrom typing import Callable, List, Union\n\nimport numpy as np\nfrom torchvision import transforms\n\nfrom continuum.datasets import _ContinuumDataset\nfrom continuum.tasks import TaskSet, TaskType\nfrom continuum.transforms.segmentation import Compose as SegmentationCompose\n\n\nclass _BaseScenario(abc.ABC):\n \"\"\"Abstract loader.\n\n DO NOT INSTANTIATE THIS CLASS.\n\n :param cl_dataset: A Continuum dataset.\n :param nb_tasks: The number of tasks to do.\n :param transformations: A list of transformations applied to all tasks. 
If\n it's a list of list, then the transformation will be\n different per task.\n \"\"\"\n\n def __init__(\n self,\n cl_dataset: _ContinuumDataset,\n nb_tasks: int,\n transformations: Union[List[Callable], List[List[Callable]]] = None\n ) -> None:\n\n self.cl_dataset = cl_dataset\n self._nb_tasks = nb_tasks\n self.transformations = transformations\n self._counter = 0\n\n if transformations is None:\n self.transformations = self.cl_dataset.transformations\n if self.cl_dataset.data_type == TaskType.SEGMENTATION:\n composer = SegmentationCompose\n else:\n composer = transforms.Compose\n if self.transformations is not None and isinstance(self.transformations[0], list):\n # We have list of list of callable, where each sublist is dedicated to\n # a task.\n if len(self.transformations) != nb_tasks:\n raise ValueError(\n f\"When using different transformations per task, there must be as as much transformations\"\n f\" ({len(transformations)}) than there are tasks ({nb_tasks})\"\n f\", which is not currently the case.\"\n )\n self.trsf = [composer(trsf) for trsf in self.transformations]\n else:\n self.trsf = composer(self.transformations)\n\n @abc.abstractmethod\n def _setup(self, nb_tasks: int) -> int:\n raise NotImplementedError\n\n @property\n def train(self) -> bool:\n \"\"\"Returns whether we are in training or testing mode.\n\n This property is dependent on the dataset, not the actual scenario.\n \"\"\"\n return self.cl_dataset.train\n\n @property\n def nb_samples(self) -> int:\n \"\"\"Total number of samples in the whole continual setting.\"\"\"\n return len(self.dataset[0]) # type: ignore\n\n @property\n def nb_classes(self) -> int:\n \"\"\"Total number of classes in the whole continual setting.\"\"\"\n return len(np.unique(self.dataset[1])) # type: ignore\n\n @property\n def classes(self) -> List:\n \"\"\"list of classes in the whole continual setting.\"\"\"\n return np.unique(self.dataset[1]) # type: ignore\n\n @property\n def nb_tasks(self) -> int:\n \"\"\"Number of tasks in the whole continual setting.\"\"\"\n return len(self)\n\n def __len__(self) -> int:\n \"\"\"Returns the number of tasks.\n\n :return: Number of tasks.\n \"\"\"\n return self._nb_tasks\n\n def __iter__(self):\n \"\"\"Used for iterating through all tasks with the CLLoader in a for loop.\"\"\"\n self._counter = 0\n return self\n\n def __next__(self) -> TaskSet:\n \"\"\"An iteration/task in the for loop.\"\"\"\n if self._counter >= len(self):\n raise StopIteration\n task = self[self._counter]\n self._counter += 1\n return task\n\n def __getitem__(self, task_index: Union[int, slice]):\n \"\"\"Returns a task by its unique index.\n\n :param task_index: The unique index of a task. 
As for List, you can use\n indexing between [0, len], negative indexing, or\n even slices.\n :return: A train PyTorch's Datasets.\n \"\"\"\n if isinstance(task_index, slice) and isinstance(self.trsf, list):\n raise ValueError(\n f\"You cannot select multiple task ({task_index}) when you have a \"\n \"different set of transformations per task\"\n )\n\n x, y, t, _, data_indexes = self._select_data_by_task(task_index)\n\n return TaskSet(\n x, y, t,\n trsf=self.trsf[task_index] if isinstance(self.trsf, list) else self.trsf,\n data_type=self.cl_dataset.data_type,\n bounding_boxes=self.cl_dataset.bounding_boxes,\n data_indexes=data_indexes\n )\n\n def _select_data_by_task(\n self,\n task_index: Union[int, slice, np.ndarray]\n ) -> Union[np.ndarray, np.ndarray, np.ndarray, Union[int, List[int]]]:\n \"\"\"Selects a subset of the whole data for a given task.\n\n This class returns the \"task_index\" in addition of the x, y, t data.\n This task index is either an integer or a list of integer when the user\n used a slice. We need this variable when in segmentation to disentangle\n samples with multiple task ids.\n\n :param task_index: The unique index of a task. As for List, you can use\n indexing between [0, len], negative indexing, or\n even slices.\n :return: A tuple of numpy array being resp. (1) the data, (2) the targets,\n (3) task ids, and (4) the actual task required by the user.\n \"\"\"\n\n # conversion of task_index into a list\n\n if isinstance(task_index, slice):\n start = task_index.start if task_index.start is not None else 0\n stop = task_index.stop if task_index.stop is not None else len(self) + 1\n step = task_index.step if task_index.step is not None else 1\n task_index = list(range(start, stop, step))\n if len(task_index) == 0:\n raise ValueError(f\"Invalid slicing resulting in no data (start={start}, end={stop}, step={step}).\")\n\n if isinstance(task_index, np.ndarray):\n task_index = list(task_index)\n\n x, y, t = self.dataset # type: ignore\n\n if isinstance(task_index, list):\n task_index = [\n t if t >= 0 else _handle_negative_indexes(t, len(self)) for t in task_index\n ]\n if len(t.shape) == 2:\n data_indexes = np.unique(np.where(t[:, task_index] == 1)[0])\n else:\n data_indexes = np.where(np.isin(t, task_index))[0]\n else:\n if task_index < 0:\n task_index = _handle_negative_indexes(task_index, len(self))\n\n if len(t.shape) == 2:\n data_indexes = np.where(t[:, task_index] == 1)[0]\n else:\n data_indexes = np.where(t == task_index)[0]\n\n if self.cl_dataset.data_type == TaskType.H5:\n # for h5 TaskType, x is just the filename containing all data\n # no need for slicing here\n selected_x = x\n else:\n selected_x = x[data_indexes]\n selected_y = y[data_indexes]\n selected_t = t[data_indexes]\n\n if self.cl_dataset.need_class_remapping: # TODO: to remove with TransformIncremental\n # A remapping of the class ids is done to handle some special cases\n # like PermutedMNIST or RotatedMNIST.\n selected_y = self.cl_dataset.class_remapping(selected_y)\n\n return selected_x, selected_y, selected_t, task_index, data_indexes\n\n\ndef _handle_negative_indexes(index: int, total_len: int) -> int:\n if index < 0:\n index = index % total_len\n return index\n" ]
[ [ "numpy.array_equal", "torch.utils.data.DataLoader", "numpy.ones", "numpy.random.normal", "numpy.random.rand" ], [ "numpy.where", "numpy.isin", "numpy.unique" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
g-nightingale/tox_examples
[ "d7714375c764580b4b8af9db61332ced4e851def" ]
[ "packaging/squarer/ml_squarer.py" ]
[ "import numpy as np\n\n\ndef train_ml_squarer() -> None:\n print(\"Training!\")\n\n\ndef square() -> int:\n \"\"\"Square a number...maybe\"\"\"\n return np.random.randint(1, 100)\n\n\nif __name__ == '__main__':\n train_ml_squarer()" ]
[ [ "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
GOOGLE-M/SGC
[ "78ad8d02b80808302e38559e2d0f430f66a809bd", "78ad8d02b80808302e38559e2d0f430f66a809bd", "78ad8d02b80808302e38559e2d0f430f66a809bd", "78ad8d02b80808302e38559e2d0f430f66a809bd" ]
[ "venv/lib/python3.7/site-packages/torch/utils/benchmark/utils/timer.py", "venv/lib/python3.7/site-packages/caffe2/python/operator_test/locally_connected_op_test.py", "venv/lib/python3.7/site-packages/torch/nn/parallel/distributed.py", "venv/lib/python3.7/site-packages/torch/nn/init.py" ]
[ "\"\"\"Timer class based on the timeit.Timer class, but torch aware.\"\"\"\nimport enum\nimport timeit\nimport textwrap\nfrom typing import Any, Callable, Dict, List, NoReturn, Optional, Type, Union\n\nimport numpy as np\nimport torch\nfrom torch.utils.benchmark.utils import common, cpp_jit\nfrom torch.utils.benchmark.utils._stubs import TimerClass, TimeitModuleType\nfrom torch.utils.benchmark.utils.valgrind_wrapper import timer_interface as valgrind_timer_interface\n\n\n__all__ = [\"Timer\", \"timer\", \"Language\"]\n\n\nif torch.has_cuda and torch.cuda.is_available():\n def timer() -> float:\n torch.cuda.synchronize()\n return timeit.default_timer()\nelse:\n timer = timeit.default_timer\n\n\nclass Language(enum.Enum):\n PYTHON = 0\n CPP = 1\n\n\nclass CPPTimer:\n def __init__(\n self,\n stmt: str,\n setup: str,\n timer: Callable[[], float],\n globals: Dict[str, Any],\n ) -> None:\n if timer is not timeit.default_timer:\n raise NotImplementedError(\n \"PyTorch was built with CUDA and a GPU is present; however \"\n \"Timer does not yet support GPU measurements. If your \"\n \"code is CPU only, pass `timer=timeit.default_timer` to the \"\n \"Timer's constructor to indicate this. (Note that this will \"\n \"produce incorrect results if the GPU is in fact used, as \"\n \"Timer will not synchronize CUDA.)\"\n )\n\n if globals:\n raise ValueError(\"C++ timing does not support globals.\")\n\n self._stmt: str = textwrap.dedent(stmt)\n self._setup: str = textwrap.dedent(setup)\n self._timeit_module: Optional[TimeitModuleType] = None\n\n def timeit(self, number: int) -> float:\n if self._timeit_module is None:\n self._timeit_module = cpp_jit.compile_timeit_template(\n self._stmt,\n self._setup,\n )\n\n return self._timeit_module.timeit(number)\n\n\nclass Timer(object):\n \"\"\"Helper class for measuring execution time of PyTorch statements.\n\n For a full tutorial on how to use this class, see:\n https://pytorch.org/tutorials/recipes/recipes/benchmark.html\n\n The PyTorch Timer is based on `timeit.Timer` (and in fact uses\n `timeit.Timer` internally), but with several key differences:\n\n 1) Runtime aware:\n Timer will perform warmups (important as some elements of PyTorch are\n lazily initialized), set threadpool size so that comparisons are\n apples-to-apples, and synchronize asynchronous CUDA functions when\n necessary.\n\n 2) Focus on replicates:\n When measuring code, and particularly complex kernels / models,\n run-to-run variation is a significant confounding factor. It is\n expected that all measurements should include replicates to quantify\n noise and allow median computation, which is more robust than mean.\n To that effect, this class deviates from the `timeit` API by\n conceptually merging `timeit.Timer.repeat` and `timeit.Timer.autorange`.\n (Exact algorithms are discussed in method docstrings.) The `timeit`\n method is replicated for cases where an adaptive strategy is not\n desired.\n\n 3) Optional metadata:\n When defining a Timer, one can optionally specify `label`, `sub_label`,\n `description`, and `env`. 
(Defined later) These fields are included in\n the representation of result object and by the `Compare` class to group\n and display results for comparison.\n\n 4) Instruction counts\n In addition to wall times, Timer can run a statement under Callgrind\n and report instructions executed.\n\n Directly analogous to `timeit.Timer` constructor arguments:\n\n `stmt`, `setup`, `timer`, `globals`\n\n PyTorch Timer specific constructor arguments:\n\n `label`, `sub_label`, `description`, `env`, `num_threads`\n\n Args:\n stmt: Code snippet to be run in a loop and timed.\n\n setup: Optional setup code. Used to define variables used in `stmt`\n\n timer:\n Callable which returns the current time. If PyTorch was built\n without CUDA or there is no GPU present, this defaults to\n `timeit.default_timer`; otherwise it will synchronize CUDA before\n measuring the time.\n\n globals:\n A dict which defines the global variables when `stmt` is being\n executed. This is the other method for providing variables which\n `stmt` needs.\n\n label:\n String which summarizes `stmt`. For instance, if `stmt` is\n \"torch.nn.functional.relu(torch.add(x, 1, out=out))\"\n one might set label to \"ReLU(x + 1)\" to improve readability.\n\n sub_label:\n Provide supplemental information to disambiguate measurements\n with identical stmt or label. For instance, in our example\n above sub_label might be \"float\" or \"int\", so that it is easy\n to differentiate:\n \"ReLU(x + 1): (float)\"\n\n \"ReLU(x + 1): (int)\"\n when printing Measurements or summarizing using `Compare`.\n\n description:\n String to distinguish measurements with identical label and\n sub_label. The principal use of `description` is to signal to\n `Compare` the columns of data. For instance one might set it\n based on the input size to create a table of the form: ::\n\n | n=1 | n=4 | ...\n ------------- ...\n ReLU(x + 1): (float) | ... | ... | ...\n ReLU(x + 1): (int) | ... | ... | ...\n\n\n using `Compare`. It is also included when printing a Measurement.\n\n env:\n This tag indicates that otherwise identical tasks were run in\n different environments, and are therefore not equivilent, for\n instance when A/B testing a change to a kernel. `Compare` will\n treat Measurements with different `env` specification as distinct\n when merging replicate runs.\n\n num_threads:\n The size of the PyTorch threadpool when executing `stmt`. Single\n threaded performace is important as both a key inference workload\n and a good indicator of intrinsic algorithmic efficiency, so the\n default is set to one. 
This is in contrast to the default PyTorch\n threadpool size which tries to utilize all cores.\n \"\"\"\n\n _timer_cls: Type[TimerClass] = timeit.Timer\n\n def __init__(\n self,\n stmt: str = \"pass\",\n setup: str = \"pass\",\n timer: Callable[[], float] = timer,\n globals: Optional[Dict[str, Any]] = None,\n label: Optional[str] = None,\n sub_label: Optional[str] = None,\n description: Optional[str] = None,\n env: Optional[str] = None,\n num_threads: int = 1,\n language: Union[Language, str] = Language.PYTHON,\n ):\n if not isinstance(stmt, str):\n raise ValueError(\"Currently only a `str` stmt is supported.\")\n\n # We copy `globals` to prevent mutations from leaking.\n # (For instance, `eval` adds the `__builtins__` key)\n self._globals = dict(globals or {})\n if language in (Language.PYTHON, \"py\", \"python\"):\n # Include `torch` if not specified as a convenience feature.\n self._globals.setdefault(\"torch\", torch)\n self._language: Language = Language.PYTHON\n\n elif language in (Language.CPP, \"cpp\", \"c++\"):\n assert self._timer_cls is timeit.Timer, \"_timer_cls has already been swapped.\"\n self._timer_cls = CPPTimer\n setup = (\"\" if setup == \"pass\" else setup)\n self._language = Language.CPP\n\n else:\n raise ValueError(f\"Invalid language `{language}`.\")\n\n # Convenience adjustment so that multi-line code snippets defined in\n # functions do not IndentationError (Python) or look odd (C++). The\n # leading newline removal is for the initial newline that appears when\n # defining block strings. For instance:\n # textwrap.dedent(\"\"\"\n # print(\"This is a stmt\")\n # \"\"\")\n # produces '\\nprint(\"This is a stmt\")\\n'.\n #\n # Stripping this down to 'print(\"This is a stmt\")' doesn't change\n # what gets executed, but it makes __repr__'s nicer.\n stmt = textwrap.dedent(stmt)\n stmt = (stmt[1:] if stmt and stmt[0] == \"\\n\" else stmt).rstrip()\n setup = textwrap.dedent(setup)\n setup = (setup[1:] if setup and setup[0] == \"\\n\" else setup).rstrip()\n\n self._timer = self._timer_cls(\n stmt=stmt,\n setup=setup,\n timer=timer,\n globals=valgrind_timer_interface.CopyIfCallgrind.unwrap_all(self._globals),\n )\n self._task_spec = common.TaskSpec(\n stmt=stmt,\n setup=setup,\n label=label,\n sub_label=sub_label,\n description=description,\n env=env,\n num_threads=num_threads,\n )\n\n def timeit(self, number: int = 1000000) -> common.Measurement:\n \"\"\"Mirrors the semantics of timeit.Timer.timeit().\n\n Execute the main statement (`stmt`) `number` times.\n https://docs.python.org/3/library/timeit.html#timeit.Timer.timeit\n \"\"\"\n with common.set_torch_threads(self._task_spec.num_threads):\n # Warmup\n self._timer.timeit(number=max(int(number // 100), 1))\n\n return common.Measurement(\n number_per_run=number,\n raw_times=[self._timer.timeit(number=number)],\n task_spec=self._task_spec\n )\n\n def repeat(self, repeat: int = -1, number: int = -1) -> None:\n raise NotImplementedError(\"See `Timer.blocked_autorange.`\")\n\n def autorange(self, callback: Optional[Callable[[int, float], NoReturn]] = None) -> None:\n raise NotImplementedError(\"See `Timer.blocked_autorange.`\")\n\n def _threaded_measurement_loop(\n self,\n number: int,\n time_hook: Callable[[], float],\n stop_hook: Callable[[List[float]], bool],\n min_run_time: float,\n max_run_time: Optional[float] = None,\n callback: Optional[Callable[[int, float], NoReturn]] = None\n ) -> List[float]:\n total_time = 0.0\n can_stop = False\n times: List[float] = []\n with 
common.set_torch_threads(self._task_spec.num_threads):\n while (total_time < min_run_time) or (not can_stop):\n time_spent = time_hook()\n times.append(time_spent)\n total_time += time_spent\n if callback:\n callback(number, time_spent)\n can_stop = stop_hook(times)\n if max_run_time and total_time > max_run_time:\n break\n return times\n\n def _estimate_block_size(self, min_run_time: float) -> int:\n with common.set_torch_threads(self._task_spec.num_threads):\n # Estimate the block size needed for measurement to be negligible\n # compared to the inner loop. This also serves as a warmup.\n overhead = np.median([self._timer.timeit(0) for _ in range(5)])\n number = 1\n while True:\n time_taken = self._timer.timeit(number)\n relative_overhead = overhead / time_taken\n if relative_overhead <= 1e-4 and time_taken >= min_run_time / 1000:\n break\n if time_taken > min_run_time:\n break\n number *= 10\n return number\n\n def adaptive_autorange(\n self,\n threshold: float = 0.1,\n *,\n min_run_time: float = 0.01,\n max_run_time: float = 10.0,\n callback: Optional[Callable[[int, float], NoReturn]] = None,\n ) -> common.Measurement:\n number = self._estimate_block_size(min_run_time=0.05)\n\n def time_hook() -> float:\n return self._timer.timeit(number)\n\n def stop_hook(times: List[float]) -> bool:\n if len(times) > 3:\n return common.Measurement(\n number_per_run=number,\n raw_times=times,\n task_spec=self._task_spec\n ).meets_confidence(threshold=threshold)\n return False\n times = self._threaded_measurement_loop(\n number, time_hook, stop_hook, min_run_time, max_run_time, callback=callback)\n\n return common.Measurement(\n number_per_run=number,\n raw_times=times,\n task_spec=self._task_spec\n )\n\n def blocked_autorange(\n self,\n callback: Optional[Callable[[int, float], NoReturn]] = None,\n min_run_time: float = 0.2,\n ) -> common.Measurement:\n \"\"\"Measure many replicates while keeping timer overhead to a minimum.\n\n At a high level, blocked_autorange executes the following pseudo-code::\n\n `setup`\n\n total_time = 0\n while total_time < min_run_time\n start = timer()\n for _ in range(block_size):\n `stmt`\n total_time += (timer() - start)\n\n Note the variable `block_size` in the inner loop. The choice of block\n size is important to measurement quality, and must balance two\n competing objectives:\n\n 1) A small block size results in more replicates and generally\n better statistics.\n\n 2) A large block size better amortizes the cost of `timer`\n invocation, and results in a less biased measurement. This is\n important because CUDA syncronization time is non-trivial\n (order single to low double digit microseconds) and would\n otherwise bias the measurement.\n\n blocked_autorange sets block_size by running a warmup period,\n increasing block size until timer overhead is less than 0.1% of\n the overall computation. 
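A minimal usage sketch of the adaptive measurement described above (illustrative only; the tensor size, metadata strings, and `min_run_time` value are arbitrary choices, not part of the API):

import torch
from torch.utils.benchmark import Timer, Compare

# Hypothetical example: measure a small matmul on CPU with a single thread.
x = torch.randn(64, 64)
t = Timer(
    stmt="torch.mm(x, x)",
    globals={"x": x},
    label="matmul",
    sub_label="64x64",
    description="float32",
    num_threads=1,
)
m = t.blocked_autorange(min_run_time=0.2)  # many replicates, low timer overhead
print(m.median)                            # robust per-invocation time estimate
# adaptive_autorange() stops instead once the replicates meet a confidence
# threshold; Compare groups results by the label/sub_label/description set above.
Compare([m]).print()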
This value is then used for the main\n measurement loop.\n\n Returns:\n A `Measurement` object that contains measured runtimes and\n repetition counts, and can be used to compute statistics.\n (mean, median, etc.)\n \"\"\"\n number = self._estimate_block_size(min_run_time)\n\n def time_hook() -> float:\n return self._timer.timeit(number)\n\n def stop_hook(times: List[float]) -> bool:\n return True\n\n times = self._threaded_measurement_loop(\n number, time_hook, stop_hook,\n min_run_time=min_run_time,\n callback=callback)\n\n return common.Measurement(\n number_per_run=number,\n raw_times=times,\n task_spec=self._task_spec\n )\n\n def collect_callgrind(\n self,\n number: int = 100,\n collect_baseline: bool = True\n ) -> valgrind_timer_interface.CallgrindStats:\n \"\"\"Collect instruction counts using Callgrind.\n\n Unlike wall times, instruction counts are deterministic\n (modulo non-determinism in the program itself and small amounts of\n jitter from the Python interpreter.) This makes them ideal for detailed\n performance analysis. This method runs `stmt` in a separate process\n so that Valgrind can instrument the program. Performance is severely\n degraded due to the instrumentation, howevever this is ameliorated by\n the fact that a small number of iterations is generally sufficient to\n obtain good measurements.\n\n In order to to use this method `valgrind`, `callgrind_control`, and\n `callgrind_annotate` must be installed.\n\n Because there is a process boundary between the caller (this process)\n and the `stmt` execution, `globals` cannot contain arbitrary in-memory\n data structures. (Unlike timing methods) Instead, globals are\n restricted to builtins, `nn.Modules`'s, and TorchScripted functions/modules\n to reduce the surprise factor from serialization and subsequent\n deserialization. The `GlobalsBridge` class provides more detail on this\n subject. Take particular care with nn.Modules: they rely on pickle and\n you may need to add an import to `setup` for them to transfer properly.\n\n By default, a profile for an empty statement will be collected and\n cached to indicate how many instructions are from the Python loop which\n drives `stmt`.\n\n Returns:\n A `CallgrindStats` object which provides instruction counts and\n some basic facilities for analyzing and manipulating results.\n \"\"\"\n if not isinstance(self._task_spec.stmt, str):\n raise ValueError(\"`collect_callgrind` currently only supports string `stmt`\")\n\n # Check that the statement is valid. 
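For reference, a hedged sketch of how instruction counting is typically invoked; it assumes `valgrind`, `callgrind_control`, and `callgrind_annotate` are installed, as required above:

import torch
from torch.utils.benchmark import Timer

# Hypothetical example: count instructions for a small op. Counts are
# deterministic modulo interpreter jitter, so number=100 already gives a
# stable signal despite the heavy Valgrind slowdown.
t = Timer(stmt="torch.add(x, 1)", globals={"x": torch.ones(8)})
stats = t.collect_callgrind(number=100)
print(stats)  # CallgrindStats summary (instruction counts, baseline, etc.)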
It doesn't guarantee success, but it's much\n # simpler and quicker to raise an exception for a faulty `stmt` or `setup` in\n # the parent process rather than the valgrind subprocess.\n self._timer.timeit(1)\n is_python = (self._language == Language.PYTHON)\n assert is_python or not self._globals\n return valgrind_timer_interface.wrapper_singleton().collect_callgrind(\n task_spec=self._task_spec,\n globals=self._globals,\n number=number,\n collect_baseline=collect_baseline and is_python,\n is_python=is_python)\n", "\n\n\n\nimport numpy as np\nfrom hypothesis import given, settings, assume\nimport hypothesis.strategies as st\n\nfrom caffe2.python import core, utils, workspace\nimport caffe2.python.hypothesis_test_util as hu\nimport caffe2.python.serialized_test.serialized_test_util as serial\n\n\n\nclass TestLocallyConnectedOp(serial.SerializedTestCase):\n @given(N=st.integers(1, 3),\n C=st.integers(1, 3),\n H=st.integers(1, 5),\n W=st.integers(1, 5),\n M=st.integers(1, 3),\n kernel=st.integers(1, 3),\n op_name=st.sampled_from([\"LC\", \"LC2D\"]),\n order=st.sampled_from([\"NCHW\", \"NHWC\"]),\n use_bias=st.booleans(),\n **hu.gcs)\n @settings(deadline=10000)\n def test_lc_2d(\n self, N, C, H, W, M, kernel, op_name, order, use_bias, gc, dc):\n if H < kernel:\n kernel = H\n if W < kernel:\n kernel = W\n\n assume(C == kernel * N)\n\n op = core.CreateOperator(\n op_name,\n [\"X\", \"W\", \"b\"] if use_bias else [\"X\", \"W\"],\n [\"Y\"],\n kernels=[kernel, kernel],\n order=order,\n engine=\"\",\n )\n\n Y_H = H - kernel + 1\n Y_W = W - kernel + 1\n if order == \"NCHW\":\n X = np.random.rand(N, C, H, W).astype(np.float32) - 0.5\n W = np.random.rand(Y_H, Y_W, M, C, kernel,\n kernel).astype(np.float32) - 0.5\n else:\n X = np.random.rand(N, H, W, C).astype(np.float32) - 0.5\n W = np.random.rand(Y_H, Y_W, M, kernel, kernel,\n C).astype(np.float32) - 0.5\n b = np.random.rand(Y_H, Y_W, M).astype(np.float32) - 0.5\n inputs = [X, W, b] if use_bias else [X, W]\n\n def lc_2d_nchw(X, W, b=None):\n N, C, XH, XW = X.shape\n YH, YW, M, _, KH, KW = W.shape\n\n def conv(n, m, yh, yw):\n sum = b[yh, yw, m] if b is not None else 0\n for c in range(C):\n for kh in range(KH):\n for kw in range(KW):\n hh = yh + kh\n ww = yw + kw\n sum += X[n, c, hh, ww] * W[yh, yw, m, c, kh, kw]\n return sum\n\n output = np.zeros((N, M, YH, YW), dtype=np.float32)\n for n in range(N):\n for m in range(M):\n for yh in range(YH):\n for yw in range(YW):\n output[n, m, yh, yw] = conv(n, m, yh, yw)\n return [output]\n\n def lc_2d_nhwc(X, W, b=None):\n XT = utils.NHWC2NCHW(X)\n WT = np.transpose(W, [0, 1, 2, 5, 3, 4])\n output = lc_2d_nchw(XT, WT, b)\n return [utils.NCHW2NHWC(output[0])]\n\n ref_op = lc_2d_nchw if order == \"NCHW\" else lc_2d_nhwc\n\n self.assertReferenceChecks(\n device_option=gc,\n op=op,\n inputs=inputs,\n reference=ref_op,\n )\n self.assertDeviceChecks(dc, op, inputs, [0])\n for i in range(len(inputs)):\n self.assertGradientChecks(gc, op, inputs, i, [0])\n\n @given(N=st.integers(1, 3),\n C=st.integers(1, 3),\n size=st.integers(1, 5),\n M=st.integers(1, 3),\n kernel=st.integers(1, 3),\n op_name=st.sampled_from([\"LC\", \"LC1D\"]),\n use_bias=st.booleans(),\n **hu.gcs)\n @settings(deadline=1000)\n def test_lc_1d(self, N, C, size, M, kernel, op_name, use_bias, gc, dc):\n if workspace.has_hip_support:\n # Skip as test flaky on ROCM with deadline set to 1000\n return\n if size < kernel:\n kernel = size\n\n op = core.CreateOperator(\n op_name,\n [\"X\", \"W\", \"b\"] if use_bias else [\"X\", \"W\"],\n [\"Y\"],\n 
kernels=[kernel],\n order=\"NCHW\",\n engine=\"\",\n )\n\n L = size - kernel + 1\n X = np.random.rand(N, C, size).astype(np.float32) - 0.5\n W = np.random.rand(L, M, C, kernel).astype(np.float32) - 0.5\n b = np.random.rand(L, M).astype(np.float32) - 0.5\n inputs = [X, W, b] if use_bias else [X, W]\n\n def lc_1d_nchw(X, W, b=None):\n N, C, XL = X.shape\n YL, M, _, KL = W.shape\n\n def conv(n, m, yl):\n sum = b[yl, m] if b is not None else 0\n for c in range(C):\n for kl in range(KL):\n ll = yl + kl\n sum += X[n, c, ll] * W[yl, m, c, kl]\n return sum\n\n output = np.zeros((N, M, YL), dtype=np.float32)\n for n in range(N):\n for m in range(M):\n for yl in range(YL):\n output[n, m, yl] = conv(n, m, yl)\n return [output]\n\n self.assertReferenceChecks(\n device_option=gc,\n op=op,\n inputs=inputs,\n reference=lc_1d_nchw,\n )\n self.assertDeviceChecks(dc, op, inputs, [0])\n for i in range(len(inputs)):\n self.assertGradientChecks(gc, op, inputs, i, [0])\n\n @given(N=st.integers(1, 1),\n C=st.integers(1, 1),\n T=st.integers(2, 2),\n H=st.integers(2, 2),\n W=st.integers(2, 2),\n M=st.integers(1, 1),\n kernel=st.integers(2, 2),\n op_name=st.sampled_from([\"LC\", \"LC3D\"]),\n use_bias=st.booleans(),\n **hu.gcs)\n @settings(deadline=1000)\n def test_lc_3d(self, N, C, T, H, W, M, kernel, op_name, use_bias, gc, dc):\n if T < kernel:\n kernel = T\n if H < kernel:\n kernel = H\n if W < kernel:\n kernel = W\n\n op = core.CreateOperator(\n op_name,\n [\"X\", \"W\", \"b\"] if use_bias else [\"X\", \"W\"],\n [\"Y\"],\n kernels=[kernel, kernel, kernel],\n order=\"NCHW\",\n engine=\"\",\n )\n\n Y_T = T - kernel + 1\n Y_H = H - kernel + 1\n Y_W = W - kernel + 1\n X = np.random.rand(N, C, T, H, W).astype(np.float32) - 0.5\n W = np.random.rand(Y_T, Y_H, Y_W, M, C, kernel,\n kernel, kernel).astype(np.float32) - 0.5\n b = np.random.rand(Y_T, Y_H, Y_W, M).astype(np.float32) - 0.5\n inputs = [X, W, b] if use_bias else [X, W]\n\n def lc_3d_nchw(X, W, b=None):\n N, C, XT, XH, XW = X.shape\n YT, YH, YW, M, _, KT, KH, KW = W.shape\n\n def conv(n, m, yt, yh, yw):\n sum = b[yt, yh, yw, m] if b is not None else 0\n for c in range(C):\n for kt in range(KT):\n for kh in range(KH):\n for kw in range(KW):\n tt = yt + kt\n hh = yh + kh\n ww = yw + kw\n sum += X[n, c, tt, hh, ww] * \\\n W[yt, yh, yw, m, c, kt, kh, kw]\n return sum\n\n output = np.zeros((N, M, YT, YH, YW), dtype=np.float32)\n for n in range(N):\n for m in range(M):\n for yt in range(YT):\n for yh in range(YH):\n for yw in range(YW):\n output[n, m, yt, yh, yw] = conv(\n n, m, yt, yh, yw)\n return [output]\n\n self.assertReferenceChecks(\n device_option=gc,\n op=op,\n inputs=inputs,\n reference=lc_3d_nchw,\n )\n self.assertDeviceChecks(dc, op, inputs, [0])\n for i in range(len(inputs)):\n self.assertGradientChecks(gc, op, inputs, i, [0])\n", "from contextlib import contextmanager\nimport copy\nimport itertools\nimport os\nimport inspect\nimport logging\nimport warnings\nfrom typing import NamedTuple\n\nimport torch\n\nfrom . 
import comm\nimport torch.distributed as dist\n\nRPC_AVAILABLE = False\nif dist.is_available():\n from torch.distributed.distributed_c10d import _get_default_group\n from torch.distributed.distributed_c10d import ReduceOp\nif torch.distributed.rpc.is_available():\n RPC_AVAILABLE = True\n from torch.distributed.rpc import RRef\nfrom ..modules import Module\nfrom .replicate import replicate\nfrom .scatter_gather import scatter_kwargs, gather, is_namedtuple\nfrom .parallel_apply import parallel_apply\nfrom torch._utils import _get_device_index, _get_all_device_indices\nfrom ._functions import _get_stream\n\n\ndef _find_tensors(obj):\n r\"\"\"\n Recursively find all tensors contained in the specified object.\n \"\"\"\n if RPC_AVAILABLE and isinstance(obj, RRef):\n # If the current node is the owner of the RRef, unwrap it and try to\n # find Tensors.\n # TODO: Expand to remote RRefs.\n if obj.is_owner():\n return _find_tensors(obj.local_value())\n if isinstance(obj, torch.Tensor):\n return [obj]\n if isinstance(obj, (list, tuple)):\n return itertools.chain(*map(_find_tensors, obj))\n if isinstance(obj, dict):\n return itertools.chain(*map(_find_tensors, obj.values()))\n return []\n\ndef _dump_DDP_relevant_env_vars():\n relevant_env_vars = [\n \"RANK\",\n \"LOCAL_RANK\",\n \"WORLD_SIZE\",\n \"MASTER_PORT\",\n \"MASTER_ADDR\",\n \"CUDA_VISIBLE_DEVICES\",\n \"GLOO_SOCKET_IFNAME\",\n \"GLOO_DEVICE_TRANSPORT\",\n \"NCCL_SOCKET_IFNAME\",\n \"NCCL_BLOCKING_WAIT\",\n \"NCCL_DEBUG\",\n \"NCCL_DEBUG_SUBSYS\",\n \"NCCL_IB_DISABLE\",\n # More NCCL env vars:\n \"NCCL_P2P_DISABLE\",\n \"NCCL_P2P_LEVEL\",\n \"NCCL_SHM_DISABLE\",\n \"NCCL_SOCKET_NTHREADS\",\n \"NCCL_NSOCKS_PERTHREAD\",\n \"NCCL_BUFFSIZE\",\n \"NCCL_NTHREADS\",\n \"NCCL_RINGS\",\n \"NCCL_MAX_NCHANNELS\",\n \"NCCL_MIN_NCHANNELS\",\n \"NCCL_CHECKS_DISABLE\",\n \"NCCL_CHECK_POINTERS\",\n \"NCCL_LAUNCH_MODE\",\n \"NCCL_IB_HCA\",\n \"NCCL_IB_TIMEOUT\",\n \"NCCL_IB_RETRY_CNT\",\n \"NCCL_IB_GID_INDEX\",\n \"NCCL_IB_SL\",\n \"NCCL_IB_TC\",\n \"NCCL_IB_AR_THRESHOLD\",\n \"NCCL_IB_CUDA_SUPPORT\",\n \"NCCL_NET_GDR_LEVEL\",\n \"NCCL_NET_GDR_READ\",\n \"NCCL_SINGLE_RING_THRESHOLD\",\n \"NCCL_LL_THRESHOLD\",\n \"NCCL_TREE_THRESHOLD\",\n \"NCCL_ALGO\",\n \"NCCL_PROTO\",\n \"NCCL_IGNORE_CPU_AFFINITY\",\n \"NCCL_DEBUG_FILE\",\n \"NCCL_COLLNET_ENABLE\",\n \"NCCL_TOPO_FILE\",\n \"NCCL_TOPO_DUMP_FILE\",\n ]\n formatted_output = \"\"\n for var in relevant_env_vars:\n value = os.environ[var] if var in os.environ else \"N/A\"\n formatted_output += \"env:%s=%s\\n\" % (var, value)\n print(formatted_output)\n\n\n\nclass _DDPUnevenInputsConfig(NamedTuple):\n ddp_join_enabled: bool\n ddp_join_divide_by_initial_world_size: bool\n\n\nclass DistributedDataParallel(Module):\n r\"\"\"Implements distributed data parallelism that is based on\n ``torch.distributed`` package at the module level.\n\n This container parallelizes the application of the given module by\n splitting the input across the specified devices by chunking in the batch\n dimension. The module is replicated on each machine and each device, and\n each such replica handles a portion of the input. 
During the backwards\n pass, gradients from each node are averaged.\n\n The batch size should be larger than the number of GPUs used locally.\n\n See also: :ref:`distributed-basics` and :ref:`cuda-nn-ddp-instead`.\n The same constraints on input as in :class:`torch.nn.DataParallel` apply.\n\n Creation of this class requires that ``torch.distributed`` to be already\n initialized, by calling :func:`torch.distributed.init_process_group`.\n\n ``DistributedDataParallel`` is proven to be significantly faster than\n :class:`torch.nn.DataParallel` for single-node multi-GPU data\n parallel training.\n\n To use ``DistributedDataParallel`` on a host with N GPUs, you should spawn\n up ``N`` processes, ensuring that each process exclusively works on a single\n GPU from 0 to N-1. This can be done by either setting\n ``CUDA_VISIBLE_DEVICES`` for every process or by calling:\n\n >>> torch.cuda.set_device(i)\n\n where i is from 0 to N-1. In each process, you should refer the following\n to construct this module:\n\n >>> torch.distributed.init_process_group(\n >>> backend='nccl', world_size=N, init_method='...'\n >>> )\n >>> model = DistributedDataParallel(model, device_ids=[i], output_device=i)\n\n In order to spawn up multiple processes per node, you can use either\n ``torch.distributed.launch`` or ``torch.multiprocessing.spawn``.\n\n .. note ::\n Please refer to `PyTorch Distributed Overview <https://pytorch.org/tutorials/beginner/dist_overview.html>`__\n for a brief introduction to all features related to distributed training.\n\n .. note:: ``nccl`` backend is currently the fastest and highly recommended\n backend when using GPUs. This applies to both single-node and\n multi-node distributed training.\n\n .. note:: This module also supports mixed-precision distributed training.\n This means that your model can have different types of parameters such\n as mixed types of ``fp16`` and ``fp32``, the gradient reduction on these\n mixed types of parameters will just work fine.\n\n .. note:: If you use ``torch.save`` on one process to checkpoint the module,\n and ``torch.load`` on some other processes to recover it, make sure that\n ``map_location`` is configured properly for every process. Without\n ``map_location``, ``torch.load`` would recover the module to devices\n where the module was saved from.\n\n .. note:: When a model is trained on ``M`` nodes with ``batch=N``, the\n gradient will be ``M`` times smaller when compared to the same model\n trained on a single node with ``batch=M*N`` if the loss is summed (NOT\n averaged as usual) across instances in a batch (because the gradients\n between different nodes are averaged). You should take this into\n consideration when you want to obtain a mathematically equivalent\n training process compared to the local training counterpart. But in most\n cases, you can just treat a DistributedDataParallel wrapped model, a\n DataParallel wrapped model and an ordinary model on a single GPU as the\n same (E.g. using the same learning rate for equivalent batch size).\n\n .. note::\n Parameters are never broadcast between processes. The module performs\n an all-reduce step on gradients and assumes that they will be modified\n by the optimizer in all processes in the same way. Buffers\n (e.g. BatchNorm stats) are broadcast from the module in process of rank\n 0, to all other replicas in the system in every iteration.\n\n .. 
note::\n If you are using DistributedDataParallel in conjunction with the\n :ref:`distributed-rpc-framework`, you should always use\n :meth:`torch.distributed.autograd.backward` to compute gradients and\n :class:`torch.distributed.optim.DistributedOptimizer` for optimizing\n parameters.\n\n Example::\n\n >>> import torch.distributed.autograd as dist_autograd\n >>> from torch.nn.parallel import DistributedDataParallel as DDP\n >>> from torch import optim\n >>> from torch.distributed.optim import DistributedOptimizer\n >>> from torch.distributed.rpc import RRef\n >>>\n >>> t1 = torch.rand((3, 3), requires_grad=True)\n >>> t2 = torch.rand((3, 3), requires_grad=True)\n >>> rref = rpc.remote(\"worker1\", torch.add, args=(t1, t2))\n >>> ddp_model = DDP(my_model)\n >>>\n >>> # Setup optimizer\n >>> optimizer_params = [rref]\n >>> for param in ddp_model.parameters():\n >>> optimizer_params.append(RRef(param))\n >>>\n >>> dist_optim = DistributedOptimizer(\n >>> optim.SGD,\n >>> optimizer_params,\n >>> lr=0.05,\n >>> )\n >>>\n >>> with dist_autograd.context() as context_id:\n >>> pred = ddp_model(rref.to_here())\n >>> loss = loss_func(pred, loss)\n >>> dist_autograd.backward(context_id, loss)\n >>> dist_optim.step()\n\n .. warning::\n Constructor, forward method, and differentiation of the output (or a\n function of the output of this module) are distributed synchronization\n points. Take that into account in case different processes might be\n executing different code.\n\n .. warning::\n This module assumes all parameters are registered in the model by the\n time it is created. No parameters should be added nor removed later.\n Same applies to buffers.\n\n .. warning::\n This module assumes all parameters are registered in the model of each\n distributed processes are in the same order. The module itself will\n conduct gradient ``allreduce`` following the reverse order of the\n registered parameters of the model. In other words, it is users'\n responsibility to ensure that each distributed process has the exact\n same model and thus the exact same parameter registration order.\n\n .. warning::\n This module allows parameters with non-rowmajor-contiguous strides.\n For example, your model may contain some parameters whose\n :class:`torch.memory_format` is ``torch.contiguous_format``\n and others whose format is ``torch.channels_last``. However,\n corresponding parameters in different processes must have the\n same strides.\n\n .. warning::\n This module doesn't work with :func:`torch.autograd.grad` (i.e. it will\n only work if gradients are to be accumulated in ``.grad`` attributes of\n parameters).\n\n .. warning::\n If you plan on using this module with a ``nccl`` backend or a ``gloo``\n backend (that uses Infiniband), together with a DataLoader that uses\n multiple workers, please change the multiprocessing start method to\n ``forkserver`` (Python 3 only) or ``spawn``. Unfortunately\n Gloo (that uses Infiniband) and NCCL2 are not fork safe, and you will\n likely experience deadlocks if you don't change this setting.\n\n .. warning::\n Forward and backward hooks defined on :attr:`module` and its submodules\n won't be invoked anymore, unless the hooks are initialized in the\n :meth:`forward` method.\n\n .. warning::\n You should never try to change your model's parameters after wrapping\n up your model with ``DistributedDataParallel``. 
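With respect to the fork-safety warning above (NCCL or Infiniband-Gloo together with a multi-worker DataLoader), one illustrative way to opt the workers into a fork-safe start method; the dataset and sizes below are placeholders:

import torch
from torch.utils.data import DataLoader, TensorDataset

# Illustrative sketch: use a "spawn" (or "forkserver") multiprocessing context
# for DataLoader workers, since NCCL and Infiniband-Gloo are not fork safe.
dataset = TensorDataset(torch.randn(128, 8), torch.randint(0, 2, (128,)))
loader = DataLoader(
    dataset,
    batch_size=32,
    num_workers=4,
    multiprocessing_context="spawn",
)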
Because, when\n wrapping up your model with ``DistributedDataParallel``, the constructor\n of ``DistributedDataParallel`` will register the additional gradient\n reduction functions on all the parameters of the model itself at the\n time of construction. If you change the model's parameters afterwards,\n gradient redunction functions no longer match the correct set of\n parameters.\n\n .. warning::\n Using ``DistributedDataParallel`` in conjunction with the\n :ref:`distributed-rpc-framework` is experimental and subject to change.\n\n .. warning::\n The ``gradient_as_bucket_view`` mode does not yet work with Automatic\n Mixed Precision (AMP). AMP maintains stashed gradients that are used for\n unscaling gradients. With ``gradient_as_bucket_view=True``, these\n stashed gradients will point to communication buckets in the first\n iteration. In the next iteration, the communication buckets are mutated\n and thus these stashed gradients will be unexpectedly mutated as well,\n which might lead to wrong results.\n\n Args:\n module (Module): module to be parallelized\n device_ids (list of int or torch.device): CUDA devices. This should\n only be provided when the input module resides on a single\n CUDA device. For single-device modules, the i'th\n :attr:`module` replica is placed on ``device_ids[i]``. For\n multi-device modules and CPU modules, ``device_ids`` must be\n ``None`` or an empty list, and input data for the forward\n pass must be placed on the correct device. (default: all\n visible devices for single-device modules)\n output_device (int or torch.device): Device location of output for\n single-device CUDA modules. For multi-device modules and\n CPU modules, it must be ``None``, and the module itself\n dictates the output location. (default: ``device_ids[0]``\n for single-device modules)\n broadcast_buffers (bool): Flag that enables syncing (broadcasting)\n buffers of the module at beginning of the ``forward``\n function. (default: ``True``)\n process_group: The process group to be used for distributed data\n all-reduction. If ``None``, the default process group, which\n is created by :func:`torch.distributed.init_process_group`,\n will be used. (default: ``None``)\n bucket_cap_mb: ``DistributedDataParallel`` will bucket parameters into\n multiple buckets so that gradient reduction of each\n bucket can potentially overlap with backward computation.\n :attr:`bucket_cap_mb` controls the bucket size in\n MegaBytes (MB). (default: 25)\n find_unused_parameters (bool): Traverse the autograd graph from all\n tensors contained in the return value of the\n wrapped module's ``forward`` function. Parameters\n that don't receive gradients as part of this\n graph are preemptively marked as being ready to\n be reduced. Note that all ``forward`` outputs\n that are derived from module parameters must\n participate in calculating loss and later the\n gradient computation. If they don't, this wrapper\n will hang waiting for autograd to produce\n gradients for those parameters. Any outputs\n derived from module parameters that are otherwise\n unused can be detached from the autograd graph\n using ``torch.Tensor.detach``. (default: ``False``)\n check_reduction: This argument is deprecated.\n gradient_as_bucket_view (bool): This is a prototype feature and subject\n to changes. When set to ``True``, gradients will be views\n pointing to different offsets of ``allreduce`` communication\n buckets. This can reduce peak memory usage, where the\n saved memory size will be equal to the total gradients\n size. 
Moreover, it avoids the overhead of copying between\n gradients and ``allreduce`` communication buckets. When\n gradients are views, ``detach_()`` cannot be called on the\n gradients. If hitting such errors, please fix it by\n referring to the :meth:`~torch.optim.Optimizer.zero_grad`\n function in ``torch/optim/optimizer.py`` as a solution.\n\n\n Attributes:\n module (Module): the module to be parallelized.\n\n Example::\n\n >>> torch.distributed.init_process_group(backend='nccl', world_size=4, init_method='...')\n >>> net = torch.nn.parallel.DistributedDataParallel(model, pg)\n \"\"\"\n def __init__(self, module, device_ids=None,\n output_device=None, dim=0, broadcast_buffers=True,\n process_group=None,\n bucket_cap_mb=25,\n find_unused_parameters=False,\n check_reduction=False,\n gradient_as_bucket_view=False):\n\n super(DistributedDataParallel, self).__init__()\n\n assert any((p.requires_grad for p in module.parameters())), (\n \"DistributedDataParallel is not needed when a module \"\n \"doesn't have any parameter that requires a gradient.\"\n )\n\n self.is_multi_device_module = len({p.device for p in module.parameters()}) > 1\n distinct_device_types = {p.device.type for p in module.parameters()}\n assert len(distinct_device_types) == 1, (\n \"DistributedDataParallel's input module must be on \"\n \"the same type of devices, but input module parameters locate in {}.\"\n ).format(distinct_device_types)\n self.device_type = list(distinct_device_types)[0]\n\n if self.device_type == \"cpu\" or self.is_multi_device_module:\n assert not device_ids and not output_device, (\n \"DistributedDataParallel device_ids and output_device arguments \"\n \"only work with single-device GPU modules, but got \"\n \"device_ids {}, output_device {}, and module parameters {}.\"\n ).format(device_ids, output_device, {p.device for p in module.parameters()})\n\n self.device_ids = None\n self.output_device = None\n else:\n # Use all devices by default for single-device GPU modules\n if device_ids is None:\n device_ids = _get_all_device_indices()\n\n self.device_ids = [_get_device_index(x, True) for x in device_ids]\n\n if output_device is None:\n output_device = device_ids[0]\n\n self.output_device = _get_device_index(output_device, True)\n\n if process_group is None:\n self.process_group = _get_default_group()\n else:\n self.process_group = process_group\n\n self.dim = dim\n self.module = module\n self.device = list(self.module.parameters())[0].device\n self.broadcast_buffers = broadcast_buffers\n self.find_unused_parameters = find_unused_parameters\n self.require_backward_grad_sync = True\n self.require_forward_param_sync = True\n self.ddp_uneven_inputs_config = _DDPUnevenInputsConfig(\n ddp_join_enabled=False, ddp_join_divide_by_initial_world_size=False\n )\n self.gradient_as_bucket_view = gradient_as_bucket_view\n if hasattr(module, '_ddp_params_and_buffers_to_ignore'):\n self.parameters_to_ignore = module._ddp_params_and_buffers_to_ignore\n else:\n self.parameters_to_ignore = []\n\n if check_reduction:\n # This argument is no longer used since the reducer\n # will ensure reduction completes even if some parameters\n # do not receive gradients.\n warnings.warn(\n \"The `check_reduction` argument in `DistributedDataParallel` \"\n \"module is deprecated. 
Please avoid using it.\"\n )\n pass\n\n # Check that a module does not have Uninitialized parameters\n for param in module.parameters():\n if isinstance(param, torch.nn.parameter.UninitializedParameter):\n raise RuntimeError(\n 'Modules with uninitialized parameters can\\'t be used with `DistributedDataParallel`. '\n 'Run a dummy forward pass to correctly initialize the modules')\n # used for intra-node param sync and inter-node sync as wel\n self.broadcast_bucket_size = int(250 * 1024 * 1024)\n\n # reduction bucket size\n self.bucket_bytes_cap = int(bucket_cap_mb * 1024 * 1024)\n # Whether to perform input tensor CPU to GPU copies on a side-stream\n self.use_side_stream_for_tensor_copies = os.environ.get(\"PYTORCH_DDP_USE_SIDE_STREAM\", \"1\") == \"1\"\n\n # Sync params and buffers\n self._sync_params_and_buffers(authoritative_rank=0)\n\n self._ddp_init_helper()\n\n def _sync_params_and_buffers(self, authoritative_rank=0):\n module_states = []\n for name, param in self.module.state_dict().items():\n if name not in self.parameters_to_ignore:\n module_states.append(param)\n\n if len(module_states) > 0:\n self._distributed_broadcast_coalesced(\n module_states,\n self.broadcast_bucket_size,\n authoritative_rank)\n\n def _ddp_init_helper(self):\n \"\"\"\n Initialization helper function that does the following:\n\n (1) replicating the module from device[0] to the other devices\n (2) bucketing the parameters for reductions\n (3) resetting the bucketing states\n (4) registering the grad hooks\n (5) passing a handle of DDP to SyncBatchNorm Layer\n \"\"\"\n\n def parameters(m, recurse=True):\n def model_parameters(m):\n ps = m._former_parameters.values() \\\n if hasattr(m, \"_former_parameters\") \\\n else m.parameters(recurse=False)\n for p in ps:\n yield p\n\n for m in m.modules() if recurse else [m]:\n for p in model_parameters(m):\n yield p\n\n if self.device_ids and len(self.device_ids) > 1:\n\n warnings.warn(\n \"Single-Process Multi-GPU is not the recommended mode for \"\n \"DDP. In this mode, each DDP instance operates on multiple \"\n \"devices and creates multiple module replicas within one \"\n \"process. The overhead of scatter/gather and GIL contention \"\n \"in every forward pass can slow down training. \"\n \"Please consider using one DDP instance per device or per \"\n \"module replica by explicitly setting device_ids or \"\n \"CUDA_VISIBLE_DEVICES. \"\n )\n\n # only create replicas for single-device CUDA modules\n #\n # TODO: we don't need to replicate params in here. 
they're always going to\n # be broadcasted using larger blocks in broadcast_coalesced, so it might be\n # better to not pollute the caches with these small blocks\n self._module_copies = replicate(self.module, self.device_ids, detach=True)\n self._module_copies[0] = self.module\n\n for module_copy in self._module_copies[1:]:\n for param, copy_param in zip(self.module.parameters(), parameters(module_copy)):\n # Reducer requires param copies have the same strides across replicas.\n # Fixes up copy_param strides in case replicate didn't match param strides.\n if param.layout is torch.strided and param.stride() != copy_param.stride():\n with torch.no_grad():\n copy_param.set_(copy_param.clone()\n .as_strided(param.size(), param.stride())\n .copy_(copy_param))\n copy_param.requires_grad = param.requires_grad\n\n else:\n self._module_copies = [self.module]\n\n self.modules_params = [list(parameters(m)) for m in self._module_copies]\n # Collect buffers for modules, filtering out buffers that should be ignored.\n named_module_buffers = [\n [(buffer, buffer_name) for buffer_name, buffer in m.named_buffers()]\n for m in self._module_copies\n ]\n self.modules_buffers = [\n [\n buffer\n for (buffer, buffer_name) in module_buffers\n if buffer_name not in self.parameters_to_ignore\n ]\n for module_buffers in named_module_buffers\n ]\n # Build tuple of (module, parameter) for all parameters that require grads.\n if self.device_ids and len(self.device_ids) > 1:\n # Single-process multi-device mode,does not support self.parameters_to_ignore.\n if self.parameters_to_ignore:\n raise ValueError(\n \"Single-Process multi-device mode does not \"\n \"support ignoring parameters upfront. Please consider \"\n \"using one DDP instance per device.\"\n )\n\n modules_and_parameters = [\n [\n (module, parameter)\n for module in replica.modules()\n for parameter in filter(\n lambda parameter: parameter.requires_grad,\n parameters(module, recurse=False))\n ] for replica in self._module_copies]\n else:\n modules_and_parameters = [\n [\n (module, parameter)\n for module_name, module in replica.named_modules()\n for parameter in [\n param\n # Note that we access module.named_parameters instead of\n # parameters(module). parameters(module) is only needed in the\n # single-process multi device case, where it accesses replicated\n # parameters through _former_parameters.\n for param_name, param in module.named_parameters(recurse=False)\n if param.requires_grad\n and f\"{module_name}.{param_name}\" not in self.parameters_to_ignore\n ]\n ]\n for replica in self._module_copies\n ]\n\n # Build list of parameters.\n parameters = [\n list(parameter for _, parameter in replica)\n for replica in modules_and_parameters]\n\n # Checks if a module will produce a sparse gradient.\n def produces_sparse_gradient(module):\n if isinstance(module, torch.nn.Embedding):\n return module.sparse\n if isinstance(module, torch.nn.EmbeddingBag):\n return module.sparse\n return False\n\n # Build list of booleans indicating whether or not to expect sparse\n # gradients for the corresponding parameters.\n expect_sparse_gradient = [\n list(produces_sparse_gradient(module) for module, _ in replica)\n for replica in modules_and_parameters]\n\n # The bucket size limit is specified in the constructor.\n # Additionally, we allow for a single small bucket for parameters\n # that are defined first, such that their gradients don't spill into\n # a much larger bucket, adding unnecessary latency after gradient\n # computation finishes. 
Experiments showed 1MB is a reasonable value.\n bucket_indices = dist._compute_bucket_assignment_by_size(\n parameters[0],\n [dist._DEFAULT_FIRST_BUCKET_BYTES, self.bucket_bytes_cap],\n expect_sparse_gradient[0])\n\n # Note: reverse list of buckets because we want to approximate the\n # order in which their gradients are produced, and assume they\n # are used in the forward pass in the order they are defined.\n self.reducer = dist.Reducer(\n parameters,\n list(reversed(bucket_indices)),\n self.process_group,\n expect_sparse_gradient,\n self.bucket_bytes_cap,\n self.find_unused_parameters,\n self.gradient_as_bucket_view)\n\n # Set logging data that can be got during construction time.\n dist._set_construction_logging_data(\n self.reducer,\n self.module.__class__.__name__,\n [] if self.device_ids is None else self.device_ids,\n -1 if self.output_device is None else self.output_device,\n self.broadcast_buffers)\n\n # passing a handle to torch.nn.SyncBatchNorm layer\n self._passing_sync_batchnorm_handle(self._module_copies)\n\n def __getstate__(self):\n self._check_default_group()\n attrs = copy.copy(self.__dict__)\n del attrs['process_group']\n del attrs['reducer']\n return attrs\n\n def __setstate__(self, state):\n # If serializable, then the process group should be the default one\n self.process_group = _get_default_group()\n super(DistributedDataParallel, self).__setstate__(state)\n self.__dict__.setdefault('require_forward_param_sync', True)\n self.__dict__.setdefault('require_backward_grad_sync', True)\n self._ddp_init_helper()\n\n def _check_default_group(self):\n pickle_not_supported = False\n try:\n if self.process_group != _get_default_group():\n pickle_not_supported = True\n except RuntimeError:\n pickle_not_supported = True\n\n if pickle_not_supported:\n raise RuntimeError(\"DDP Pickling/Unpickling are only supported \"\n \"when using DDP with the default process \"\n \"group. That is, when you have called \"\n \"init_process_group and have not passed \"\n \"process_group argument to DDP constructor\")\n\n @contextmanager\n def no_sync(self):\n r\"\"\"\n A context manager to disable gradient synchronizations across DDP\n processes. Within this context, gradients will be accumulated on module\n variables, which will later be synchronized in the first\n forward-backward pass exiting the context.\n\n Example::\n\n >>> ddp = torch.nn.parallel.DistributedDataParallel(model, pg)\n >>> with ddp.no_sync():\n >>> for input in inputs:\n >>> ddp(input).backward() # no synchronization, accumulate grads\n >>> ddp(another_input).backward() # synchronize grads\n \"\"\"\n old_require_backward_grad_sync = self.require_backward_grad_sync\n self.require_backward_grad_sync = False\n try:\n yield\n finally:\n self.require_backward_grad_sync = old_require_backward_grad_sync\n\n def forward(self, *inputs, **kwargs):\n if self.ddp_uneven_inputs_config.ddp_join_enabled:\n ones = torch.ones(\n 1, device=self.device\n )\n work = dist.all_reduce(ones, group=self.process_group, async_op=True)\n self.reducer._set_forward_pass_work_handle(\n work, self.ddp_uneven_inputs_config.ddp_join_divide_by_initial_world_size\n )\n\n # Calling _rebuild_buckets before forward compuation,\n # It may allocate new buckets before deallocating old buckets\n # inside _rebuild_buckets. 
To save peak memory usage,\n # call _rebuild_buckets before the peak memory usage increases\n # during forward computation.\n # This should be called only once during whole training period.\n if self.reducer._rebuild_buckets():\n logging.info(\"Reducer buckets have been rebuilt in this iteration.\")\n\n if self.require_forward_param_sync:\n self._sync_params()\n\n if self.ddp_uneven_inputs_config.ddp_join_enabled:\n # Notify joined ranks whether they should sync in backwards pass or not.\n self._check_global_requires_backward_grad_sync(is_joined_rank=False)\n\n if self.device_ids:\n if len(self.device_ids) == 1:\n inputs, kwargs = self.to_kwargs(inputs, kwargs, self.device_ids[0])\n output = self.module(*inputs[0], **kwargs[0])\n else:\n inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)\n outputs = self.parallel_apply(self._module_copies[:len(inputs)], inputs, kwargs)\n output = self.gather(outputs, self.output_device)\n else:\n output = self.module(*inputs, **kwargs)\n\n if torch.is_grad_enabled() and self.require_backward_grad_sync:\n self.require_forward_param_sync = True\n # We'll return the output object verbatim since it is a freeform\n # object. We need to find any tensors in this object, though,\n # because we need to figure out which parameters were used during\n # this forward pass, to ensure we short circuit reduction for any\n # unused parameters. Only if `find_unused_parameters` is set.\n if self.find_unused_parameters:\n self.reducer.prepare_for_backward(list(_find_tensors(output)))\n else:\n self.reducer.prepare_for_backward([])\n else:\n self.require_forward_param_sync = False\n\n return output\n\n def scatter(self, inputs, kwargs, device_ids):\n return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)\n\n def _recursive_to(self, inputs, target_gpu):\n r\"\"\"\n Recursively moves input to the target_gpu.\n \"\"\"\n def to_map(obj):\n if isinstance(obj, torch.Tensor):\n if not self.use_side_stream_for_tensor_copies:\n return (obj.to(target_gpu), )\n else:\n # Perform CPU -> GPU copies in a background stream. 
This code is\n # motivated from similar logic in torch/nn/parallel/_functions.py\n stream = _get_stream(target_gpu)\n with torch.cuda.stream(stream):\n output = obj.to(target_gpu)\n # synchronize with the copy stream\n with torch.cuda.device(target_gpu):\n current_stream = torch.cuda.current_stream()\n # Sync the current stream with the copy stream\n current_stream.wait_stream(stream)\n # Ensure tensor memory is not reused until work on\n # main stream is complete\n output.record_stream(current_stream)\n return (output, )\n if is_namedtuple(obj):\n return [type(obj)(*args) for args in zip(*map(to_map, obj))]\n if isinstance(obj, tuple) and len(obj) > 0:\n return list(zip(*map(to_map, obj)))\n if isinstance(obj, list) and len(obj) > 0:\n return [list(i) for i in zip(*map(to_map, obj))]\n if isinstance(obj, dict) and len(obj) > 0:\n return [type(obj)(i) for i in zip(*map(to_map, obj.items()))]\n return [obj]\n\n # Avoid reference cycle\n try:\n res = to_map(inputs)\n finally:\n to_map = None\n return res\n\n def to_kwargs(self, inputs, kwargs, device_id):\n inputs = self._recursive_to(inputs, device_id) if inputs else []\n kwargs = self._recursive_to(kwargs, device_id) if kwargs else []\n if len(inputs) < len(kwargs):\n inputs.extend([() for _ in range(len(kwargs) - len(inputs))])\n elif len(kwargs) < len(inputs):\n kwargs.extend([{} for _ in range(len(inputs) - len(kwargs))])\n inputs = tuple(inputs)\n kwargs = tuple(kwargs)\n return inputs, kwargs\n\n def parallel_apply(self, replicas, inputs, kwargs):\n return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])\n\n def gather(self, outputs, output_device):\n return gather(outputs, output_device, dim=self.dim)\n\n def train(self, mode=True):\n super(DistributedDataParallel, self).train(mode)\n for module in self._module_copies[1:]:\n module.train(mode)\n return self\n\n def get_ddp_logging_data(self):\n return dist._get_ddp_logging_data(self.reducer)\n\n # When running in join mode, schedules an allreduce to match the one in the\n # forward pass to determine the no. 
of currently active processes and whether\n # all processes have joined.\n def _schedule_shadow_all_reduce_for_fwd_pass(self):\n all_active_procs = torch.zeros(\n 1, device=self.device\n )\n dist.all_reduce(all_active_procs, group=self.process_group)\n return all_active_procs.item()\n\n # When running in join mode, schedules an allreduce to notify joined ranks\n # of whether backwards pass synchronization will run this iteraton or not.\n def _check_global_requires_backward_grad_sync(self, is_joined_rank):\n if not is_joined_rank and self.require_backward_grad_sync:\n requires_sync_tensor = torch.ones(1, device=self.device)\n else:\n requires_sync_tensor = torch.zeros(1, device=self.device)\n\n work = dist.all_reduce(\n requires_sync_tensor, group=self.process_group, async_op=True\n )\n return work, requires_sync_tensor\n\n # When running in join mode, checks and performs sync of module buffers if\n # the models have buffers that should be synchronized in the forward pass.\n def _check_and_sync_module_buffers(self):\n if self.will_sync_module_buffers():\n my_rank = dist.get_rank(self.process_group)\n authoritative_rank = self._find_common_rank(my_rank, False)\n self._distributed_broadcast_coalesced(\n self.modules_buffers[0], self.broadcast_bucket_size, authoritative_rank\n )\n\n # When running in join model, agrees upon a common rank and broadcast model\n # parameters to all other ranks.\n def _sync_final_model(self, is_last_joiner):\n # Agree upon the process that will be the authoritative model copy.\n # The current rank is a candidate for being the authoritative copy if\n # is_last_joiner=True. We break ties via picking the larger rank.\n my_rank = dist.get_rank(self.process_group)\n self._authoritative_rank = self._find_common_rank(my_rank, is_last_joiner)\n self._sync_params_and_buffers(authoritative_rank=self._authoritative_rank)\n\n # Schedule allreduce ops to match those scheduled in the reducer's backward\n # pass.\n def _match_all_reduce_for_bwd_pass(self):\n allreduce_work = []\n # Schedule allreduce in the same order as Reducer schedules them, i.e.\n # the order of the buckets. Retrieving the bucket order from the reducer\n # ensures that we keep the same order in join mode, such as when bucket\n # order is rebuilt dynamically.\n all_bucket_tensors = self.reducer.get_bucket_tensors()\n for bucket_tensors in all_bucket_tensors:\n # Joined processes contribute zero gradient. In the case that\n # divide_by_initial_world_size=True, we divide grads by the static\n # world size, if not, the dividing factor is reduced by the number\n # of joined processes.\n zero_tensors = [\n torch.zeros_like(t) for t in bucket_tensors\n ]\n work = self.process_group.allreduce(zero_tensors)\n allreduce_work.append(work)\n for work in allreduce_work:\n work.wait()\n\n # Allreduces the used parameter mapping across ranks.\n def _match_unused_params_allreduce(self):\n locally_used_param_maps = self.reducer._get_local_used_maps()\n self.process_group.allreduce(locally_used_param_maps)\n\n @contextmanager\n def join(self, divide_by_initial_world_size=True, enable=True):\n r\"\"\"\n A context manager to be used in conjunction with an instance of\n :class:`torch.nn.parallel.DistributedDataParallel` to be\n able to train with uneven inputs across participating processes.\n\n This context manager will keep track of already-joined DDP processes,\n and \"shadow\" the forward and backward passes by inserting collective\n communication operations to match with the ones created by non-joined\n DDP processes. 
This will ensure each collective call has a corresponding\n call by already-joined DDP processes, preventing hangs or errors that\n would otherwise happen when training with uneven inputs across\n processes.\n\n Once all DDP processes have joined, the context manager will broadcast\n the model corresponding to the last joined process to all processes to\n ensure the model is the same across all processes\n (which is guaranteed by DDP).\n\n To use this to enable training with uneven inputs across processes,\n simply wrap this context manager around your training loop. No further\n modifications to the model or data loading is required.\n\n .. warning::\n This module works only with the multi-process, single-device usage\n of :class:`torch.nn.parallel.DistributedDataParallel`,\n which means that a single process works on a single GPU.\n\n .. warning::\n This module currently does not support custom distributed collective\n operations in the forward pass, such as ``SyncBatchNorm`` or other\n custom defined collectives in the model's forward pass.\n\n Args:\n divide_by_initial_world_size (bool): If ``True``, will divide\n gradients by the initial ``world_size`` DDP training was launched\n with. If ``False``, will compute the effective world size\n (number of ranks that have not depleted their inputs yet) and\n divide gradients by that during allreduce. Set\n ``divide_by_initial_world_size=True`` to ensure every input\n sample including the uneven inputs have equal weight in terms of\n how much they contribute to the global gradient. This is\n achieved by always dividing the gradient by the initial\n ``world_size`` even when we encounter uneven inputs. If you set\n this to ``False``, we divide the gradient by the remaining\n number of nodes. This ensures parity with training on a smaller\n ``world_size`` although it also means the uneven inputs would\n contribute more towards the global gradient. Typically, you\n would want to set this to ``True`` for cases where the last few\n inputs of your training job are uneven. In extreme cases, where\n there is a large discrepancy in the number of inputs, setting\n this to ``False`` might provide better results.\n enable (bool): Whether to enable uneven input detection or not. Pass\n in ``enable=False`` to disable in cases where you know that\n inputs are even across participating processes. Default is\n ``True``.\n\n\n Example::\n\n >>> import torch\n >>> import torch.distributed as dist\n >>> import os\n >>> import torch.multiprocessing as mp\n >>> import torch.nn as nn\n >>> # On each spawned worker\n >>> def worker(rank):\n >>> dist.init_process_group(\"nccl\", rank=rank, world_size=2)\n >>> torch.cuda.set_device(rank)\n >>> model = nn.Linear(1, 1, bias=False).to(rank)\n >>> model = torch.nn.parallel.DistributedDataParallel(\n >>> model, device_ids=[rank], output_device=rank\n >>> )\n >>> # Rank 1 gets one more input than rank 0.\n >>> inputs = [torch.tensor([1]).float() for _ in range(10 + rank)]\n >>> with model.join():\n >>> for _ in range(5):\n >>> for inp in inputs:\n >>> loss = model(inp).sum()\n >>> loss.backward()\n >>> # Without the join() API, the below synchronization will hang\n >>> # blocking for rank 1's allreduce to complete.\n >>> torch.cuda.synchronize(device=rank)\n \"\"\"\n try:\n if self.device_ids and len(self.device_ids) > 1:\n raise ValueError(\n \"\"\"DDP join() API does not support Single-Process Multi-GPU\n mode training. 
The recommended approach for DDP training is\n to spawn a single process that works on a single GPU.\"\"\"\n )\n has_error = False\n self.ddp_uneven_inputs_config = _DDPUnevenInputsConfig(\n ddp_join_enabled=enable,\n ddp_join_divide_by_initial_world_size=divide_by_initial_world_size,\n )\n yield\n except Exception as e:\n # Set to skip any processing in the finally block.\n has_error = True\n raise e\n finally:\n # Skip any processing to let the exception immediately be raised if\n # there was one.\n if enable and not has_error:\n all_procs_joined = False\n is_last_joiner = True\n i = 0\n WARN_THRESHOLD = 1000\n warnings.simplefilter(\"once\")\n while not all_procs_joined:\n if i > WARN_THRESHOLD:\n my_rank = dist.get_rank(self.process_group)\n warnings.warn(\n \"Detected uneven input skew of greater \"\n f\"than {WARN_THRESHOLD}. This means that rank {my_rank} \"\n f\"has at least {WARN_THRESHOLD} fewer inputs than \"\n \"other currently active ranks. This level of skew could \"\n \"lead to performance degradation during training.\"\n )\n # Schedules allreduce to match fwd pass allreduce in non-joined procs\n num_active_procs = self._schedule_shadow_all_reduce_for_fwd_pass()\n if num_active_procs == 0:\n all_procs_joined = True\n else:\n # Some DDP process still needs to be joined.\n if is_last_joiner:\n is_last_joiner = False\n # It will rebuild buckets only once during training period\n self.reducer._rebuild_buckets()\n # Schedule a corresponding broadcast if we are syncing module\n # buffers in the forward pass.\n self._check_and_sync_module_buffers()\n\n (\n work,\n should_sync_backwards_tensor,\n ) = self._check_global_requires_backward_grad_sync(\n is_joined_rank=True\n )\n work.wait()\n # If nonzero, then we should sync in the bwd pass.\n should_sync_backwards = should_sync_backwards_tensor.item() != 0\n # Forward param sync is disabled in the next iteration\n # if we are skipping grad sync this iteration. Hence, we\n # set require_forward_param_sync appropriately here.\n self.require_forward_param_sync = should_sync_backwards\n if not should_sync_backwards:\n continue\n # Schedules one allreduce per gradient bucket to match\n # the backwards pass allreduce.\n self._match_all_reduce_for_bwd_pass()\n # Check if we need to allreduce locally unused params.\n if self.find_unused_parameters:\n self._match_unused_params_allreduce()\n # It will push rebuilt params only once during training period\n self.reducer._push_all_rebuilt_params()\n i += 1\n\n # All procs joined. Agree on authoritative rank and broadcast the model.\n self._sync_final_model(is_last_joiner)\n\n def register_comm_hook(self, state: object, hook: callable):\n r\"\"\"\n Registers a communication hook which is an enhancement that provides a\n flexible hook to users where they can specify how DDP aggregates gradients\n across multiple workers.\n\n This hook would be very useful for researchers to try out new ideas. 
For\n example, this hook can be used to implement several algorithms like GossipGrad\n and gradient compression which involve different communication strategies for\n parameter syncs while running Distributed DataParallel training.\n\n Args:\n state (object): Passed to the hook to maintain any state information during the training process.\n Examples include error feedback in gradient compression,\n peers to communicate with next in GossipGrad, etc.\n\n It is locally stored by each worker\n and shared by all the gradient tensors on the worker.\n hook (callable): Averages gradient tensors across workers and defined as:\n ``hook(state: object, bucket: dist._GradBucket) -> torch.futures.Future``:\n\n This function is called once the bucket is ready. The\n hook can perform whatever processing is needed and return\n a Future indicating completion of any async work (ex: allreduce).\n If the hook doesn't perform any communication, it can also\n just return a completed Future. The Future should hold the\n new value of grad bucket's tensors. Once a bucket is ready,\n c10d reducer would call this hook and use the tensors returned\n by the Future and copy grads to individual parameters.\n\n We also provide an API called ``get_future`` to retrieve a\n Future associated with the completion of ``c10d.ProcessGroup.work``.\n\n .. warning ::\n Grad bucket's tensors will not be predivided by world_size. User is responsible\n to divide by the world_size in case of operations like allreduce.\n\n .. warning ::\n DDP communication hook can only be registered once and should be registered\n before calling backward.\n\n .. warning ::\n The Future object that hook returns should contain a result that has the same\n shape with the tensors inside grad bucket.\n\n .. warning ::\n DDP communication hook does not support single-process multiple-device mode.\n Gradbucket tensors should consist of only a single tensor.\n\n .. warning ::\n ``get_future`` API supports only NCCL backend and will return a ``torch._C.Future``\n which is an internal type and should be used with caution. It can still be used by\n ``register_comm_hook`` API, but it is subject to some subtle differences compared\n to ``torch.futures.Future``.\n\n .. 
warning ::\n DDP communication hook is experimental and subject to change.\n\n Example::\n Below is an example of a noop hook that returns the same tensors.\n\n >>> def noop(state: object, bucket: dist._GradBucket): -> torch.futures.Future\n >>> fut = torch.futures.Future()\n >>> fut.set_result(bucket.get_tensors())\n >>> return fut\n\n >>> ddp.register_comm_hook(state = None, hook = noop)\n\n Example::\n Below is an example of a Parallel SGD algorithm where gradients are encoded before\n allreduce, and then decoded after allreduce.\n\n >>> def encode_and_decode(state: object, bucket: dist._GradBucket): -> torch.futures.Future\n >>> tensors = [t / process_group.world_size for t in bucket.get_tensors()]\n >>> encoded_tensors = encode(tensors) # encode gradients\n >>> fut = process_group.allreduce(encoded_tensors).get_future()\n >>> # Define the then callback to decode.\n >>> def decode(fut):\n >>> decoded_tensors = decode(fut.value()) # decode gradients\n >>> return decoded_tensors\n >>> return fut.then(decode)\n\n >>> ddp.register_comm_hook(state = None, hook = encode_and_decode)\n \"\"\"\n self._check_comm_hook(hook)\n dist._register_comm_hook(self.reducer, state, hook)\n\n def _register_builtin_comm_hook(\n self, comm_hook_type\n ):\n r\"\"\"\n Registers a built-in communication hook that specifies how DDP\n aggregates gradients across multiple workers.\n The built-in hooks aim to provide efficient C++ implementations for certain hooks,\n which might not be as efficient if implemented in Python using a Python communication hook.\n\n Args:\n comm_hook_type (dist.BuiltinCommHookType): type of communication hook, such as\n ALLREDUCE, FP16_COMPRESS, etc.\n\n .. warning ::\n DDP communication hook can only be registered once and should be registered\n before calling backward.\n\n .. warning ::\n DDP communication hook does not support single-process multiple-device mode.\n Gradbucket tensors should consist of only a single tensor.\n\n .. warning ::\n DDP communication hook is experimental and subject to change.\n\n Example::\n Below is an example of a FP16 compression where gradients are\n compressed into 16-bit floating-point numbers before allreduce, and\n then decompressed after allreduce.\n\n >>> ddp._register_builtin_comm_hook(dist.BuiltinCommHookType.FP16_COMPRESS)\n\n \"\"\"\n dist._register_builtin_comm_hook(self.reducer, comm_hook_type)\n\n def _distributed_broadcast_coalesced(\n self, tensors, buffer_size, authoritative_rank=0\n ):\n dist._broadcast_coalesced(\n self.process_group, tensors, buffer_size, authoritative_rank\n )\n\n def will_sync_module_buffers(self):\n return (\n self.require_forward_param_sync\n and self.broadcast_buffers\n and len(self.modules_buffers[0]) > 0\n )\n\n def _find_common_rank(self, input_rank, rank_cond):\n # -1 indicates that this rank is not under consideration to be the\n # common_rank\n rank_to_use = torch.tensor(\n [input_rank if rank_cond else -1],\n device=self.device,\n )\n dist.all_reduce(rank_to_use, op=ReduceOp.MAX, group=self.process_group)\n if rank_to_use.item() == -1:\n raise ValueError(\n \"BUG! 
Expected rank_cond to be true for at least one process.\"\n )\n return rank_to_use.item()\n\n def _sync_params(self):\n with torch.no_grad():\n # only do intra-node parameters sync for replicated single-device\n # CUDA modules\n if self.device_ids and len(self.device_ids) > 1:\n # intra-node parameter sync\n result = comm.broadcast_coalesced(\n self.modules_params[0],\n self.device_ids,\n self.broadcast_bucket_size)\n for tensors, module_params in zip(result[1:],\n self.modules_params[1:]):\n for tensor, param in zip(tensors, module_params):\n # Formerly, this spot used param.set_(tensor) to steal tensor's\n # data without a deep copy. Unfortunately, that wiped out the\n # allreduce hook attached to param's AccumulateGrad function,\n # likely causing https://github.com/pytorch/pytorch/issues/37079.\n # TODO: If set_ becomes safe to use here, use set_.\n # Otherwise, find another way to steal tensor's data.\n param.copy_(tensor)\n # Assume we have just run the optimizer and zeroed the\n # grads of the parameters on the root model. We need\n # to zero the grads on all model replicas as well.\n # This snippet is copied from torch.optim.Optimizer.\n if param.grad is not None:\n if param.grad.grad_fn is not None:\n param.grad.detach_()\n else:\n param.grad.requires_grad_(False)\n param.grad.zero_()\n\n # module buffer sync\n if self.will_sync_module_buffers():\n # Synchronize buffers across processes.\n # If we are running DDP with the join manager, we have to agree\n # upon a rank to sync module buffers from, since rank 0 may\n # already have been joined and have stale module buffers.\n if self.ddp_uneven_inputs_config.ddp_join_enabled:\n authoritative_rank = self._find_common_rank(dist.get_rank(), True)\n else:\n # The process with rank 0 is considered the authoritative copy.\n authoritative_rank = 0\n self._distributed_broadcast_coalesced(\n self.modules_buffers[0],\n self.broadcast_bucket_size,\n authoritative_rank,\n )\n # only do intra-node buffer sync for replicated single-device\n # CUDA modules\n if self.device_ids and len(self.device_ids) > 1:\n # intra-node buffer sync\n result = comm.broadcast_coalesced(\n self.modules_buffers[0],\n self.device_ids,\n self.broadcast_bucket_size)\n for tensors, module_buffers in zip(result[1:],\n self.modules_buffers[1:]):\n for tensor, buffer in zip(tensors, module_buffers):\n buffer.set_(tensor)\n\n def _passing_sync_batchnorm_handle(self, module_copies):\n for dev_idx, module in enumerate(module_copies):\n for layer in module.modules():\n if isinstance(layer, torch.nn.modules.SyncBatchNorm):\n assert self.device_type != 'cpu', \"SyncBatchNorm layers only work with GPU modules\"\n layer._specify_ddp_gpu_num(\n len(self.device_ids) if self.device_ids else 1)\n\n def _check_comm_hook(self, hook):\n if not callable(hook):\n raise TypeError(\"Communication hook must be callable.\")\n\n sig = inspect.signature(hook)\n if (\n sig.parameters[\"bucket\"].annotation != inspect._empty\n and sig.parameters[\"bucket\"].annotation != dist._GradBucket\n ):\n raise ValueError(\n \"Communication hook: bucket annotation should be dist._GradBucket.\"\n )\n\n if sig.return_annotation != inspect._empty and (\n sig.return_annotation != torch.futures.Future\n and sig.return_annotation != torch._C.Future\n ):\n raise ValueError(\n \"Communication hook: return annotation should be torch.futures.Future or torch._C.Future.\"\n )\n\n @staticmethod\n def _set_params_and_buffers_to_ignore_for_model(\n module, params_and_buffers_to_ignore\n ):\n # This is a workaround to set 
parameters and buffers DDP should ignore\n # during synchronization. It will be removed when the API is finalized\n # as part of addressing https://github.com/pytorch/pytorch/issues/43690.\n module._ddp_params_and_buffers_to_ignore = params_and_buffers_to_ignore\n", "import math\nimport warnings\n\nfrom torch import Tensor\nimport torch\n\n\n# These no_grad_* functions are necessary as wrappers around the parts of these\n# functions that use `with torch.no_grad()`. The JIT doesn't support context\n# managers, so these need to be implemented as builtins. Using these wrappers\n# lets us keep those builtins small and re-usable.\ndef _no_grad_uniform_(tensor, a, b):\n with torch.no_grad():\n return tensor.uniform_(a, b)\n\n\ndef _no_grad_normal_(tensor, mean, std):\n with torch.no_grad():\n return tensor.normal_(mean, std)\n\n\ndef _no_grad_trunc_normal_(tensor, mean, std, a, b):\n # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf\n def norm_cdf(x):\n # Computes standard normal cumulative distribution function\n return (1. + math.erf(x / math.sqrt(2.))) / 2.\n\n if (mean < a - 2 * std) or (mean > b + 2 * std):\n warnings.warn(\"mean is more than 2 std from [a, b] in nn.init.trunc_normal_. \"\n \"The distribution of values may be incorrect.\",\n stacklevel=2)\n\n with torch.no_grad():\n # Values are generated by using a truncated uniform distribution and\n # then using the inverse CDF for the normal distribution.\n # Get upper and lower cdf values\n l = norm_cdf((a - mean) / std)\n u = norm_cdf((b - mean) / std)\n\n # Uniformly fill tensor with values from [l, u], then translate to\n # [2l-1, 2u-1].\n tensor.uniform_(2 * l - 1, 2 * u - 1)\n\n # Use inverse cdf transform for normal distribution to get truncated\n # standard normal\n tensor.erfinv_()\n\n # Transform to proper mean, std\n tensor.mul_(std * math.sqrt(2.))\n tensor.add_(mean)\n\n # Clamp to ensure it's in the proper range\n tensor.clamp_(min=a, max=b)\n return tensor\n\n\ndef _no_grad_fill_(tensor, val):\n with torch.no_grad():\n return tensor.fill_(val)\n\n\ndef _no_grad_zero_(tensor):\n with torch.no_grad():\n return tensor.zero_()\n\n\ndef calculate_gain(nonlinearity, param=None):\n r\"\"\"Return the recommended gain value for the given nonlinearity function.\n The values are as follows:\n\n ================= ====================================================\n nonlinearity gain\n ================= ====================================================\n Linear / Identity :math:`1`\n Conv{1,2,3}D :math:`1`\n Sigmoid :math:`1`\n Tanh :math:`\\frac{5}{3}`\n ReLU :math:`\\sqrt{2}`\n Leaky Relu :math:`\\sqrt{\\frac{2}{1 + \\text{negative\\_slope}^2}}`\n SELU :math:`\\frac{3}{4}`\n ================= ====================================================\n\n Args:\n nonlinearity: the non-linear function (`nn.functional` name)\n param: optional parameter for the non-linear function\n\n Examples:\n >>> gain = nn.init.calculate_gain('leaky_relu', 0.2) # leaky_relu with negative_slope=0.2\n \"\"\"\n linear_fns = ['linear', 'conv1d', 'conv2d', 'conv3d', 'conv_transpose1d', 'conv_transpose2d', 'conv_transpose3d']\n if nonlinearity in linear_fns or nonlinearity == 'sigmoid':\n return 1\n elif nonlinearity == 'tanh':\n return 5.0 / 3\n elif nonlinearity == 'relu':\n return math.sqrt(2.0)\n elif nonlinearity == 'leaky_relu':\n if param is None:\n negative_slope = 0.01\n elif not isinstance(param, bool) and isinstance(param, int) or isinstance(param, float):\n # True/False are instances of int, hence check 
above\n negative_slope = param\n else:\n raise ValueError(\"negative_slope {} not a valid number\".format(param))\n return math.sqrt(2.0 / (1 + negative_slope ** 2))\n elif nonlinearity == 'selu':\n return 3.0 / 4 # Value found empirically (https://github.com/pytorch/pytorch/pull/50664)\n else:\n raise ValueError(\"Unsupported nonlinearity {}\".format(nonlinearity))\n\n\ndef uniform_(tensor: Tensor, a: float = 0., b: float = 1.) -> Tensor:\n r\"\"\"Fills the input Tensor with values drawn from the uniform\n distribution :math:`\\mathcal{U}(a, b)`.\n\n Args:\n tensor: an n-dimensional `torch.Tensor`\n a: the lower bound of the uniform distribution\n b: the upper bound of the uniform distribution\n\n Examples:\n >>> w = torch.empty(3, 5)\n >>> nn.init.uniform_(w)\n \"\"\"\n return _no_grad_uniform_(tensor, a, b)\n\n\ndef normal_(tensor: Tensor, mean: float = 0., std: float = 1.) -> Tensor:\n r\"\"\"Fills the input Tensor with values drawn from the normal\n distribution :math:`\\mathcal{N}(\\text{mean}, \\text{std}^2)`.\n\n Args:\n tensor: an n-dimensional `torch.Tensor`\n mean: the mean of the normal distribution\n std: the standard deviation of the normal distribution\n\n Examples:\n >>> w = torch.empty(3, 5)\n >>> nn.init.normal_(w)\n \"\"\"\n return _no_grad_normal_(tensor, mean, std)\n\ndef trunc_normal_(tensor: Tensor, mean: float = 0., std: float = 1., a: float = -2., b: float = 2.) -> Tensor:\n r\"\"\"Fills the input Tensor with values drawn from a truncated\n normal distribution. The values are effectively drawn from the\n normal distribution :math:`\\mathcal{N}(\\text{mean}, \\text{std}^2)`\n with values outside :math:`[a, b]` redrawn until they are within\n the bounds. The method used for generating the random values works\n best when :math:`a \\leq \\text{mean} \\leq b`.\n\n Args:\n tensor: an n-dimensional `torch.Tensor`\n mean: the mean of the normal distribution\n std: the standard deviation of the normal distribution\n a: the minimum cutoff value\n b: the maximum cutoff value\n\n Examples:\n >>> w = torch.empty(3, 5)\n >>> nn.init.trunc_normal_(w)\n \"\"\"\n return _no_grad_trunc_normal_(tensor, mean, std, a, b)\n\n\ndef constant_(tensor: Tensor, val: float) -> Tensor:\n r\"\"\"Fills the input Tensor with the value :math:`\\text{val}`.\n\n Args:\n tensor: an n-dimensional `torch.Tensor`\n val: the value to fill the tensor with\n\n Examples:\n >>> w = torch.empty(3, 5)\n >>> nn.init.constant_(w, 0.3)\n \"\"\"\n return _no_grad_fill_(tensor, val)\n\n\ndef ones_(tensor: Tensor) -> Tensor:\n r\"\"\"Fills the input Tensor with the scalar value `1`.\n\n Args:\n tensor: an n-dimensional `torch.Tensor`\n\n Examples:\n >>> w = torch.empty(3, 5)\n >>> nn.init.ones_(w)\n \"\"\"\n return _no_grad_fill_(tensor, 1.)\n\n\ndef zeros_(tensor: Tensor) -> Tensor:\n r\"\"\"Fills the input Tensor with the scalar value `0`.\n\n Args:\n tensor: an n-dimensional `torch.Tensor`\n\n Examples:\n >>> w = torch.empty(3, 5)\n >>> nn.init.zeros_(w)\n \"\"\"\n return _no_grad_zero_(tensor)\n\n\ndef eye_(tensor):\n r\"\"\"Fills the 2-dimensional input `Tensor` with the identity\n matrix. 
Preserves the identity of the inputs in `Linear` layers, where as\n many inputs are preserved as possible.\n\n Args:\n tensor: a 2-dimensional `torch.Tensor`\n\n Examples:\n >>> w = torch.empty(3, 5)\n >>> nn.init.eye_(w)\n \"\"\"\n if tensor.ndimension() != 2:\n raise ValueError(\"Only tensors with 2 dimensions are supported\")\n\n with torch.no_grad():\n torch.eye(*tensor.shape, out=tensor, requires_grad=tensor.requires_grad)\n return tensor\n\n\ndef dirac_(tensor, groups=1):\n r\"\"\"Fills the {3, 4, 5}-dimensional input `Tensor` with the Dirac\n delta function. Preserves the identity of the inputs in `Convolutional`\n layers, where as many input channels are preserved as possible. In case\n of groups>1, each group of channels preserves identity\n\n Args:\n tensor: a {3, 4, 5}-dimensional `torch.Tensor`\n groups (optional): number of groups in the conv layer (default: 1)\n Examples:\n >>> w = torch.empty(3, 16, 5, 5)\n >>> nn.init.dirac_(w)\n >>> w = torch.empty(3, 24, 5, 5)\n >>> nn.init.dirac_(w, 3)\n \"\"\"\n dimensions = tensor.ndimension()\n if dimensions not in [3, 4, 5]:\n raise ValueError(\"Only tensors with 3, 4, or 5 dimensions are supported\")\n\n sizes = tensor.size()\n\n if sizes[0] % groups != 0:\n raise ValueError('dim 0 must be divisible by groups')\n\n out_chans_per_grp = sizes[0] // groups\n min_dim = min(out_chans_per_grp, sizes[1])\n\n with torch.no_grad():\n tensor.zero_()\n\n for g in range(groups):\n for d in range(min_dim):\n if dimensions == 3: # Temporal convolution\n tensor[g * out_chans_per_grp + d, d, tensor.size(2) // 2] = 1\n elif dimensions == 4: # Spatial convolution\n tensor[g * out_chans_per_grp + d, d, tensor.size(2) // 2,\n tensor.size(3) // 2] = 1\n else: # Volumetric convolution\n tensor[g * out_chans_per_grp + d, d, tensor.size(2) // 2,\n tensor.size(3) // 2, tensor.size(4) // 2] = 1\n return tensor\n\n\ndef _calculate_fan_in_and_fan_out(tensor):\n dimensions = tensor.dim()\n if dimensions < 2:\n raise ValueError(\"Fan in and fan out can not be computed for tensor with fewer than 2 dimensions\")\n\n num_input_fmaps = tensor.size(1)\n num_output_fmaps = tensor.size(0)\n receptive_field_size = 1\n if tensor.dim() > 2:\n receptive_field_size = tensor[0][0].numel()\n fan_in = num_input_fmaps * receptive_field_size\n fan_out = num_output_fmaps * receptive_field_size\n\n return fan_in, fan_out\n\n\ndef xavier_uniform_(tensor: Tensor, gain: float = 1.) -> Tensor:\n r\"\"\"Fills the input `Tensor` with values according to the method\n described in `Understanding the difficulty of training deep feedforward\n neural networks` - Glorot, X. & Bengio, Y. (2010), using a uniform\n distribution. The resulting tensor will have values sampled from\n :math:`\\mathcal{U}(-a, a)` where\n\n .. math::\n a = \\text{gain} \\times \\sqrt{\\frac{6}{\\text{fan\\_in} + \\text{fan\\_out}}}\n\n Also known as Glorot initialization.\n\n Args:\n tensor: an n-dimensional `torch.Tensor`\n gain: an optional scaling factor\n\n Examples:\n >>> w = torch.empty(3, 5)\n >>> nn.init.xavier_uniform_(w, gain=nn.init.calculate_gain('relu'))\n \"\"\"\n fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)\n std = gain * math.sqrt(2.0 / float(fan_in + fan_out))\n a = math.sqrt(3.0) * std # Calculate uniform bounds from standard deviation\n\n return _no_grad_uniform_(tensor, -a, a)\n\n\ndef xavier_normal_(tensor: Tensor, gain: float = 1.) 
-> Tensor:\n r\"\"\"Fills the input `Tensor` with values according to the method\n described in `Understanding the difficulty of training deep feedforward\n neural networks` - Glorot, X. & Bengio, Y. (2010), using a normal\n distribution. The resulting tensor will have values sampled from\n :math:`\\mathcal{N}(0, \\text{std}^2)` where\n\n .. math::\n \\text{std} = \\text{gain} \\times \\sqrt{\\frac{2}{\\text{fan\\_in} + \\text{fan\\_out}}}\n\n Also known as Glorot initialization.\n\n Args:\n tensor: an n-dimensional `torch.Tensor`\n gain: an optional scaling factor\n\n Examples:\n >>> w = torch.empty(3, 5)\n >>> nn.init.xavier_normal_(w)\n \"\"\"\n fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)\n std = gain * math.sqrt(2.0 / float(fan_in + fan_out))\n\n return _no_grad_normal_(tensor, 0., std)\n\n\ndef _calculate_correct_fan(tensor, mode):\n mode = mode.lower()\n valid_modes = ['fan_in', 'fan_out']\n if mode not in valid_modes:\n raise ValueError(\"Mode {} not supported, please use one of {}\".format(mode, valid_modes))\n\n fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)\n return fan_in if mode == 'fan_in' else fan_out\n\n\ndef kaiming_uniform_(tensor, a=0, mode='fan_in', nonlinearity='leaky_relu'):\n r\"\"\"Fills the input `Tensor` with values according to the method\n described in `Delving deep into rectifiers: Surpassing human-level\n performance on ImageNet classification` - He, K. et al. (2015), using a\n uniform distribution. The resulting tensor will have values sampled from\n :math:`\\mathcal{U}(-\\text{bound}, \\text{bound})` where\n\n .. math::\n \\text{bound} = \\text{gain} \\times \\sqrt{\\frac{3}{\\text{fan\\_mode}}}\n\n Also known as He initialization.\n\n Args:\n tensor: an n-dimensional `torch.Tensor`\n a: the negative slope of the rectifier used after this layer (only\n used with ``'leaky_relu'``)\n mode: either ``'fan_in'`` (default) or ``'fan_out'``. Choosing ``'fan_in'``\n preserves the magnitude of the variance of the weights in the\n forward pass. Choosing ``'fan_out'`` preserves the magnitudes in the\n backwards pass.\n nonlinearity: the non-linear function (`nn.functional` name),\n recommended to use only with ``'relu'`` or ``'leaky_relu'`` (default).\n\n Examples:\n >>> w = torch.empty(3, 5)\n >>> nn.init.kaiming_uniform_(w, mode='fan_in', nonlinearity='relu')\n \"\"\"\n fan = _calculate_correct_fan(tensor, mode)\n gain = calculate_gain(nonlinearity, a)\n std = gain / math.sqrt(fan)\n bound = math.sqrt(3.0) * std # Calculate uniform bounds from standard deviation\n with torch.no_grad():\n return tensor.uniform_(-bound, bound)\n\n\ndef kaiming_normal_(tensor, a=0, mode='fan_in', nonlinearity='leaky_relu'):\n r\"\"\"Fills the input `Tensor` with values according to the method\n described in `Delving deep into rectifiers: Surpassing human-level\n performance on ImageNet classification` - He, K. et al. (2015), using a\n normal distribution. The resulting tensor will have values sampled from\n :math:`\\mathcal{N}(0, \\text{std}^2)` where\n\n .. math::\n \\text{std} = \\frac{\\text{gain}}{\\sqrt{\\text{fan\\_mode}}}\n\n Also known as He initialization.\n\n Args:\n tensor: an n-dimensional `torch.Tensor`\n a: the negative slope of the rectifier used after this layer (only\n used with ``'leaky_relu'``)\n mode: either ``'fan_in'`` (default) or ``'fan_out'``. Choosing ``'fan_in'``\n preserves the magnitude of the variance of the weights in the\n forward pass. 
Choosing ``'fan_out'`` preserves the magnitudes in the\n backwards pass.\n nonlinearity: the non-linear function (`nn.functional` name),\n recommended to use only with ``'relu'`` or ``'leaky_relu'`` (default).\n\n Examples:\n >>> w = torch.empty(3, 5)\n >>> nn.init.kaiming_normal_(w, mode='fan_out', nonlinearity='relu')\n \"\"\"\n fan = _calculate_correct_fan(tensor, mode)\n gain = calculate_gain(nonlinearity, a)\n std = gain / math.sqrt(fan)\n with torch.no_grad():\n return tensor.normal_(0, std)\n\n\ndef orthogonal_(tensor, gain=1):\n r\"\"\"Fills the input `Tensor` with a (semi) orthogonal matrix, as\n described in `Exact solutions to the nonlinear dynamics of learning in deep\n linear neural networks` - Saxe, A. et al. (2013). The input tensor must have\n at least 2 dimensions, and for tensors with more than 2 dimensions the\n trailing dimensions are flattened.\n\n Args:\n tensor: an n-dimensional `torch.Tensor`, where :math:`n \\geq 2`\n gain: optional scaling factor\n\n Examples:\n >>> w = torch.empty(3, 5)\n >>> nn.init.orthogonal_(w)\n \"\"\"\n if tensor.ndimension() < 2:\n raise ValueError(\"Only tensors with 2 or more dimensions are supported\")\n\n rows = tensor.size(0)\n cols = tensor.numel() // rows\n flattened = tensor.new(rows, cols).normal_(0, 1)\n\n if rows < cols:\n flattened.t_()\n\n # Compute the qr factorization\n q, r = torch.qr(flattened)\n # Make Q uniform according to https://arxiv.org/pdf/math-ph/0609050.pdf\n d = torch.diag(r, 0)\n ph = d.sign()\n q *= ph\n\n if rows < cols:\n q.t_()\n\n with torch.no_grad():\n tensor.view_as(q).copy_(q)\n tensor.mul_(gain)\n return tensor\n\n\ndef sparse_(tensor, sparsity, std=0.01):\n r\"\"\"Fills the 2D input `Tensor` as a sparse matrix, where the\n non-zero elements will be drawn from the normal distribution\n :math:`\\mathcal{N}(0, 0.01)`, as described in `Deep learning via\n Hessian-free optimization` - Martens, J. (2010).\n\n Args:\n tensor: an n-dimensional `torch.Tensor`\n sparsity: The fraction of elements in each column to be set to zero\n std: the standard deviation of the normal distribution used to generate\n the non-zero values\n\n Examples:\n >>> w = torch.empty(3, 5)\n >>> nn.init.sparse_(w, sparsity=0.1)\n \"\"\"\n if tensor.ndimension() != 2:\n raise ValueError(\"Only tensors with 2 dimensions are supported\")\n\n rows, cols = tensor.shape\n num_zeros = int(math.ceil(sparsity * rows))\n\n with torch.no_grad():\n tensor.normal_(0, std)\n for col_idx in range(cols):\n row_indices = torch.randperm(rows)\n zero_indices = row_indices[:num_zeros]\n tensor[zero_indices, col_idx] = 0\n return tensor\n\n\n# for backward compatibility\ndef _make_deprecate(meth):\n new_name = meth.__name__\n old_name = new_name[:-1]\n\n def deprecated_init(*args, **kwargs):\n warnings.warn(\"nn.init.{} is now deprecated in favor of nn.init.{}.\"\n .format(old_name, new_name), stacklevel=2)\n return meth(*args, **kwargs)\n\n deprecated_init.__doc__ = r\"\"\"\n {old_name}(...)\n\n .. 
warning::\n This method is now deprecated in favor of :func:`torch.nn.init.{new_name}`.\n\n See :func:`~torch.nn.init.{new_name}` for details.\"\"\".format(\n old_name=old_name, new_name=new_name)\n deprecated_init.__name__ = old_name\n return deprecated_init\n\n\nuniform = _make_deprecate(uniform_)\nnormal = _make_deprecate(normal_)\nconstant = _make_deprecate(constant_)\neye = _make_deprecate(eye_)\ndirac = _make_deprecate(dirac_)\nxavier_uniform = _make_deprecate(xavier_uniform_)\nxavier_normal = _make_deprecate(xavier_normal_)\nkaiming_uniform = _make_deprecate(kaiming_uniform_)\nkaiming_normal = _make_deprecate(kaiming_normal_)\northogonal = _make_deprecate(orthogonal_)\nsparse = _make_deprecate(sparse_)\n" ]
[ [ "torch.utils.benchmark.utils.cpp_jit.compile_timeit_template", "torch.utils.benchmark.utils.valgrind_wrapper.timer_interface.wrapper_singleton", "torch.cuda.synchronize", "torch.utils.benchmark.utils.common.set_torch_threads", "torch.utils.benchmark.utils.common.Measurement", "torch.utils.benchmark.utils.common.TaskSpec", "torch.cuda.is_available", "torch.utils.benchmark.utils.valgrind_wrapper.timer_interface.CopyIfCallgrind.unwrap_all" ], [ "numpy.zeros", "numpy.random.rand", "numpy.transpose" ], [ "torch.distributed._register_comm_hook", "torch.zeros", "torch.distributed.distributed_c10d._get_default_group", "torch.distributed._set_construction_logging_data", "torch.no_grad", "torch.cuda.stream", "torch.distributed.get_rank", "torch.is_grad_enabled", "torch.distributed._broadcast_coalesced", "torch.distributed.rpc.is_available", "torch.ones", "torch.tensor", "torch.distributed._compute_bucket_assignment_by_size", "torch.distributed._register_builtin_comm_hook", "torch.distributed._get_ddp_logging_data", "torch.cuda.current_stream", "torch.zeros_like", "torch.distributed.is_available", "torch._utils._get_all_device_indices", "torch.cuda.device", "torch._utils._get_device_index", "torch.distributed.all_reduce" ], [ "torch.randperm", "torch.eye", "torch.qr", "torch.no_grad", "torch.diag" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mohammedshariqnawaz/Pedestron
[ "9785feb94f00e07ae24a662525b4678f12d0fdc8" ]
[ "mmdet/models/detectors/csp.py" ]
[ "\nfrom .single_stage import SingleStageDetector\nfrom ..registry import DETECTORS\nfrom mmdet.core import bbox2result\nimport torch.nn as nn\nimport torch\nfrom .. import builder\nimport numpy as np\nimport cv2\nfrom mmdet.core import bbox2roi, bbox2result, build_assigner, build_sampler\n\[email protected]_module\nclass CSP(SingleStageDetector):\n\n def __init__(self,\n backbone,\n neck,\n bbox_head,\n refine_roi_extractor=None,\n refine_head=None,\n train_cfg=None,\n test_cfg=None,\n pretrained=None,\n detached=True,\n return_feature_maps=False):\n super(CSP, self).__init__(backbone, neck, bbox_head, train_cfg,\n test_cfg, pretrained)\n if refine_head is not None:\n self.refine_roi_extractor = builder.build_roi_extractor(\n refine_roi_extractor)\n self.refine_head = builder.build_head(refine_head)\n self.return_feature_maps = return_feature_maps\n self.train_cfg = train_cfg\n self.test_cfg = test_cfg\n self.detached = detached\n\n def show_input_debug(self, img, classification_maps, scale_maps, offset_maps):\n img_numpy = img.cpu().numpy().copy()[0]\n # img_numpy = np.transpose(img_numpy, [1, 2, 0]) * [58.395, 57.12, 57.375] + [123.675, 116.28, 103.53]\n img_numpy = np.transpose(img_numpy, [1, 2, 0]) + [102.9801, 115.9465, 122.7717]\n img_numpy = img_numpy[:, :, ::-1]\n img_numpy = img_numpy.astype(np.uint8)\n strides = [8, 16, 32, 64, 128]\n img_nows = []\n for i, stride in enumerate(strides):\n img_now = img_numpy.copy()\n # cls_numpy = classification_maps[0][i].cpu().numpy().copy()[0][2]\n cls_numpy = classification_maps[0][i].cpu().numpy().copy()[0][:80]\n scale_numpy = scale_maps[0][i].cpu().numpy().copy()[0][0] * stride\n offset_numpy = offset_maps[0][i].cpu().numpy().copy()[0][:2]\n cs, ys, xs = cls_numpy.nonzero()\n print(len(ys))\n for c, x, y in zip(cs, xs, ys):\n cv2.imshow(str(c), classification_maps[0][i].cpu().numpy().copy()[0][80+c])\n realx = x\n realy = y\n height = scale_numpy[y, x]\n realy = realy + 0.5 + offset_numpy[0][y, x]\n realx = realx + 0.5 + offset_numpy[1][y, x]\n realy = realy * stride\n realx = realx * stride\n top_y = int(realy - height/2)\n top_x = int(realx)\n down_y = int(realy + height/2)\n down_x = int(realx)\n top_left = (int(top_x - height * 0.1), int(top_y))\n down_right = (int(down_x + height * 0.1), down_y)\n cv2.rectangle(img_now, top_left, down_right, (255, 255, 5*int(c)), 2)\n img_nows.append(img_now)\n cv2.imshow(str(i) +'img', img_now)\n cv2.waitKey(0)\n\n def show_input_debug_caltech(self, img, classification_maps, scale_maps, offset_maps):\n for j in range(img.shape[0]):\n img_numpy = img.cpu().numpy().copy()[j]\n img_numpy = np.transpose(img_numpy, [1, 2, 0]) * [58.395, 57.12, 57.375] + [123.675, 116.28, 103.53]\n img_numpy = img_numpy[:, :, ::-1]\n img_numpy = img_numpy.astype(np.uint8)\n strides = [4]\n img_nows = []\n for i, stride in enumerate(strides):\n img_now = img_numpy.copy()\n cls_numpy = classification_maps[j][i].cpu().numpy().copy()[0][2]\n ignore_numpy = classification_maps[j][i].cpu().numpy().copy()[0][1]\n cv2.imshow('ignore', ignore_numpy)\n scale_numpy = scale_maps[j][i].cpu().numpy().copy()[0][0] * stride\n offset_numpy = offset_maps[j][i].cpu().numpy().copy()[0][:2]\n ys, xs = cls_numpy.nonzero()\n print(len(ys))\n for x, y in zip(xs, ys):\n # cv2.imshow(str(c), classification_maps[j][i].cpu().numpy().copy()[0][c])\n realx = x\n realy = y\n height = scale_numpy[y, x]\n realy = realy + 0.5 + offset_numpy[0][y, x]\n realx = realx + 0.5 + offset_numpy[1][y, x]\n realy = realy * stride\n realx = realx * stride\n top_y = 
int(realy - height/2)\n top_x = int(realx)\n down_y = int(realy + height/2)\n down_x = int(realx)\n top_left = (int(top_x - height * 0.1), int(top_y))\n down_right = (int(down_x + height * 0.1), down_y)\n cv2.rectangle(img_now, top_left, down_right, (255, 255, 125), 2)\n img_nows.append(img_now)\n cv2.imshow(str(i) +'img', img_now)\n cv2.waitKey(0)\n\n def show_input_debug_head(self, img, classification_maps, scale_maps, offset_maps):\n for j in range(img.shape[0]):\n img_numpy = img.cpu().numpy().copy()[j]\n img_numpy = np.transpose(img_numpy, [1, 2, 0]) * [58.395, 57.12, 57.375] + [123.675, 116.28, 103.53]\n img_numpy = img_numpy[:, :, ::-1]\n img_numpy = img_numpy.astype(np.uint8)\n strides = [4]\n img_nows = []\n for i, stride in enumerate(strides):\n img_now = img_numpy.copy()\n cls_numpy = classification_maps[j][i].cpu().numpy().copy()[0][2]\n ignore_numpy = classification_maps[j][i].cpu().numpy().copy()[0][1]\n cv2.imshow('ignore', ignore_numpy)\n scale_numpy = scale_maps[j][i].exp().cpu().numpy().copy()[0][0] * stride\n offset_numpy = offset_maps[j][i].cpu().numpy().copy()[0][:2]\n ys, xs = cls_numpy.nonzero()\n for x, y in zip(xs, ys):\n # cv2.imshow(str(c), classification_maps[j][i].cpu().numpy().copy()[0][c])\n realx = x\n realy = y\n height = scale_numpy[y, x]\n realy = realy + 0.5 + offset_numpy[0][y, x]\n realx = realx + 0.5 + offset_numpy[1][y, x]\n realy = realy * stride\n realx = realx * stride\n top_y = int(realy)\n top_x = int(realx)\n down_y = int(realy + height)\n down_x = int(realx)\n top_left = (int(top_x - height * 0.41/2), int(top_y))\n down_right = (int(down_x + height * 0.41/2), down_y)\n cv2.rectangle(img_now, top_left, down_right, (255, 255, 125), 2)\n img_nows.append(img_now)\n cv2.imshow(str(i) +'img', img_now)\n cv2.waitKey(0)\n\n def show_mot_input_debug(self, img, classification_maps, scale_maps, offset_maps):\n for j in range(img.shape[0]):\n img_numpy = img.cpu().numpy().copy()[j]\n img_numpy = np.transpose(img_numpy, [1, 2, 0]) * [58.395, 57.12, 57.375] + [123.675, 116.28, 103.53]\n # img_numpy = np.transpose(img_numpy, [1, 2, 0]) + [102.9801, 115.9465, 122.7717]\n img_numpy = img_numpy[:, :, ::-1]\n img_numpy = img_numpy.astype(np.uint8)\n strides = [4]\n img_nows = []\n for i, stride in enumerate(strides):\n img_now = img_numpy.copy()\n # cls_numpy = classification_maps[0][i].cpu().numpy().copy()[0][2]\n cls_numpy = classification_maps[j][i].cpu().numpy().copy()[0][2]\n instance_numpy = classification_maps[j][i].cpu().numpy().copy()[0][3]\n scale_numpy = scale_maps[j][i].cpu().numpy().copy()[0][0] * stride\n offset_numpy = offset_maps[j][i].cpu().numpy().copy()[0][:2]\n ys, xs = cls_numpy.nonzero()\n for x, y in zip(xs, ys):\n c=0\n cv2.imshow(str(c), classification_maps[j][i].cpu().numpy().copy()[0][2])\n realx = x\n realy = y\n height = scale_numpy[y, x]\n realy = realy + 0.5 + offset_numpy[0][y, x]\n realx = realx + 0.5 + offset_numpy[1][y, x]\n realy = realy * stride\n realx = realx * stride\n top_y = int(realy - height/2)\n top_x = int(realx)\n down_y = int(realy + height/2)\n down_x = int(realx)\n top_left = (int(top_x - height * 0.1), int(top_y))\n down_right = (int(down_x + height * 0.1), down_y)\n cv2.rectangle(img_now, top_left, down_right, (255, 255, 5*int(c)), 2)\n instance = instance_numpy[y, x]\n cv2.putText(img_now, str(instance), top_left, cv2.FONT_HERSHEY_COMPLEX, 1, 255)\n img_nows.append(img_now)\n cv2.imshow(str(i) +'img', img_now)\n cv2.waitKey(0)\n\n @property\n def refine(self):\n return hasattr(self, 'refine_head') and 
self.refine_head is not None\n\n def forward_train(self,\n img,\n img_metas,\n gt_bboxes,\n gt_labels,\n gt_bboxes_ignore=None,\n classification_maps=None,\n scale_maps=None,\n offset_maps=None):\n # for tracking data which batch is produced by dataset instead of data loader\n if type(img) == list:\n img=img[0]\n img_metas=img_metas[0]\n gt_bboxes=gt_bboxes[0]\n gt_labels=gt_labels[0]\n gt_bboxes_ignore = gt_bboxes_ignore[0]\n classification_maps = classification_maps[0]\n scale_maps = scale_maps[0]\n offset_maps = offset_maps[0]\n\n losses = dict()\n x = self.extract_feat(img)\n # self.show_input_debug(img, classification_maps, scale_maps, offset_maps)\n # self.show_input_debug_caltech(img, classification_maps, scale_maps, offset_maps)\n # self.show_mot_input_debug(img, classification_maps, scale_maps, offset_maps)\n # self.show_input_debug_head(img, classification_maps, scale_maps, offset_maps)\n\n outs = self.bbox_head(x)\n loss_inputs = outs + (gt_bboxes, gt_labels, classification_maps, scale_maps, offset_maps, img_metas, self.train_cfg.csp_head if self.refine else self.train_cfg)\n losses_bbox = self.bbox_head.loss(\n *loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)\n losses.update(losses_bbox)\n \n if self.refine:\n if self.detached:\n x = tuple([i.detach() for i in x])\n bbox_inputs = outs + (img_metas, self.train_cfg.csp_head, False)\n bbox_list = self.bbox_head.get_bboxes(*bbox_inputs, no_strides=False) # no_strides to not upscale yet\n \n bbox_list = [\n bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)[0]\n for det_bboxes, det_labels in bbox_list\n ]\n\n bbox_assigner = build_assigner(self.train_cfg.rcnn.assigner)\n bbox_sampler = build_sampler(\n self.train_cfg.rcnn.sampler, context=self)\n num_imgs = img.size(0)\n if gt_bboxes_ignore is None:\n gt_bboxes_ignore = [None for _ in range(num_imgs)]\n sampling_results = []\n \n for i in range(num_imgs):\n if bbox_list[i].shape[0] == 0 or gt_bboxes[i].shape[0] == 0:\n continue\n bbox = torch.tensor(bbox_list[i]).float().cuda()\n assign_result = bbox_assigner.assign(\n bbox, gt_bboxes[i], gt_bboxes_ignore[i],\n gt_labels[i])\n sampling_result = bbox_sampler.sample(\n assign_result,\n bbox,\n gt_bboxes[i],\n gt_labels[i])\n sampling_results.append(sampling_result)\n\n samp_list = [res.bboxes for res in sampling_results]\n if len(samp_list) == 0:\n losses.update(dict(loss_refine_cls=torch.tensor(0).float().cuda(), acc=torch.tensor(0).float().cuda()))\n return losses\n rois = bbox2roi(samp_list).float()\n if self.refine_head.loss_opinion is not None:\n pred_scores = torch.cat([torch.tensor(bbox[:, 4]).float().cuda() for bbox in bbox_list], dim=0)\n pred_rois = bbox2roi([torch.tensor(bbox).float().cuda() for bbox in bbox_list])\n pred_feats = self.refine_roi_extractor(\n x, pred_rois)\n pred_scores_refine = self.refine_head(pred_feats)\n loss_opinion = self.refine_head.compute_opinion_loss(pred_scores, pred_scores_refine)\n losses.update(loss_opinion)\n bbox_feats = self.refine_roi_extractor(\n x, rois)\n cls_score = self.refine_head(bbox_feats)\n bbox_targets = self.refine_head.get_target(\n sampling_results, gt_bboxes, gt_labels, self.train_cfg.rcnn)\n loss_refine = self.refine_head.loss(cls_score,\n *bbox_targets[:2])\n losses.update(dict(loss_refine_cls=loss_refine[\"loss_cls\"], distL1=loss_refine[\"dist\"]))\n\n return losses\n\n def simple_test_accuracy(self, img, img_meta):\n gts = img_meta[0][\"gts\"]\n x = self.extract_feat(img)\n if self.detached:\n x = (x[0].detach(),)\n\n rois = bbox2roi(gts)\n if rois.shape[0] 
== 0:\n return 0, 0\n\n roi_feats = self.refine_roi_extractor(\n x, rois)\n cls_score = self.refine_head.get_scores(roi_feats)\n\n return (cls_score > 0.5).float().sum(), rois.size(0)\n\n def simple_test(self, img, img_meta, rescale=False, return_id=False):\n x = self.extract_feat(img)\n outs = self.bbox_head(x)\n bbox_inputs = outs + (img_meta, self.test_cfg.csp_head if self.refine else self.test_cfg, False) # TODO://Handle rescalling\n if self.return_feature_maps:\n return self.bbox_head.get_bboxes_features(*bbox_inputs)\n bbox_list = self.bbox_head.get_bboxes(*bbox_inputs, no_strides=False)\n im_scale = img_meta[0][\"scale_factor\"]\n if \"id\" in img_meta[0]:\n img_id = img_meta[0][\"id\"]\n else:\n img_id = 0\n if self.refine:\n if self.detached:\n x = (x[0].detach(),)\n bbox_list = [\n bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)[0]\n for det_bboxes, det_labels in bbox_list\n ]\n refine_cfg = self.test_cfg.get('rcnn', None)\n bbox_list = [torch.tensor(bbox).float().cuda() for bbox in bbox_list]\n rois = bbox2roi(bbox_list)\n bbox_list = [bbox/im_scale for bbox in bbox_list]\n if rois.shape[0] == 0:\n cls_score = None\n else:\n roi_feats = self.refine_roi_extractor(\n x, rois)\n cls_score = self.refine_head.get_scores(roi_feats)\n\n res_buffer = []\n if cls_score is not None:\n if refine_cfg is not None:\n res_buffer = self.refine_head.suppress_boxes(rois, cls_score, img_meta, cfg=refine_cfg)\n else:\n res_buffer = self.refine_head.combine_scores(bbox_list, cls_score)\n if return_id:\n return res_buffer, img_id\n return res_buffer\n\n bbox_results = [\n bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)\n for det_bboxes, det_labels in bbox_list\n ]\n if return_id:\n return bbox_results[0], img_id\n return bbox_results[0]\n\n def foward_features(self, features):\n bbox_list = self.bbox_head.get_bboxes(*features)\n bbox_results = [\n bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)\n for det_bboxes, det_labels in bbox_list\n ]\n return bbox_results[0]\n" ]
[ [ "torch.tensor", "numpy.transpose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
MichaelAllen1966/stroke_outcome_algorithm
[ "99050bf4e0b19c38c8973fe10234fee4f230a172" ]
[ "clinical_outcome.py" ]
["\"\"\"\nClass to hold clinical outcome model.\nPredicts probability of good outcome of patient(s) (...TRUNCATED)
[ [ "numpy.log", "numpy.zeros", "pandas.DataFrame", "numpy.full" ] ]
[{"matplotlib":[],"numpy":[],"pandas":["0.23","0.21","2.0","1.4","0.19","1.1","1.5","1.2","0.24","0.(...TRUNCATED)
brettelliot/event-study
[ "cffc6a80dbc4b33e68e863488428996af51cc991" ]
[ "examples/earnings_surprises/earnings-converter.py" ]
["import pandas as pd\nfrom pandas.compat import StringIO\nimport numpy\nnumpy.set_printoptions(thre(...TRUNCATED)
[ [ "pandas.compat.StringIO", "numpy.set_printoptions" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]